m2m model translation
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# This script benchmarks the GPT-2 model with past state.
# For the GPT-2 model without past state, use benchmark.py to measure performance.
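#
# Example usage (illustrative; see parse_arguments below for all flags):
#   python benchmark_gpt2.py -m gpt2                       # fp32 on CPU
#   python benchmark_gpt2.py -m gpt2 --use_gpu -o -p fp16  # optimized fp16 on GPU
#   python benchmark_gpt2.py -m gpt2 -o -p int8            # quantized int8 on CPU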
import argparse
import csv
import logging
import os
import sys
from datetime import datetime

import psutil
import torch
from gpt2_helper import DEFAULT_TOLERANCE, MODEL_CLASSES, PRETRAINED_GPT2_MODELS, Gpt2Helper
from packaging import version
from transformers import AutoConfig

sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))

from benchmark_helper import (
    Precision,
    create_onnxruntime_session,
    get_ort_environment_variables,
    prepare_environment,
    setup_logger,
)
from quantize_helper import QuantizeHelper

logger = logging.getLogger("")


def parse_arguments(argv=None):
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "-m",
        "--model_name_or_path",
        required=True,
        type=str,
        help="Model path, or pretrained model name selected in the list: " + ", ".join(PRETRAINED_GPT2_MODELS),
    )

    parser.add_argument(
        "--model_class",
        required=False,
        type=str,
        default="GPT2LMHeadModel",
        choices=list(MODEL_CLASSES.keys()),
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )

    parser.add_argument(
        "--cache_dir",
        required=False,
        type=str,
        default=os.path.join(".", "cache_models"),
        help="Directory to cache pre-trained models",
    )

    parser.add_argument(
        "--onnx_dir",
        required=False,
        type=str,
        default=os.path.join(".", "onnx_models"),
        help="Directory to store onnx models",
    )

    parser.add_argument(
        "--test_times",
        required=False,
        default=100,
        type=int,
        help="Number of repetitions used to compute the average inference latency.",
    )

    parser.add_argument(
        "-v",
        "--validate_onnx",
        required=False,
        action="store_true",
        help="Validate ONNX model",
    )

    parser.add_argument(
        "-o",
        "--optimize_onnx",
        required=False,
        action="store_true",
        help="Use optimizer.py to optimize onnx model",
    )
    parser.set_defaults(optimize_onnx=False)

    parser.add_argument(
        "--stage",
        type=int,
        default=0,
        required=False,
        choices=[0, 1, 2],
        help="Stage in generation: 1 (initial decoder), 2 (decoder), 0 (both). "
        "1 - decode the first token when past_sequence_length is zero; "
        "2 - decode the remaining tokens when past_sequence_length is not zero; "
        "0 - one onnx model for both stages 1 and 2. "
        "Note that we will optimize 1 and 2 differently for best performance.",
    )

    parser.add_argument("--use_gpu", required=False, action="store_true", help="use GPU for inference")
    parser.set_defaults(use_gpu=False)

    parser.add_argument(
        "-p",
        "--precision",
        type=Precision,
        default=Precision.FLOAT32,
        choices=list(Precision),
        help="Precision of model to run. fp32 for full precision, fp16 for half precision, and int8 for quantization",
    )

    parser.add_argument("--torchscript", required=False, action="store_true", help="use TorchScript")
    parser.set_defaults(torchscript=False)

    parser.add_argument("-b", "--batch_sizes", nargs="+", type=int, default=[1], help="batch size")

    parser.add_argument(
        "--sequence_lengths",
        nargs="+",
        type=int,
        default=[1],
        help="sequence lengths (excluding past)",
    )

    parser.add_argument(
        "-s",
        "--past_sequence_lengths",
        nargs="+",
        type=int,
        default=[8, 16, 32, 64, 128, 256],
        help="past sequence lengths",
    )

    parser.add_argument(
        "-r",
        "--result_csv",
        required=False,
        default=None,
        help="CSV file for saving summary results.",
    )

    parser.add_argument("--thread_num", required=False, type=int, default=-1, help="Threads to use")

    parser.add_argument("--include_copy_output_latency", required=False, action="store_true")
    parser.set_defaults(include_copy_output_latency=False)

    parser.add_argument("--verbose", required=False, action="store_true")
    parser.set_defaults(verbose=False)

    parser.add_argument("--output_torch_latency", required=False, action="store_true")
    parser.set_defaults(output_torch_latency=False)

    parser.add_argument("--disable_io_binding", required=False, action="store_true")
    parser.set_defaults(disable_io_binding=False)

    args = parser.parse_args(argv)
    return args
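
# Illustrative example (assuming Precision accepts its string values, e.g. "fp16"):
#   parse_arguments(["-m", "gpt2", "--use_gpu", "-o", "-p", "fp16"])
# returns a Namespace with model_name_or_path="gpt2", use_gpu=True,
# optimize_onnx=True and precision=Precision.FLOAT16.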


def main(args):
    from transformers import __version__ as transformers_version

    if version.parse(transformers_version) < version.parse(
        "3.1.0"
    ):  # past_key_values name does not exist in 3.0.2 or older
        raise RuntimeError("This tool requires transformers 3.1.0 or later.")

    logger.info(f"Arguments: {args}")

    if args.precision == Precision.FLOAT16:
        assert args.optimize_onnx and args.use_gpu, "fp16 requires --optimize_onnx --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    if args.stage == 1:
        assert args.past_sequence_lengths == [0], "past_sequence_lengths shall be 0 for stage==1 (init decoder)"

    torch.set_num_threads(psutil.cpu_count(logical=True) if args.thread_num <= 0 else args.thread_num)
    print(torch.__config__.parallel_info())

    cache_dir = args.cache_dir
    output_dir = args.onnx_dir
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    model_class = MODEL_CLASSES[args.model_class][0]

    gpt2helper = Gpt2Helper
    config = AutoConfig.from_pretrained(args.model_name_or_path, torchscript=args.torchscript, cache_dir=cache_dir)
    model = model_class.from_pretrained(args.model_name_or_path, config=config, cache_dir=cache_dir)

    # This script does not support float16 for PyTorch.
    # if args.float16:
    #    model.half()

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.to(device)

    use_external_data_format = config.n_layer > 24  # TODO: find a way to check model size > 2GB
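    # ONNX serializes models as protobuf, which limits a single file to 2GB; larger
    # models must store weights via the external data format, hence this heuristic.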

    onnx_model_paths = gpt2helper.get_onnx_paths(
        output_dir,
        args.model_name_or_path,
        args.model_class,
        has_past=True,
        new_folder=use_external_data_format,
    )

    onnx_model_path = onnx_model_paths["raw"]
    use_padding = MODEL_CLASSES[args.model_class][2]
    gpt2helper.export_onnx(
        model,
        device,
        onnx_model_path,
        args.verbose,
        use_external_data_format,
        has_position_ids=use_padding,
        has_attention_mask=use_padding,
    )

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        onnx_model_path = onnx_model_paths[str(args.precision) if args.precision != Precision.INT8 else "fp32"]

        gpt2helper.optimize_onnx(
            onnx_model_paths["raw"],
            onnx_model_path,
            args.precision == Precision.FLOAT16,
            model.config.num_attention_heads,
            model.config.hidden_size,
            use_external_data_format,
            auto_mixed_precision=True,
            stage=args.stage,
        )

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(onnx_model_path, onnx_model_paths["int8"], use_external_data_format)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")
        onnx_model_path = onnx_model_paths["int8"]
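
    # Both the ONNX model and the PyTorch model are quantized above so the latency
    # and accuracy comparison below stays apples-to-apples; the exact post-training
    # quantization scheme lives in quantize_helper.py.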

    if args.torchscript:
        model = gpt2helper.torchscript(
            model,
            config,
            device,
            has_position_ids=use_padding,
            has_attention_mask=use_padding,
        )

    session = create_onnxruntime_session(
        onnx_model_path,
        args.use_gpu,
        enable_all_optimization=False,
        num_threads=args.thread_num,
        verbose=args.verbose,
    )
    if session is None:
        return

    # Allocate output buffers for IO Binding
    max_output_shapes = gpt2helper.get_output_shapes(
        max(args.batch_sizes),
        max(args.past_sequence_lengths),
        max(args.sequence_lengths),
        config,
        args.model_class,
    )

    output_buffers = gpt2helper.get_output_buffers(max_output_shapes, device, args.precision == Precision.FLOAT16)
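    # Buffers sized for the largest batch/sequence/past combination can be reused
    # (rebound) for every smaller configuration in the benchmark loops below.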

    csv_filename = args.result_csv or "benchmark_result_{}.csv".format(datetime.now().strftime("%Y%m%d-%H%M%S"))
    with open(csv_filename, mode="a", newline="") as csv_file:
        column_names = [
            "model_name",
            "model_class",
            "stage",
            "environment_variables",
            "gpu",
            "precision",
            "optimizer",
            "torchscript",
            "batch_size",
            "sequence_length",
            "past_sequence_length",
            "disable_io_binding",
            "torch_latency",
            "onnxruntime_latency",
        ]
        csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
        csv_writer.writeheader()

        for batch_size in args.batch_sizes:
            for sequence_length in args.sequence_lengths:
                for past_sequence_length in args.past_sequence_lengths:
                    assert batch_size > 0 and sequence_length > 0 and past_sequence_length >= 0
                    logger.debug(
                        "Running test for batch_size=%d sequence_length=%d past_sequence_length=%d ...",
                        batch_size,
                        sequence_length,
                        past_sequence_length,
                    )

                    dummy_inputs = gpt2helper.get_dummy_inputs(
                        batch_size,
                        past_sequence_length,
                        sequence_length,
                        config.num_attention_heads,
                        config.hidden_size,
                        config.n_layer,
                        config.vocab_size,
                        device,
                        float16=(args.precision == Precision.FLOAT16),
                        has_position_ids=use_padding,
                        has_attention_mask=use_padding,
                    )

                    output_shapes = gpt2helper.get_output_shapes(
                        batch_size,
                        past_sequence_length,
                        sequence_length,
                        config,
                        args.model_class,
                    )

                    try:
                        if args.validate_onnx or args.output_torch_latency:
                            outputs, torch_latency = gpt2helper.pytorch_inference(model, dummy_inputs, args.test_times)

                            # Dump Torch output shape
                            for i, value in enumerate(outputs):
                                if isinstance(value, tuple):
                                    logger.debug(
                                        f"torch output {i} is tuple of size {len(value)}, shape {value[0].shape}"
                                    )
                                else:
                                    logger.debug(f"torch output {i} shape {value.shape}")
                        else:
                            outputs = None
                            torch_latency = None
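
                        # With IO binding, inputs and outputs stay in preallocated buffers
                        # (on GPU when --use_gpu), avoiding per-run host/device copies;
                        # --disable_io_binding measures the plain run() path instead.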
                        if args.disable_io_binding:
                            ort_outputs, ort_latency = gpt2helper.onnxruntime_inference(
                                session, dummy_inputs, args.test_times
                            )
                        else:
                            ort_outputs, ort_latency = gpt2helper.onnxruntime_inference_with_binded_io(
                                session,
                                dummy_inputs,
                                output_buffers,
                                output_shapes,
                                args.test_times,
                                return_numpy=False,
                                include_copy_output_latency=args.include_copy_output_latency,
                            )

                        if args.validate_onnx:
                            copy_outputs = ort_outputs
                            if not args.disable_io_binding:
                                # Results of IO binding might be in GPU. Copy outputs to CPU for comparison.
                                copy_outputs = []
                                for output in ort_outputs:
                                    copy_outputs.append(output.cpu().numpy())

                            if gpt2helper.compare_outputs(
                                outputs,
                                copy_outputs,
                                model_class=args.model_class,
                                rtol=DEFAULT_TOLERANCE[args.precision],
                                atol=DEFAULT_TOLERANCE[args.precision],
                            ):
                                logger.info(
                                    f"PyTorch and ONNX Runtime outputs are all close (tolerance={DEFAULT_TOLERANCE[args.precision]})."
                                )

                        logger.info(
                            "batch_size=%d, sequence_length=%d, past_sequence_length=%d, onnxruntime_latency=%.2f %s %s",
                            batch_size,
                            sequence_length,
                            past_sequence_length,
                            ort_latency,
                            "(disable_io_binding)" if args.disable_io_binding else "",
                            f", torch_latency={torch_latency}" if torch_latency else "",
                        )

                        row = {
                            "model_name": args.model_name_or_path,
                            "model_class": args.model_class,
                            "stage": args.stage,
                            "environment_variables": get_ort_environment_variables(),
                            "gpu": args.use_gpu,
                            "precision": args.precision,
                            "optimizer": args.optimize_onnx,
                            "torchscript": args.torchscript,
                            "batch_size": batch_size,
                            "sequence_length": sequence_length,
                            "past_sequence_length": past_sequence_length,
                            "disable_io_binding": args.disable_io_binding,
                            "torch_latency": f"{torch_latency:.2f}" if torch_latency else "None",
                            "onnxruntime_latency": f"{ort_latency:.2f}",
                        }
                        csv_writer.writerow(row)
                    except Exception:
                        logger.error("Exception", exc_info=True)
                        return None

    logger.info(f"Results are saved to file {csv_filename}")
    return csv_filename


if __name__ == "__main__":
    args = parse_arguments()
    setup_logger(args.verbose)
    main(args)