{ "config": { "name": "cpu_inference_transformers_text-generation_openai-community/gpt2", "backend": { "name": "pytorch", "version": "2.4.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "text-generation", "library": "transformers", "model_type": "gpt2", "model": "openai-community/gpt2", "processor": "openai-community/gpt2", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "inference", "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario", "iterations": 1, "duration": 1, "warmup_runs": 1, "input_shapes": { "batch_size": 1, "num_choices": 2, "sequence_length": 2 }, "new_tokens": null, "memory": true, "latency": true, "energy": true, "forward_kwargs": {}, "generate_kwargs": { "max_new_tokens": 2, "min_new_tokens": 2 }, "call_kwargs": { "num_inference_steps": 2 } }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16766.7712, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.8.0-1014-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0", "optimum_benchmark_commit": "3878a484586671dc5d651a3d7832a31a5aaf4796", "transformers_version": "4.44.2", "transformers_commit": null, "accelerate_version": "0.34.2", "accelerate_commit": null, "diffusers_version": "0.30.3", "diffusers_commit": null, "optimum_version": "1.22.0", "optimum_commit": null, "timm_version": "1.0.9", "timm_commit": null, "peft_version": null, "peft_commit": null } }, "report": { "load": { "memory": { "unit": "MB", "max_ram": 1123.557376, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 5.018486025000016 ], "count": 1, "total": 5.018486025000016, "mean": 5.018486025000016, "p50": 5.018486025000016, "p90": 5.018486025000016, "p95": 5.018486025000016, "p99": 5.018486025000016, "stdev": 0, "stdev_": 0 }, "throughput": null, "energy": { "unit": "kWh", "cpu": 6.760275137777968e-05, "ram": 2.827167509405372e-06, "gpu": 0, "total": 7.042991888718505e-05 }, "efficiency": null }, "prefill": { "memory": { "unit": "MB", "max_ram": 977.805312, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.04271299300000919, 0.04363790299998982, 0.0434843750000482, 0.043116426999972646, 0.04203660800004627, 0.053266866999990725, 0.04623521799999253, 0.04432193100001314, 0.04397191599997541, 0.04522661200002176, 0.04081514500001049, 0.03984808600000633, 0.040456242999994174, 0.040358009999977185, 0.0403351869999824 ], "count": 15, "total": 0.6498235210000303, "mean": 
0.04332156806666868, "p50": 0.043116426999972646, "p90": 0.04583177560000422, "p95": 0.04834471269999198, "p99": 0.05228243613999097, "stdev": 0.003254738193686344, "stdev_": 7.512974111827031 }, "throughput": { "unit": "tokens/s", "value": 46.1663806102805 }, "energy": { "unit": "kWh", "cpu": 1.5965450682222255e-06, "ram": 6.67584037167734e-08, "gpu": 0.0, "total": 1.663303471938999e-06 }, "efficiency": { "unit": "tokens/kWh", "value": 1202426.3964702103 } }, "decode": { "memory": { "unit": "MB", "max_ram": 977.805312, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.027226756999993995, 0.026504356000032203, 0.02576454399996919, 0.025557004999996025, 0.045011580999982925, 0.028917648999993162, 0.0275814209999794, 0.027407197999991695, 0.030450975999997354, 0.02539609500001916, 0.02539325899999767, 0.025114739000002828, 0.024426962000006824, 0.024946934000013243, 0.02476951200003441 ], "count": 15, "total": 0.4144689880000101, "mean": 0.02763126586666734, "p50": 0.02576454399996919, "p90": 0.029837645199995676, "p95": 0.03481915749999301, "p99": 0.042973096299984935, "stdev": 0.0049190430020378696, "stdev_": 17.802452575912927 }, "throughput": { "unit": "tokens/s", "value": 36.19088625274814 }, "energy": { "unit": "kWh", "cpu": 1.1024188814073439e-06, "ram": 4.6097914589188534e-08, "gpu": 0.0, "total": 1.1485167959965314e-06 }, "efficiency": { "unit": "tokens/kWh", "value": 870688.1810399053 } }, "per_token": { "memory": null, "latency": { "unit": "s", "values": [ 0.02683729900002163, 0.026103006000028017, 0.025370085999952607, 0.0252092750000088, 0.04442560499995807, 0.02852716799998234, 0.02723688700001503, 0.02701541599998336, 0.03004166100004113, 0.02504643100002113, 0.025052501999994092, 0.02477755799998249, 0.024107124000011027, 0.024598313000012695, 0.024414299000000028 ], "count": 15, "total": 0.40876263000001245, "mean": 0.02725084200000083, "p50": 0.025370085999952607, "p90": 0.029435863800017614, "p95": 0.034356844200016196, "p99": 0.04241185283996969, "stdev": 0.004860820758867527, "stdev_": 17.83732318754547 }, "throughput": { "unit": "tokens/s", "value": 36.69611383016971 }, "energy": null, "efficiency": null } } }