{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.1+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 1,
"num_choices": 2,
"sequence_length": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.342208,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"optimum_benchmark_version": "0.4.0",
"optimum_benchmark_commit": "be2e25370f413e89ce153d7e4bd57a04011a0855",
"transformers_version": "4.44.2",
"transformers_commit": null,
"accelerate_version": "0.34.0",
"accelerate_commit": null,
"diffusers_version": "0.30.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.9",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1120.145408,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 1,
"total": 4.858029512000002,
"mean": 4.858029512000002,
"stdev": 0.0,
"p50": 4.858029512000002,
"p90": 4.858029512000002,
"p95": 4.858029512000002,
"p99": 4.858029512000002,
"values": [
4.858029512000002
]
},
"throughput": null,
"energy": {
"unit": "kWh",
"cpu": 6.747464896666601e-05,
"ram": 2.8202621840257464e-06,
                    "gpu": 0.0,
"total": 7.029491115069175e-05
},
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 973.635584,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 14,
"total": 0.6458553560000553,
"mean": 0.04613252542857538,
"stdev": 0.0023024062594178118,
"p50": 0.04658173650001629,
"p90": 0.04836021739999126,
"p95": 0.04869596564999057,
"p99": 0.04901554673000561,
"values": [
0.04480722400001014,
0.047518031999999266,
0.04729829199999358,
0.04491247999999359,
0.04909544200000937,
0.04807871100001648,
0.046478448000016215,
0.048480862999980445,
0.04806069800000046,
0.04668502500001637,
0.04575729000001161,
0.045916016999996145,
0.04109036400001287,
0.041676469999998744
]
},
"throughput": {
"unit": "tokens/s",
"value": 43.3533603768668
},
"energy": {
"unit": "kWh",
"cpu": 1.634581853009274e-06,
"ram": 6.831180114627893e-08,
"gpu": 0.0,
"total": 1.702893654155553e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1174471.462219277
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 974.290944,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 14,
"total": 0.40075994499991907,
"mean": 0.028625710357137076,
"stdev": 0.0014440522445720591,
"p50": 0.029129467999993608,
"p90": 0.02990596549999225,
"p95": 0.029979262499988123,
"p99": 0.030032453299988618,
"values": [
0.029323460000000523,
0.02977899300000786,
0.028929975000011154,
0.028262277000010272,
0.029943460999987792,
0.03004575099998874,
0.028398561999978256,
0.02939591599999858,
0.029818476000002647,
0.02900219999997944,
0.029256736000007777,
0.027646764999985862,
0.02536430799997902,
0.025593064999981152
]
},
"throughput": {
"unit": "tokens/s",
"value": 34.93363090466246
},
"energy": {
"unit": "kWh",
"cpu": 9.619579851389037e-07,
"ram": 4.020231554471774e-08,
"gpu": 0.0,
"total": 1.002160300683621e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 997844.3561552506
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"count": 14,
"total": 0.3943050459998858,
"mean": 0.028164646142848988,
"stdev": 0.0013827564768073995,
"p50": 0.02862635249999812,
"p90": 0.02941067309998857,
"p95": 0.029491208149983093,
"p99": 0.02954112242997667,
"values": [
0.02885192899998401,
0.029301148999991256,
0.02844475800000623,
0.02776329400001032,
0.02945761199998742,
0.02955360099997506,
0.027897304999981998,
0.02890827400000262,
0.02924910199999431,
0.02850714499999185,
0.02874556000000439,
0.0273251839999773,
0.02503095399998756,
0.025269178999991482
]
},
"throughput": {
"unit": "tokens/s",
"value": 35.5055055521761
},
"energy": null,
"efficiency": null
}
}
}