IlyasMoutawwakil committed
Commit a358373
1 Parent(s): 171f2d2

Upload cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub
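As the commit message notes, the file was pushed with the huggingface_hub client. A minimal sketch of such an upload, assuming a dataset repo and a local copy of the file (the repo id and local file path below are placeholders, not taken from the commit):

from huggingface_hub import HfApi

api = HfApi()
# Placeholder repo id and local path; the commit itself only records
# the destination path inside the repository.
api.upload_file(
    path_or_fileobj="benchmark.json",
    path_in_repo="cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json",
    repo_id="<namespace>/<repo>",
    repo_type="dataset",
    commit_message="Upload benchmark.json with huggingface_hub",
)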

cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -6,17 +6,19 @@
  "version": "2.3.0+cpu",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "text-generation",
- "library": "transformers",
  "model": "openai-community/gpt2",
- "processor": "openai-community/gpt2",
+ "library": "transformers",
  "device": "cpu",
  "device_ids": null,
  "seed": 42,
  "inter_op_num_threads": null,
  "intra_op_num_threads": null,
- "model_kwargs": {},
- "processor_kwargs": {},
- "hub_kwargs": {},
+ "hub_kwargs": {
+ "revision": "main",
+ "force_download": false,
+ "local_files_only": false,
+ "trust_remote_code": false
+ },
  "no_weights": true,
  "device_map": null,
  "torch_dtype": null,
@@ -71,17 +73,17 @@
  "environment": {
  "cpu": " AMD EPYC 7763 64-Core Processor",
  "cpu_count": 4,
- "cpu_ram_mb": 16757.342208,
+ "cpu_ram_mb": 16757.346304,
  "system": "Linux",
  "machine": "x86_64",
  "platform": "Linux-6.5.0-1018-azure-x86_64-with-glibc2.35",
  "processor": "x86_64",
  "python_version": "3.10.14",
  "optimum_benchmark_version": "0.2.0",
- "optimum_benchmark_commit": "a8ccb91469272f9f38883fbf3e14bffcd5e95f2d",
+ "optimum_benchmark_commit": "b880a42960acf7600cf87a7b5756d96d29e2fb24",
  "transformers_version": "4.40.2",
  "transformers_commit": null,
- "accelerate_version": "0.30.0",
+ "accelerate_version": "0.30.1",
  "accelerate_commit": null,
  "diffusers_version": "0.27.2",
  "diffusers_commit": null,
@@ -97,7 +99,7 @@
  "prefill": {
  "memory": {
  "unit": "MB",
- "max_ram": 959.721472,
+ "max_ram": 948.539392,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -106,49 +108,49 @@
  "latency": {
  "unit": "s",
  "count": 13,
- "total": 0.6312671800000658,
- "mean": 0.048559013846158905,
- "stdev": 0.0023036918286531358,
- "p50": 0.049563321999983145,
- "p90": 0.05037565600000562,
- "p95": 0.050465417600003094,
- "p99": 0.05057309071999498,
+ "total": 0.6379554779999523,
+ "mean": 0.04907349830768864,
+ "stdev": 0.0010666836382432667,
+ "p50": 0.049456617000004144,
+ "p90": 0.049933542600007284,
+ "p95": 0.050156655600005706,
+ "p99": 0.0504093367199971,
  "values": [
- 0.04530646500001012,
- 0.04400738800001136,
- 0.044212981999976364,
- 0.04959109300000364,
- 0.05037551999998868,
- 0.0486010739999756,
- 0.050375690000009854,
- 0.05033551500002886,
- 0.05002007500002037,
- 0.049563321999983145,
- 0.05060000899999295,
- 0.0491694050000433,
- 0.04910864200002152
+ 0.05047250699999495,
+ 0.04963225399998805,
+ 0.04764154999998027,
+ 0.04960540499999411,
+ 0.049456617000004144,
+ 0.04654885599998693,
+ 0.04994608800001288,
+ 0.049883360999984916,
+ 0.048579775999996855,
+ 0.047913816999994197,
+ 0.049307980999998335,
+ 0.04911564300002169,
+ 0.049851622999995016
  ]
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 41.186997873067455
+ "value": 40.75519514545488
  },
  "energy": {
  "unit": "kWh",
- "cpu": 1.6731792577990781e-06,
- "ram": 6.992546118709696e-08,
+ "cpu": 1.6725711800433975e-06,
+ "ram": 6.989933200280913e-08,
  "gpu": 0.0,
- "total": 1.743104718986175e-06
+ "total": 1.7424705120462067e-06
  },
  "efficiency": {
  "unit": "tokens/kWh",
- "value": 1147377.9964081794
+ "value": 1147795.607543093
  }
  },
  "decode": {
  "memory": {
  "unit": "MB",
- "max_ram": 959.721472,
+ "max_ram": 948.539392,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -157,43 +159,43 @@
  "latency": {
  "unit": "s",
  "count": 13,
- "total": 0.39328847399991673,
- "mean": 0.030252959538455134,
- "stdev": 0.0013191504688200174,
- "p50": 0.030537863999995807,
- "p90": 0.03161084880000544,
- "p95": 0.031753168599993845,
- "p99": 0.03188260731998071,
+ "total": 0.4034393080000598,
+ "mean": 0.031033792923081522,
+ "stdev": 0.0003609664808729454,
+ "p50": 0.03111106100001848,
+ "p90": 0.031282662000012354,
+ "p95": 0.031466616400012984,
+ "p99": 0.031675882480013795,
  "values": [
- 0.027211818000012045,
- 0.028034694999973908,
- 0.030499201999987235,
- 0.030537863999995807,
- 0.029951227000026392,
- 0.03191496699997742,
- 0.031080486999996992,
- 0.02936419999997497,
- 0.03006252500000528,
- 0.03164530300000479,
- 0.03067822499997419,
- 0.03083492899997964,
- 0.03147303200000806
+ 0.03111106100001848,
+ 0.0312922280000123,
+ 0.031728199000014,
+ 0.0309515929999975,
+ 0.030498660999995764,
+ 0.03120739000002004,
+ 0.031085653999980423,
+ 0.03090312400001949,
+ 0.031244398000012552,
+ 0.031234318999992183,
+ 0.03074208399999634,
+ 0.031183795999993436,
+ 0.030256801000007272
  ]
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 33.05461730872579
+ "value": 32.222938474795505
  },
  "energy": {
  "unit": "kWh",
- "cpu": 9.73473069844423e-07,
- "ram": 4.0683460135085154e-08,
+ "cpu": 1.0020326353885505e-06,
+ "ram": 4.187678354027073e-08,
  "gpu": 0.0,
- "total": 1.0141565299795071e-06
+ "total": 1.0439094189288217e-06
  },
  "efficiency": {
  "unit": "tokens/kWh",
- "value": 986041.0798914905
+ "value": 957937.5201213548
  }
  },
  "per_token": {
@@ -201,31 +203,31 @@
  "latency": {
  "unit": "s",
  "count": 12,
- "total": 0.9520682520000037,
- "mean": 0.0793390210000003,
- "stdev": 0.0027901983800490988,
- "p50": 0.08025918549998323,
- "p90": 0.08127993490002723,
- "p95": 0.08139592595002795,
- "p99": 0.08150457559002461,
+ "total": 0.9600551610000139,
+ "mean": 0.08000459675000116,
+ "stdev": 0.000877328166393496,
+ "p50": 0.08009505350000268,
+ "p90": 0.08096101209998778,
+ "p95": 0.0809965785999978,
+ "p99": 0.0810171829200081,
  "values": [
- 0.07208035500002552,
- 0.07455751499998087,
- 0.08014541799997232,
- 0.08037295299999414,
- 0.0804896320000239,
- 0.08153173800002378,
- 0.0797304429999599,
- 0.08004407900000388,
- 0.08123608599998988,
- 0.08128480700003138,
- 0.08002632599999515,
- 0.08056890000000294
+ 0.08097550599998726,
+ 0.0793775440000104,
+ 0.08058963799999219,
+ 0.07998787000002494,
+ 0.07776743699997724,
+ 0.08102233400001069,
+ 0.08083056699999247,
+ 0.07983873099999528,
+ 0.07916983700002334,
+ 0.08005119699998886,
+ 0.08030558999999471,
+ 0.0801389100000165
  ]
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 12.604138384818187
+ "value": 12.499281799079695
  },
  "energy": null,
  "efficiency": null