IlyasMoutawwakil (HF staff) committed
Commit 4d7e0be (1 parent: fdaa873)

Upload cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub
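For context, a result file like this is typically pushed programmatically rather than through the web UI. The following is a minimal sketch of such an upload using huggingface_hub's `HfApi.upload_file`; the local path and `repo_id` below are placeholders, not the actual values used by the optimum-benchmark CI.

# Hypothetical upload sketch (Python, huggingface_hub); repo_id and local path are assumptions.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result file (placeholder path)
    path_in_repo="cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json",
    repo_id="user-or-org/benchmark-results",  # hypothetical dataset repo
    repo_type="dataset",
    commit_message="Upload benchmark.json with huggingface_hub",
)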

cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -6,19 +6,17 @@
  "version": "2.3.0+cpu",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "text-generation",
- "model": "openai-community/gpt2",
  "library": "transformers",
+ "model": "openai-community/gpt2",
+ "processor": "openai-community/gpt2",
  "device": "cpu",
  "device_ids": null,
  "seed": 42,
  "inter_op_num_threads": null,
  "intra_op_num_threads": null,
- "hub_kwargs": {
- "revision": "main",
- "force_download": false,
- "local_files_only": false,
- "trust_remote_code": false
- },
+ "model_kwargs": {},
+ "processor_kwargs": {},
+ "hub_kwargs": {},
  "no_weights": true,
  "device_map": null,
  "torch_dtype": null,
@@ -73,14 +71,14 @@
  "environment": {
  "cpu": " AMD EPYC 7763 64-Core Processor",
  "cpu_count": 4,
- "cpu_ram_mb": 16757.338112,
+ "cpu_ram_mb": 16757.354496,
  "system": "Linux",
  "machine": "x86_64",
- "platform": "Linux-6.5.0-1018-azure-x86_64-with-glibc2.35",
+ "platform": "Linux-6.5.0-1021-azure-x86_64-with-glibc2.35",
  "processor": "x86_64",
  "python_version": "3.10.14",
  "optimum_benchmark_version": "0.2.0",
- "optimum_benchmark_commit": "3e2eebdc0f80ae6deeb2e1faad3e889ed5a4df2d",
+ "optimum_benchmark_commit": "6fd377459e287bb09e9383ba2516b1b2a271a562",
  "transformers_version": "4.40.2",
  "transformers_commit": null,
  "accelerate_version": "0.30.1",
@@ -99,7 +97,7 @@
  "prefill": {
  "memory": {
  "unit": "MB",
- "max_ram": 958.312448,
+ "max_ram": 948.24448,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -107,51 +105,52 @@
  },
  "latency": {
  "unit": "s",
- "count": 14,
- "total": 0.6226214250000339,
- "mean": 0.04447295892857385,
- "stdev": 0.0006497683664670401,
- "p50": 0.04445699649998858,
- "p90": 0.04529145000000767,
- "p95": 0.04560630480000612,
- "p99": 0.045885101759996015,
+ "count": 15,
+ "total": 0.6625425460000542,
+ "mean": 0.044169503066670286,
+ "stdev": 0.0009472706701762553,
+ "p50": 0.04394076400001268,
+ "p90": 0.045213300800003256,
+ "p95": 0.04587641910000002,
+ "p99": 0.046457753419995244,
  "values": [
- 0.045954800999993495,
- 0.04446333299998173,
- 0.044494441000011875,
- 0.04426547500000311,
- 0.04376506200000563,
- 0.04499464299999545,
- 0.04426369099999761,
- 0.04452789300000859,
- 0.043635319999992817,
- 0.04387021799999502,
- 0.04445065999999542,
- 0.04541865300001291,
- 0.043626325000019506,
- 0.04489091000002077
+ 0.04556499000000258,
+ 0.043650205000005826,
+ 0.04660308699999405,
+ 0.04447290200002385,
+ 0.04424561099997959,
+ 0.04468576700000426,
+ 0.04394076400001268,
+ 0.04347251500001903,
+ 0.044680306999993036,
+ 0.04356178099999397,
+ 0.043785547000027236,
+ 0.04382420799998954,
+ 0.04419610900001203,
+ 0.04340082199999529,
+ 0.04245793100000128
  ]
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 44.97114759582595
+ "value": 45.280110962108
  },
  "energy": {
  "unit": "kWh",
- "cpu": 1.6963920155585101e-06,
- "ram": 7.089398842205409e-08,
+ "cpu": 1.685131037676776e-06,
+ "ram": 7.042427836042955e-08,
  "gpu": 0.0,
- "total": 1.767286003980564e-06
+ "total": 1.7555553160372056e-06
  },
  "efficiency": {
  "unit": "tokens/kWh",
- "value": 1131678.741016044
+ "value": 1139240.6617608475
  }
  },
  "decode": {
  "memory": {
  "unit": "MB",
- "max_ram": 958.312448,
+ "max_ram": 948.24448,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -159,78 +158,80 @@
  },
  "latency": {
  "unit": "s",
- "count": 14,
- "total": 0.3892796470000519,
- "mean": 0.027805689071432278,
- "stdev": 0.0009139966924691778,
- "p50": 0.02751088150000669,
- "p90": 0.028758083700014935,
- "p95": 0.0293590112500155,
- "p99": 0.030069515850009054,
+ "count": 15,
+ "total": 0.4013152069999535,
+ "mean": 0.026754347133330234,
+ "stdev": 0.00046463600228426256,
+ "p50": 0.026856557999991537,
+ "p90": 0.027272766799995907,
+ "p95": 0.02735589909999021,
+ "p99": 0.027469443019996334,
  "values": [
- 0.0284717760000035,
- 0.027489577000011423,
- 0.028459322999992764,
- 0.027429003000008834,
- 0.02726347500001225,
- 0.027557150999996338,
- 0.027213382000013553,
- 0.02688716399998725,
- 0.027532186000001957,
- 0.028088781999997536,
- 0.028880787000019836,
- 0.02703322499999672,
- 0.030247142000007443,
- 0.026726674000002504
+ 0.026856557999991537,
+ 0.026271821999984013,
+ 0.02723930900000937,
+ 0.02729507199998693,
+ 0.026379181999999446,
+ 0.02653050300000359,
+ 0.026603848999997126,
+ 0.027058142999976553,
+ 0.02700592600001528,
+ 0.026911350000005996,
+ 0.026138985999978104,
+ 0.027204554000007874,
+ 0.027497828999997864,
+ 0.026504294000005757,
+ 0.025817829999994046
  ]
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 35.963863273843685
+ "value": 37.37710342982776
  },
  "energy": {
  "unit": "kWh",
- "cpu": 1.0036530932366561e-06,
- "ram": 4.1946113965940766e-08,
+ "cpu": 1.0025930404663087e-06,
+ "ram": 4.19014193139778e-08,
  "gpu": 0.0,
- "total": 1.0455992072025971e-06
+ "total": 1.0444944597802864e-06
  },
  "efficiency": {
  "unit": "tokens/kWh",
- "value": 956389.4015139955
+ "value": 957400.9614281286
  }
  },
  "per_token": {
  "memory": null,
  "latency": {
  "unit": "s",
- "count": 13,
- "total": 0.9377388369999835,
- "mean": 0.07213375669230643,
- "stdev": 0.0007910113430319274,
- "p50": 0.07195642499999622,
- "p90": 0.0731509747999894,
- "p95": 0.07340619199998741,
- "p99": 0.07365363199999024,
+ "count": 14,
+ "total": 0.9916712409999775,
+ "mean": 0.07083366007142697,
+ "stdev": 0.0012201529007763755,
+ "p50": 0.07062559400000623,
+ "p90": 0.07175336229998663,
+ "p95": 0.07250598824998775,
+ "p99": 0.07358856324999608,
  "values": [
- 0.07198634099998458,
- 0.07295490600000676,
- 0.07172076599999855,
- 0.07105912399998715,
- 0.07256558100002053,
- 0.07149065799998766,
- 0.07143760900001439,
- 0.07115158700000279,
- 0.07195642499999622,
- 0.07319999199998506,
- 0.07267799000001673,
- 0.07371549199999095,
- 0.07182236599999214
+ 0.06992857000000186,
+ 0.07385920699999815,
+ 0.07177733199998215,
+ 0.07064844600000697,
+ 0.0712027149999983,
+ 0.07060274200000549,
+ 0.07053561699999022,
+ 0.07169743299999709,
+ 0.07049989000000778,
+ 0.06994229599999358,
+ 0.07104756700002213,
+ 0.07166975299998057,
+ 0.0699853050000172,
+ 0.068274367999976
  ]
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 13.863134901812996
+ "value": 14.11758193762152
  },
  "energy": null,
  "efficiency": null