IlyasMoutawwakil (HF staff) committed
Commit 11545f7
Parent(s): a8fe9ac

Upload cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub

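For context on the commit message, this is a minimal sketch of how such a result file is typically pushed with huggingface_hub's upload_file API; the repo_id and repo_type below are hypothetical placeholders, not taken from this commit.

# Sketch only: assumed usage of huggingface_hub.HfApi.upload_file.
# repo_id and repo_type are hypothetical placeholders, not from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result file produced by the benchmark run
    path_in_repo="cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json",
    repo_id="<namespace>/<benchmark-repo>",  # hypothetical target repository
    repo_type="dataset",  # assumed repo type
    commit_message="Upload cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub",
)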
cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -6,19 +6,17 @@
  "version": "2.3.0+cpu",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "text-generation",
- "model": "openai-community/gpt2",
  "library": "transformers",
+ "model": "openai-community/gpt2",
+ "processor": "openai-community/gpt2",
  "device": "cpu",
  "device_ids": null,
  "seed": 42,
  "inter_op_num_threads": null,
  "intra_op_num_threads": null,
- "hub_kwargs": {
- "revision": "main",
- "force_download": false,
- "local_files_only": false,
- "trust_remote_code": false
- },
+ "model_kwargs": {},
+ "processor_kwargs": {},
+ "hub_kwargs": {},
  "no_weights": true,
  "device_map": null,
  "torch_dtype": null,
@@ -73,14 +71,14 @@
  "environment": {
  "cpu": " AMD EPYC 7763 64-Core Processor",
  "cpu_count": 4,
- "cpu_ram_mb": 16757.346304,
+ "cpu_ram_mb": 16757.342208,
  "system": "Linux",
  "machine": "x86_64",
  "platform": "Linux-6.5.0-1018-azure-x86_64-with-glibc2.35",
  "processor": "x86_64",
  "python_version": "3.10.14",
  "optimum_benchmark_version": "0.2.0",
- "optimum_benchmark_commit": "d35829e539df8480b726c647eeabf91e41eae047",
+ "optimum_benchmark_commit": "a8ccb91469272f9f38883fbf3e14bffcd5e95f2d",
  "transformers_version": "4.40.2",
  "transformers_commit": null,
  "accelerate_version": "0.30.0",
@@ -99,7 +97,7 @@
  "prefill": {
  "memory": {
  "unit": "MB",
- "max_ram": 948.645888,
+ "max_ram": 959.721472,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -107,51 +105,50 @@
  },
  "latency": {
  "unit": "s",
- "count": 14,
- "total": 0.6339867150000487,
- "mean": 0.04528476535714633,
- "stdev": 0.0016215549762257887,
- "p50": 0.04481703600001197,
- "p90": 0.0462271221999913,
- "p95": 0.04779731334999155,
- "p99": 0.050004287469986364,
+ "count": 13,
+ "total": 0.6312671800000658,
+ "mean": 0.048559013846158905,
+ "stdev": 0.0023036918286531358,
+ "p50": 0.049563321999983145,
+ "p90": 0.05037565600000562,
+ "p95": 0.050465417600003094,
+ "p99": 0.05057309071999498,
  "values": [
- 0.04631184999999505,
- 0.045226536999962264,
- 0.04494494300001861,
- 0.044653621000009025,
- 0.0438055200000349,
- 0.043956301000037,
- 0.04468912900000532,
- 0.044451223999999456,
- 0.045247165999967365,
- 0.04547249600000214,
- 0.04428059600002143,
- 0.04436187700002847,
- 0.05055603099998507,
- 0.046029423999982555
+ 0.04530646500001012,
+ 0.04400738800001136,
+ 0.044212981999976364,
+ 0.04959109300000364,
+ 0.05037551999998868,
+ 0.0486010739999756,
+ 0.050375690000009854,
+ 0.05033551500002886,
+ 0.05002007500002037,
+ 0.049563321999983145,
+ 0.05060000899999295,
+ 0.0491694050000433,
+ 0.04910864200002152
  ]
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 44.16496329895154
+ "value": 41.186997873067455
  },
  "energy": {
  "unit": "kWh",
- "cpu": 1.6950056748689663e-06,
- "ram": 7.08365921310357e-08,
+ "cpu": 1.6731792577990781e-06,
+ "ram": 6.992546118709696e-08,
  "gpu": 0.0,
- "total": 1.765842267000002e-06
+ "total": 1.743104718986175e-06
  },
  "efficiency": {
  "unit": "tokens/kWh",
- "value": 1132603.9915206071
+ "value": 1147377.9964081794
  }
  },
  "decode": {
  "memory": {
  "unit": "MB",
- "max_ram": 948.645888,
+ "max_ram": 959.721472,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -159,78 +156,76 @@
  },
  "latency": {
  "unit": "s",
- "count": 14,
- "total": 0.3902009259998067,
- "mean": 0.02787149471427191,
- "stdev": 0.0009935493351508349,
- "p50": 0.027663960000012366,
- "p90": 0.028723292899968556,
- "p95": 0.029497946349965786,
- "p99": 0.030467642869971313,
+ "count": 13,
+ "total": 0.39328847399991673,
+ "mean": 0.030252959538455134,
+ "stdev": 0.0013191504688200174,
+ "p50": 0.030537863999995807,
+ "p90": 0.03161084880000544,
+ "p95": 0.031753168599993845,
+ "p99": 0.03188260731998071,
  "values": [
- 0.028845265999962066,
- 0.02765952700002572,
- 0.028438688999983697,
- 0.027900404999968487,
- 0.026684158999955798,
- 0.0275202869999589,
- 0.028259613999978228,
- 0.027668392999999014,
- 0.0281797860000097,
- 0.027368894999995064,
- 0.027082210000003215,
- 0.030710066999972696,
- 0.026835671000014827,
- 0.027047956999979306
+ 0.027211818000012045,
+ 0.028034694999973908,
+ 0.030499201999987235,
+ 0.030537863999995807,
+ 0.029951227000026392,
+ 0.03191496699997742,
+ 0.031080486999996992,
+ 0.02936419999997497,
+ 0.03006252500000528,
+ 0.03164530300000479,
+ 0.03067822499997419,
+ 0.03083492899997964,
+ 0.03147303200000806
  ]
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 35.87895124576648
+ "value": 33.05461730872579
  },
  "energy": {
  "unit": "kWh",
- "cpu": 1.013396446639597e-06,
- "ram": 4.2353689736012584e-08,
+ "cpu": 9.73473069844423e-07,
+ "ram": 4.0683460135085154e-08,
  "gpu": 0.0,
- "total": 1.0557501363756092e-06
+ "total": 1.0141565299795071e-06
  },
  "efficiency": {
  "unit": "tokens/kWh",
- "value": 947193.8156058408
+ "value": 986041.0798914905
  }
  },
  "per_token": {
  "memory": null,
  "latency": {
  "unit": "s",
- "count": 13,
- "total": 0.9492799199999808,
- "mean": 0.07302153230769083,
- "stdev": 0.0016894624988902415,
- "p50": 0.07290782799998397,
- "p90": 0.07464429679997693,
- "p95": 0.07599381059999359,
- "p99": 0.07726914132003003,
+ "count": 12,
+ "total": 0.9520682520000037,
+ "mean": 0.0793390210000003,
+ "stdev": 0.0027901983800490988,
+ "p50": 0.08025918549998323,
+ "p90": 0.08127993490002723,
+ "p95": 0.08139592595002795,
+ "p99": 0.08150457559002461,
  "values": [
- 0.07292421599998988,
- 0.07337603700000273,
- 0.0725685219999832,
- 0.07053093600001148,
- 0.07146733100000802,
- 0.07290782799998397,
- 0.07216007099998478,
- 0.07349734400003172,
- 0.07284272300000794,
- 0.07139964499998541,
- 0.07493103499996323,
- 0.07758797400003914,
- 0.07308625799998936
+ 0.07208035500002552,
+ 0.07455751499998087,
+ 0.08014541799997232,
+ 0.08037295299999414,
+ 0.0804896320000239,
+ 0.08153173800002378,
+ 0.0797304429999599,
+ 0.08004407900000388,
+ 0.08123608599998988,
+ 0.08128480700003138,
+ 0.08002632599999515,
+ 0.08056890000000294
  ]
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 13.694590737788136
+ "value": 12.604138384818187
  },
  "energy": null,
  "efficiency": null