IlyasMoutawwakil HF staff committed on
Commit
e3d6ef3
·
verified ·
1 Parent(s): f1a03aa

Upload cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json with huggingface_hub

Browse files
cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json CHANGED
@@ -5,11 +5,11 @@
5
  "name": "pytorch",
6
  "version": "2.5.1+cpu",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
 
 
8
  "task": "text-generation",
9
  "library": "transformers",
10
  "model_type": "llama",
11
- "model": "hf-internal-testing/tiny-random-LlamaForCausalLM",
12
- "processor": "hf-internal-testing/tiny-random-LlamaForCausalLM",
13
  "device": "cpu",
14
  "device_ids": null,
15
  "seed": 42,
@@ -86,10 +86,10 @@
86
  "processor": "x86_64",
87
  "python_version": "3.10.15",
88
  "optimum_benchmark_version": "0.5.0.dev0",
89
- "optimum_benchmark_commit": "f12f642abaf558e58516daaa40124ad343ac2723",
90
  "transformers_version": "4.47.0",
91
  "transformers_commit": null,
92
- "accelerate_version": "1.2.0",
93
  "accelerate_commit": null,
94
  "diffusers_version": "0.31.0",
95
  "diffusers_commit": null,
@@ -107,7 +107,7 @@
107
  "overall": {
108
  "memory": {
109
  "unit": "MB",
110
- "max_ram": 523.026432,
111
  "max_global_vram": null,
112
  "max_process_vram": null,
113
  "max_reserved": null,
@@ -116,42 +116,42 @@
116
  "latency": {
117
  "unit": "s",
118
  "values": [
119
- 0.01909628799998586,
120
- 0.0123986529999911,
121
- 0.012429721000017935,
122
- 0.01155504100000826,
123
- 0.011968631000002006
124
  ],
125
  "count": 5,
126
- "total": 0.06744833400000516,
127
- "mean": 0.013489666800001032,
128
- "p50": 0.0123986529999911,
129
- "p90": 0.016429661199998693,
130
- "p95": 0.017762974599992275,
131
- "p99": 0.018829625319987144,
132
- "stdev": 0.002821500465415861,
133
- "stdev_": 20.91601302869575
134
  },
135
  "throughput": {
136
  "unit": "samples/s",
137
- "value": 741.3081544756343
138
  },
139
  "energy": {
140
  "unit": "kWh",
141
- "cpu": 8.57111153888926e-06,
142
- "ram": 3.579594120329122e-07,
143
  "gpu": 0,
144
- "total": 8.929070950922172e-06
145
  },
146
  "efficiency": {
147
  "unit": "samples/kWh",
148
- "value": 1119937.3434217393
149
  }
150
  },
151
  "warmup": {
152
  "memory": {
153
  "unit": "MB",
154
- "max_ram": 523.026432,
155
  "max_global_vram": null,
156
  "max_process_vram": null,
157
  "max_reserved": null,
@@ -160,22 +160,22 @@
160
  "latency": {
161
  "unit": "s",
162
  "values": [
163
- 0.01909628799998586,
164
- 0.0123986529999911
165
  ],
166
  "count": 2,
167
- "total": 0.03149494099997696,
168
- "mean": 0.01574747049998848,
169
- "p50": 0.01574747049998848,
170
- "p90": 0.018426524499986385,
171
- "p95": 0.018761406249986123,
172
- "p99": 0.019029311649985912,
173
- "stdev": 0.0033488174999973808,
174
- "stdev_": 21.26574868008059
175
  },
176
  "throughput": {
177
  "unit": "samples/s",
178
- "value": 254.00904862802733
179
  },
180
  "energy": null,
181
  "efficiency": null
@@ -183,7 +183,7 @@
183
  "train": {
184
  "memory": {
185
  "unit": "MB",
186
- "max_ram": 523.026432,
187
  "max_global_vram": null,
188
  "max_process_vram": null,
189
  "max_reserved": null,
@@ -192,23 +192,23 @@
192
  "latency": {
193
  "unit": "s",
194
  "values": [
195
- 0.012429721000017935,
196
- 0.01155504100000826,
197
- 0.011968631000002006
198
  ],
199
  "count": 3,
200
- "total": 0.0359533930000282,
201
- "mean": 0.011984464333342734,
202
- "p50": 0.011968631000002006,
203
- "p90": 0.012337503000014748,
204
- "p95": 0.012383612000016342,
205
- "p99": 0.012420499200017617,
206
- "stdev": 0.00035726208534514307,
207
- "stdev_": 2.981043419280591
208
  },
209
  "throughput": {
210
  "unit": "samples/s",
211
- "value": 500.64815857535007
212
  },
213
  "energy": null,
214
  "efficiency": null
 
5
  "name": "pytorch",
6
  "version": "2.5.1+cpu",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
+ "model": "hf-internal-testing/tiny-random-LlamaForCausalLM",
9
+ "processor": "hf-internal-testing/tiny-random-LlamaForCausalLM",
10
  "task": "text-generation",
11
  "library": "transformers",
12
  "model_type": "llama",
 
 
13
  "device": "cpu",
14
  "device_ids": null,
15
  "seed": 42,
 
86
  "processor": "x86_64",
87
  "python_version": "3.10.15",
88
  "optimum_benchmark_version": "0.5.0.dev0",
89
+ "optimum_benchmark_commit": "7cec62e016d76fe612308e4c2c074fc7f09289fd",
90
  "transformers_version": "4.47.0",
91
  "transformers_commit": null,
92
+ "accelerate_version": "1.2.1",
93
  "accelerate_commit": null,
94
  "diffusers_version": "0.31.0",
95
  "diffusers_commit": null,
 
107
  "overall": {
108
  "memory": {
109
  "unit": "MB",
110
+ "max_ram": 522.866688,
111
  "max_global_vram": null,
112
  "max_process_vram": null,
113
  "max_reserved": null,
 
116
  "latency": {
117
  "unit": "s",
118
  "values": [
119
+ 0.019453390999984776,
120
+ 0.012075559000010117,
121
+ 0.011965802999981179,
122
+ 0.011934053999993921,
123
+ 0.011987785000002305
124
  ],
125
  "count": 5,
126
+ "total": 0.0674165919999723,
127
+ "mean": 0.013483318399994459,
128
+ "p50": 0.011987785000002305,
129
+ "p90": 0.016502258199994912,
130
+ "p95": 0.017977824599989842,
131
+ "p99": 0.01915827771998579,
132
+ "stdev": 0.0029854060594256856,
133
+ "stdev_": 22.141478609797662
134
  },
135
  "throughput": {
136
  "unit": "samples/s",
137
+ "value": 741.6571872992415
138
  },
139
  "energy": {
140
  "unit": "kWh",
141
+ "cpu": 8.703254777777798e-06,
142
+ "ram": 3.635053251718738e-07,
143
  "gpu": 0,
144
+ "total": 9.066760102949672e-06
145
  },
146
  "efficiency": {
147
  "unit": "samples/kWh",
148
+ "value": 1102929.8102578802
149
  }
150
  },
151
  "warmup": {
152
  "memory": {
153
  "unit": "MB",
154
+ "max_ram": 522.866688,
155
  "max_global_vram": null,
156
  "max_process_vram": null,
157
  "max_reserved": null,
 
160
  "latency": {
161
  "unit": "s",
162
  "values": [
163
+ 0.019453390999984776,
164
+ 0.012075559000010117
165
  ],
166
  "count": 2,
167
+ "total": 0.03152894999999489,
168
+ "mean": 0.015764474999997447,
169
+ "p50": 0.015764474999997447,
170
+ "p90": 0.01871560779998731,
171
+ "p95": 0.019084499399986042,
172
+ "p99": 0.01937961267998503,
173
+ "stdev": 0.00368891599998733,
174
+ "stdev_": 23.400183006334988
175
  },
176
  "throughput": {
177
  "unit": "samples/s",
178
+ "value": 253.73505936611576
179
  },
180
  "energy": null,
181
  "efficiency": null
 
183
  "train": {
184
  "memory": {
185
  "unit": "MB",
186
+ "max_ram": 522.866688,
187
  "max_global_vram": null,
188
  "max_process_vram": null,
189
  "max_reserved": null,
 
192
  "latency": {
193
  "unit": "s",
194
  "values": [
195
+ 0.011965802999981179,
196
+ 0.011934053999993921,
197
+ 0.011987785000002305
198
  ],
199
  "count": 3,
200
+ "total": 0.035887641999977404,
201
+ "mean": 0.011962547333325801,
202
+ "p50": 0.011965802999981179,
203
+ "p90": 0.01198338859999808,
204
+ "p95": 0.011985586800000192,
205
+ "p99": 0.011987345360001882,
206
+ "stdev": 2.2056059099537384e-05,
207
+ "stdev_": 0.184375940048259
208
  },
209
  "throughput": {
210
  "unit": "samples/s",
211
+ "value": 501.56541352065796
212
  },
213
  "energy": null,
214
  "efficiency": null