IlyasMoutawwakil (HF staff) committed
Commit 6ecce72
1 Parent(s): ffc304a

Upload cpu_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub

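The commit message says the file was pushed with huggingface_hub. A minimal sketch of how such an upload is typically done with `HfApi.upload_file`; the repo id and repo type below are placeholders, not taken from this commit view:

```python
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result file produced by the benchmark run
    path_in_repo="cpu_training_transformers_text-generation_openai-community/gpt2/benchmark.json",
    repo_id="<user-or-org>/<benchmark-repo>",  # placeholder: the target repo is not shown on this page
    repo_type="dataset",                       # assumption: benchmark dumps are commonly stored in a dataset repo
    commit_message="Upload cpu_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub",
)
```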
cpu_training_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -3,7 +3,7 @@
  "name": "cpu_training_transformers_text-generation_openai-community/gpt2",
  "backend": {
  "name": "pytorch",
- "version": "2.4.0+cpu",
+ "version": "2.4.1+cpu",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "text-generation",
  "library": "transformers",
@@ -86,7 +86,7 @@
  "processor": "x86_64",
  "python_version": "3.10.14",
  "optimum_benchmark_version": "0.4.0",
- "optimum_benchmark_commit": "505086556c6e125f92759cd19b806135534e5ab3",
+ "optimum_benchmark_commit": "ea76e356b5c355783ee27d2d429a010ded791f8b",
  "transformers_version": "4.44.2",
  "transformers_commit": null,
  "accelerate_version": "0.34.0",
@@ -105,7 +105,7 @@
  "overall": {
  "memory": {
  "unit": "MB",
- "max_ram": 2850.398208,
+ "max_ram": 2851.741696,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -114,24 +114,24 @@
  "latency": {
  "unit": "s",
  "count": 5,
- "total": 3.2473782189999554,
- "mean": 0.6494756437999911,
- "stdev": 0.036318642236164315,
- "p50": 0.6334434879999833,
- "p90": 0.6896817113999987,
- "p95": 0.705295729200003,
- "p99": 0.7177869434400066,
+ "total": 3.266236042999992,
+ "mean": 0.6532472085999984,
+ "stdev": 0.03652047129713275,
+ "p50": 0.6332496910000032,
+ "p90": 0.6987992510000083,
+ "p95": 0.7062108920000071,
+ "p99": 0.7121402048000062,
  "values": [
- 0.7209097470000074,
- 0.6334434879999833,
- 0.6240291929999842,
- 0.626156132999995,
- 0.6428396579999855
+ 0.7136225330000059,
+ 0.6332496910000032,
+ 0.6264709259999961,
+ 0.616328564999975,
+ 0.676564328000012
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 15.397036202145163
+ "value": 15.308140422722083
  },
  "energy": null,
  "efficiency": null
@@ -139,7 +139,7 @@
  "warmup": {
  "memory": {
  "unit": "MB",
- "max_ram": 2850.398208,
+ "max_ram": 2851.741696,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -148,21 +148,21 @@
  "latency": {
  "unit": "s",
  "count": 2,
- "total": 1.3543532349999907,
- "mean": 0.6771766174999954,
- "stdev": 0.04373312950001207,
- "p50": 0.6771766174999954,
- "p90": 0.712163121100005,
- "p95": 0.7165364340500062,
- "p99": 0.7200350844100072,
+ "total": 1.346872224000009,
+ "mean": 0.6734361120000045,
+ "stdev": 0.04018642100000136,
+ "p50": 0.6734361120000045,
+ "p90": 0.7055852488000056,
+ "p95": 0.7096038909000058,
+ "p99": 0.7128188045800059,
  "values": [
- 0.7209097470000074,
- 0.6334434879999833
+ 0.7136225330000059,
+ 0.6332496910000032
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 5.906878496140672
+ "value": 5.9396874161093
  },
  "energy": null,
  "efficiency": null
@@ -170,7 +170,7 @@
  "train": {
  "memory": {
  "unit": "MB",
- "max_ram": 2850.398208,
+ "max_ram": 2851.741696,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -179,22 +179,22 @@
  "latency": {
  "unit": "s",
  "count": 3,
- "total": 1.8930249839999647,
- "mean": 0.6310083279999882,
- "stdev": 0.008410954984127904,
- "p50": 0.626156132999995,
- "p90": 0.6395029529999874,
- "p95": 0.6411713054999865,
- "p99": 0.6425059874999858,
+ "total": 1.919363818999983,
+ "mean": 0.639787939666661,
+ "stdev": 0.026332412546742477,
+ "p50": 0.6264709259999961,
+ "p90": 0.6665456476000088,
+ "p95": 0.6715549878000104,
+ "p99": 0.6755624599600116,
  "values": [
- 0.6240291929999842,
- 0.626156132999995,
- 0.6428396579999855
+ 0.6264709259999961,
+ 0.616328564999975,
+ 0.676564328000012
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 9.508590827980502
+ "value": 9.378107382152418
  },
  "energy": null,
  "efficiency": null