IlyasMoutawwakil HF staff committed on
Commit
5bafdd5
·
verified ·
1 Parent(s): 34d3fdd

Upload cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub

Browse files
cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -3,7 +3,7 @@
3
  "name": "cuda_training_transformers_text-generation_openai-community/gpt2",
4
  "backend": {
5
  "name": "pytorch",
6
- "version": "2.6.0.dev20240917+cu124",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
9
  "library": "transformers",
@@ -110,7 +110,7 @@
110
  "overall": {
111
  "memory": {
112
  "unit": "MB",
113
- "max_ram": 1347.772416,
114
  "max_global_vram": 3566.731264,
115
  "max_process_vram": 0.0,
116
  "max_reserved": 2910.846976,
@@ -119,24 +119,24 @@
119
  "latency": {
120
  "unit": "s",
121
  "count": 5,
122
- "total": 0.5287372741699219,
123
- "mean": 0.10574745483398437,
124
- "stdev": 0.12210795080737968,
125
- "p50": 0.04447948837280274,
126
- "p90": 0.22824775390625004,
127
- "p95": 0.2891044830322265,
128
- "p99": 0.33778986633300784,
129
  "values": [
130
- 0.34996121215820314,
131
- 0.045677566528320314,
132
- 0.044303359985351565,
133
- 0.04431564712524414,
134
- 0.04447948837280274
135
  ]
136
  },
137
  "throughput": {
138
  "unit": "samples/s",
139
- "value": 94.56492372037943
140
  },
141
  "energy": null,
142
  "efficiency": null
@@ -144,7 +144,7 @@
144
  "warmup": {
145
  "memory": {
146
  "unit": "MB",
147
- "max_ram": 1347.772416,
148
  "max_global_vram": 3566.731264,
149
  "max_process_vram": 0.0,
150
  "max_reserved": 2910.846976,
@@ -153,21 +153,21 @@
153
  "latency": {
154
  "unit": "s",
155
  "count": 2,
156
- "total": 0.3956387786865235,
157
- "mean": 0.19781938934326174,
158
- "stdev": 0.1521418228149414,
159
- "p50": 0.19781938934326174,
160
- "p90": 0.3195328475952149,
161
- "p95": 0.334747029876709,
162
- "p99": 0.3469183757019043,
163
  "values": [
164
- 0.34996121215820314,
165
- 0.045677566528320314
166
  ]
167
  },
168
  "throughput": {
169
  "unit": "samples/s",
170
- "value": 20.220464805192012
171
  },
172
  "energy": null,
173
  "efficiency": null
@@ -175,7 +175,7 @@
175
  "train": {
176
  "memory": {
177
  "unit": "MB",
178
- "max_ram": 1347.772416,
179
  "max_global_vram": 3566.731264,
180
  "max_process_vram": 0.0,
181
  "max_reserved": 2910.846976,
@@ -184,22 +184,22 @@
184
  "latency": {
185
  "unit": "s",
186
  "count": 3,
187
- "total": 0.13309849548339844,
188
- "mean": 0.044366165161132814,
189
- "stdev": 8.028846400770407e-05,
190
- "p50": 0.04431564712524414,
191
- "p90": 0.04444672012329102,
192
- "p95": 0.04446310424804688,
193
- "p99": 0.044476211547851564,
194
  "values": [
195
- 0.044303359985351565,
196
- 0.04431564712524414,
197
- 0.04447948837280274
198
  ]
199
  },
200
  "throughput": {
201
  "unit": "samples/s",
202
- "value": 135.23819284828178
203
  },
204
  "energy": null,
205
  "efficiency": null
 
3
  "name": "cuda_training_transformers_text-generation_openai-community/gpt2",
4
  "backend": {
5
  "name": "pytorch",
6
+ "version": "2.4.1+cu124",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
9
  "library": "transformers",
 
110
  "overall": {
111
  "memory": {
112
  "unit": "MB",
113
+ "max_ram": 1339.629568,
114
  "max_global_vram": 3566.731264,
115
  "max_process_vram": 0.0,
116
  "max_reserved": 2910.846976,
 
119
  "latency": {
120
  "unit": "s",
121
  "count": 5,
122
+ "total": 0.5259868202209473,
123
+ "mean": 0.10519736404418947,
124
+ "stdev": 0.1223932053408925,
125
+ "p50": 0.044030975341796875,
126
+ "p90": 0.22784287109375004,
127
+ "p95": 0.2889127990722656,
128
+ "p99": 0.3377687414550781,
129
  "values": [
130
+ 0.34998272705078126,
131
+ 0.044633087158203126,
132
+ 0.044030975341796875,
133
+ 0.04377395248413086,
134
+ 0.043566078186035154
135
  ]
136
  },
137
  "throughput": {
138
  "unit": "samples/s",
139
+ "value": 95.05941608764431
140
  },
141
  "energy": null,
142
  "efficiency": null
 
144
  "warmup": {
145
  "memory": {
146
  "unit": "MB",
147
+ "max_ram": 1339.629568,
148
  "max_global_vram": 3566.731264,
149
  "max_process_vram": 0.0,
150
  "max_reserved": 2910.846976,
 
153
  "latency": {
154
  "unit": "s",
155
  "count": 2,
156
+ "total": 0.3946158142089844,
157
+ "mean": 0.1973079071044922,
158
+ "stdev": 0.15267481994628906,
159
+ "p50": 0.1973079071044922,
160
+ "p90": 0.31944776306152345,
161
+ "p95": 0.33471524505615236,
162
+ "p99": 0.3469292306518555,
163
  "values": [
164
+ 0.34998272705078126,
165
+ 0.044633087158203126
166
  ]
167
  },
168
  "throughput": {
169
  "unit": "samples/s",
170
+ "value": 20.272882413585393
171
  },
172
  "energy": null,
173
  "efficiency": null
 
175
  "train": {
176
  "memory": {
177
  "unit": "MB",
178
+ "max_ram": 1339.629568,
179
  "max_global_vram": 3566.731264,
180
  "max_process_vram": 0.0,
181
  "max_reserved": 2910.846976,
 
184
  "latency": {
185
  "unit": "s",
186
  "count": 3,
187
+ "total": 0.1313710060119629,
188
+ "mean": 0.04379033533732096,
189
+ "stdev": 0.00019014667981643546,
190
+ "p50": 0.04377395248413086,
191
+ "p90": 0.043979570770263675,
192
+ "p95": 0.044005273056030275,
193
+ "p99": 0.04402583488464355,
194
  "values": [
195
+ 0.044030975341796875,
196
+ 0.04377395248413086,
197
+ 0.043566078186035154
198
  ]
199
  },
200
  "throughput": {
201
  "unit": "samples/s",
202
+ "value": 137.01653467098276
203
  },
204
  "energy": null,
205
  "efficiency": null