IlyasMoutawwakil (HF staff) committed
Commit 195fc62 · verified · 1 Parent(s): ab2752f

Upload cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json with huggingface_hub
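For reference, the commit message above points to a programmatic upload. A minimal sketch of such an upload with huggingface_hub, where the repo_id and repo_type are placeholders rather than the actual target repository:

```python
# Minimal sketch: push a locally produced benchmark.json to the Hub.
# repo_id and repo_type are placeholders, not taken from this commit page.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="benchmark.json",  # local optimum-benchmark result file
    path_in_repo="cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json",
    repo_id="<user-or-org>/<benchmark-results>",  # placeholder
    repo_type="dataset",                          # assumption: results stored in a dataset repo
    commit_message="Upload cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json with huggingface_hub",
)
```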

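The two revisions named above (parent ab2752f and commit 195fc62) can also be fetched and compared locally. A sketch under the same placeholder assumptions, and additionally assuming the per-phase reports sit under a top-level "report" key, which the excerpt below does not show:

```python
# Sketch: download benchmark.json at both revisions and compare headline numbers.
# REPO_ID is a placeholder; the "report" key and repo_type="dataset" are assumptions.
import json
from huggingface_hub import hf_hub_download

REPO_ID = "<user-or-org>/<benchmark-results>"  # placeholder
FILENAME = "cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json"

def load(revision: str) -> dict:
    path = hf_hub_download(
        repo_id=REPO_ID,
        filename=FILENAME,
        repo_type="dataset",   # assumption
        revision=revision,     # commit hash from the page header
    )
    with open(path) as f:
        return json.load(f)

old, new = load("ab2752f"), load("195fc62")
for phase in ("overall", "warmup", "train"):
    o, n = old["report"][phase], new["report"][phase]
    print(
        phase,
        "| mean latency (s):", o["latency"]["mean"], "->", n["latency"]["mean"],
        "| max_ram (MB):", o["memory"]["max_ram"], "->", n["memory"]["max_ram"],
    )
```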
cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json CHANGED
@@ -3,7 +3,7 @@
 "name": "cuda_training_transformers_multiple-choice_FacebookAI/roberta-base",
 "backend": {
 "name": "pytorch",
-"version": "2.4.0+cu124",
+"version": "2.4.0+cu121",
 "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
 "task": "multiple-choice",
 "library": "transformers",
@@ -110,7 +110,7 @@
 "overall": {
 "memory": {
 "unit": "MB",
-"max_ram": 1299.554304,
+"max_ram": 1167.527936,
 "max_global_vram": 3379.03616,
 "max_process_vram": 0.0,
 "max_reserved": 2730.491904,
@@ -119,24 +119,24 @@
 "latency": {
 "unit": "s",
 "count": 5,
-"total": 0.5483161697387695,
-"mean": 0.1096632339477539,
-"stdev": 0.11634699812622813,
-"p50": 0.051469310760498044,
-"p90": 0.2265274429321289,
-"p95": 0.2844401741027831,
-"p99": 0.3307703590393066,
+"total": 0.8808540458679199,
+"mean": 0.176170809173584,
+"stdev": 0.2524065846847361,
+"p50": 0.05005311965942383,
+"p90": 0.4287940780639649,
+"p95": 0.5548888298034667,
+"p99": 0.6557646311950683,
 "values": [
-0.3423529052734375,
-0.051469310760498044,
-0.052789249420166016,
-0.050936832427978515,
-0.050767871856689455
+0.6809835815429688,
+0.05050982284545898,
+0.049616897583007816,
+0.04969062423706055,
+0.05005311965942383
 ]
 },
 "throughput": {
 "unit": "samples/s",
-"value": 91.18826465362339
+"value": 56.76309285806161
 },
 "energy": null,
 "efficiency": null
@@ -144,7 +144,7 @@
 "warmup": {
 "memory": {
 "unit": "MB",
-"max_ram": 1299.554304,
+"max_ram": 1167.527936,
 "max_global_vram": 3379.03616,
 "max_process_vram": 0.0,
 "max_reserved": 2730.491904,
@@ -153,21 +153,21 @@
 "latency": {
 "unit": "s",
 "count": 2,
-"total": 0.3938222160339355,
-"mean": 0.19691110801696776,
-"stdev": 0.14544179725646972,
-"p50": 0.19691110801696776,
-"p90": 0.3132645458221435,
-"p95": 0.3278087255477905,
-"p99": 0.33944406932830806,
+"total": 0.7314934043884278,
+"mean": 0.3657467021942139,
+"stdev": 0.3152368793487549,
+"p50": 0.3657467021942139,
+"p90": 0.6179362056732178,
+"p95": 0.6494598936080933,
+"p99": 0.6746788439559936,
 "values": [
-0.3423529052734375,
-0.051469310760498044
+0.6809835815429688,
+0.05050982284545898
 ]
 },
 "throughput": {
 "unit": "samples/s",
-"value": 20.313734660694315
+"value": 10.93653059891699
 },
 "energy": null,
 "efficiency": null
@@ -175,7 +175,7 @@
 "train": {
 "memory": {
 "unit": "MB",
-"max_ram": 1299.554304,
+"max_ram": 1167.527936,
 "max_global_vram": 3379.03616,
 "max_process_vram": 0.0,
 "max_reserved": 2730.491904,
@@ -184,22 +184,22 @@
 "latency": {
 "unit": "s",
 "count": 3,
-"total": 0.154493953704834,
-"mean": 0.051497984568278,
-"stdev": 0.0009156639146738015,
-"p50": 0.050936832427978515,
-"p90": 0.05241876602172851,
-"p95": 0.05260400772094727,
-"p99": 0.05275220108032227,
+"total": 0.14936064147949218,
+"mean": 0.049786880493164064,
+"stdev": 0.0001906504220075295,
+"p50": 0.04969062423706055,
+"p90": 0.04998062057495117,
+"p95": 0.0500168701171875,
+"p99": 0.050045869750976565,
 "values": [
-0.052789249420166016,
-0.050936832427978515,
-0.050767871856689455
+0.049616897583007816,
+0.04969062423706055,
+0.05005311965942383
 ]
 },
 "throughput": {
 "unit": "samples/s",
-"value": 116.50941391783925
+"value": 120.5136763052231
 },
 "energy": null,
 "efficiency": null