IlyasMoutawwakil (HF staff) committed
Commit 54aa4cc · verified · 1 Parent(s): a8b51f6

Upload cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json with huggingface_hub
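For reference, an upload like the one described in this commit message can be reproduced with huggingface_hub's HfApi.upload_file. This is a minimal sketch, not the exact command used here: the repo_id shown is a placeholder and the local file path is assumed.

# Hypothetical sketch of pushing a benchmark artifact with huggingface_hub.
# repo_id and the local path are assumptions, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result file (assumed path)
    path_in_repo="cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json",
    repo_id="optimum-benchmark/results",  # placeholder dataset repo, not confirmed by the commit
    repo_type="dataset",
    commit_message="Upload benchmark.json with huggingface_hub",
)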

cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json CHANGED
@@ -7,6 +7,7 @@
   "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
   "task": "multiple-choice",
   "library": "transformers",
+  "model_type": "roberta",
   "model": "FacebookAI/roberta-base",
   "processor": "FacebookAI/roberta-base",
   "device": "cuda",
@@ -79,7 +80,7 @@
   "environment": {
   "cpu": " AMD EPYC 7R32",
   "cpu_count": 16,
-  "cpu_ram_mb": 66697.293824,
+  "cpu_ram_mb": 66697.285632,
   "system": "Linux",
   "machine": "x86_64",
   "platform": "Linux-5.10.219-208.866.amzn2.x86_64-x86_64-with-glibc2.35",
@@ -110,7 +111,7 @@
   "overall": {
   "memory": {
   "unit": "MB",
-  "max_ram": 1112.748032,
+  "max_ram": 1108.80768,
   "max_global_vram": 3379.03616,
   "max_process_vram": 0.0,
   "max_reserved": 2730.491904,
@@ -119,24 +120,24 @@
   "latency": {
   "unit": "s",
   "count": 5,
-  "total": 0.867106746673584,
-  "mean": 0.1734213493347168,
-  "stdev": 0.24745303630454504,
-  "p50": 0.049791999816894535,
-  "p90": 0.42113490905761725,
-  "p95": 0.5447308761596679,
-  "p99": 0.6436076498413086,
+  "total": 0.9124310836791991,
+  "mean": 0.1824862167358398,
+  "stdev": 0.2660169611473305,
+  "p50": 0.04961075210571289,
+  "p90": 0.4487722885131836,
+  "p95": 0.5816459098815917,
+  "p99": 0.6879448069763183,
   "values": [
-  0.6683268432617188,
-  0.05034700775146484,
-  0.04929945755004883,
-  0.049791999816894535,
-  0.04934143829345703
+  0.71451953125,
+  0.050151424407958986,
+  0.04899737548828125,
+  0.04961075210571289,
+  0.049152000427246094
   ]
   },
   "throughput": {
   "unit": "samples/s",
-  "value": 57.663027293711195
+  "value": 54.79865920216662
   },
   "energy": null,
   "efficiency": null
@@ -144,7 +145,7 @@
   "warmup": {
   "memory": {
   "unit": "MB",
-  "max_ram": 1112.748032,
+  "max_ram": 1108.80768,
   "max_global_vram": 3379.03616,
   "max_process_vram": 0.0,
   "max_reserved": 2730.491904,
@@ -153,21 +154,21 @@
   "latency": {
   "unit": "s",
   "count": 2,
-  "total": 0.7186738510131836,
-  "mean": 0.3593369255065918,
-  "stdev": 0.30898991775512696,
-  "p50": 0.3593369255065918,
-  "p90": 0.6065288597106934,
-  "p95": 0.6374278514862061,
-  "p99": 0.6621470449066162,
+  "total": 0.764670955657959,
+  "mean": 0.3823354778289795,
+  "stdev": 0.3321840534210205,
+  "p50": 0.3823354778289795,
+  "p90": 0.648082720565796,
+  "p95": 0.6813011259078979,
+  "p99": 0.7078758501815796,
   "values": [
-  0.6683268432617188,
-  0.05034700775146484
+  0.71451953125,
+  0.050151424407958986
   ]
   },
   "throughput": {
   "unit": "samples/s",
-  "value": 11.131614137235731
+  "value": 10.462016297083524
   },
   "energy": null,
   "efficiency": null
@@ -175,7 +176,7 @@
   "train": {
   "memory": {
   "unit": "MB",
-  "max_ram": 1112.748032,
+  "max_ram": 1108.80768,
   "max_global_vram": 3379.03616,
   "max_process_vram": 0.0,
   "max_reserved": 2730.491904,
@@ -184,22 +185,22 @@
   "latency": {
   "unit": "s",
   "count": 3,
-  "total": 0.14843289566040038,
-  "mean": 0.04947763188680013,
-  "stdev": 0.00022295140323720124,
-  "p50": 0.04934143829345703,
-  "p90": 0.049701887512207034,
-  "p95": 0.049746943664550784,
-  "p99": 0.04978298858642578,
+  "total": 0.14776012802124025,
+  "mean": 0.04925337600708008,
+  "stdev": 0.00026046813604607813,
+  "p50": 0.049152000427246094,
+  "p90": 0.049519001770019534,
+  "p95": 0.04956487693786621,
+  "p99": 0.049601577072143556,
   "values": [
-  0.04929945755004883,
-  0.049791999816894535,
-  0.04934143829345703
+  0.04899737548828125,
+  0.04961075210571289,
+  0.049152000427246094
   ]
   },
   "throughput": {
   "unit": "samples/s",
-  "value": 121.26691943800786
+  "value": 121.81906066982111
   },
   "energy": null,
   "efficiency": null