IlyasMoutawwakil (HF staff) committed on
Commit 0073deb · verified · 1 Parent(s): 33a4469

Upload cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json with huggingface_hub
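The commit message indicates the result file was pushed programmatically with huggingface_hub. As a minimal sketch only (the actual repo_id of the benchmark dataset is not shown in this commit view and is left as a placeholder), an upload like this could be performed with HfApi.upload_file:

# Sketch of pushing a benchmark result file to the Hub with huggingface_hub.
# The repo_id below is a placeholder, not the actual dataset this commit belongs to.
from huggingface_hub import HfApi

api = HfApi()  # assumes a Hub token is already configured, e.g. via `huggingface-cli login`
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result produced by optimum-benchmark
    path_in_repo="cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json",
    repo_id="<namespace>/<benchmark-dataset>",  # placeholder
    repo_type="dataset",
    commit_message="Upload cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json with huggingface_hub",
)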

cuda_training_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json CHANGED
@@ -3,7 +3,7 @@
     "name": "cuda_training_transformers_multiple-choice_FacebookAI/roberta-base",
     "backend": {
         "name": "pytorch",
-        "version": "2.3.0+cu121",
+        "version": "2.3.1+cu121",
         "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
         "task": "multiple-choice",
         "library": "transformers",
@@ -76,10 +76,10 @@
     "environment": {
         "cpu": " AMD EPYC 7R32",
         "cpu_count": 16,
-        "cpu_ram_mb": 66697.29792,
+        "cpu_ram_mb": 66697.293824,
         "system": "Linux",
         "machine": "x86_64",
-        "platform": "Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35",
+        "platform": "Linux-5.10.219-208.866.amzn2.x86_64-x86_64-with-glibc2.35",
         "processor": "x86_64",
         "python_version": "3.10.12",
         "gpu": [
@@ -89,15 +89,15 @@
         "gpu_vram_mb": 24146608128,
         "optimum_benchmark_version": "0.2.1",
         "optimum_benchmark_commit": null,
-        "transformers_version": "4.41.1",
+        "transformers_version": "4.42.3",
         "transformers_commit": null,
-        "accelerate_version": "0.30.1",
+        "accelerate_version": "0.31.0",
         "accelerate_commit": null,
-        "diffusers_version": "0.27.2",
+        "diffusers_version": "0.29.2",
         "diffusers_commit": null,
         "optimum_version": null,
         "optimum_commit": null,
-        "timm_version": "1.0.3",
+        "timm_version": "1.0.7",
         "timm_commit": null,
         "peft_version": null,
         "peft_commit": null
@@ -107,8 +107,8 @@
     "overall": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1106.747392,
-            "max_global_vram": 3376.939008,
+            "max_ram": 1734.729728,
+            "max_global_vram": 3379.03616,
             "max_process_vram": 0.0,
             "max_reserved": 2730.491904,
             "max_allocated": 2516.23424
@@ -116,24 +116,24 @@
         "latency": {
             "unit": "s",
             "count": 5,
-            "total": 1.1056629943847656,
-            "mean": 0.2211325988769531,
-            "stdev": 0.28761936387226733,
-            "p50": 0.07699967956542969,
-            "p90": 0.5092378723144532,
-            "p95": 0.6528039031982421,
-            "p99": 0.7676567279052734,
+            "total": 0.9213266143798828,
+            "mean": 0.18426532287597658,
+            "stdev": 0.2688018879717385,
+            "p50": 0.05003059387207031,
+            "p90": 0.4534712448120118,
+            "p95": 0.5876695213317871,
+            "p99": 0.6950281425476075,
             "values": [
-                0.7963699340820313,
-                0.07697510528564454,
-                0.07853977966308594,
-                0.07699967956542969,
-                0.07677849578857422
+                0.7218677978515625,
+                0.05087641525268555,
+                0.05003059387207031,
+                0.049320960998535154,
+                0.0492308464050293
             ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 45.22173596650214
+            "value": 54.269570877048295
         },
         "energy": null,
         "efficiency": null
@@ -141,8 +141,8 @@
     "warmup": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1106.747392,
-            "max_global_vram": 3376.939008,
+            "max_ram": 1734.729728,
+            "max_global_vram": 3379.03616,
             "max_process_vram": 0.0,
             "max_reserved": 2730.491904,
             "max_allocated": 2516.23424
@@ -150,21 +150,21 @@
         "latency": {
             "unit": "s",
             "count": 2,
-            "total": 0.8733450393676758,
-            "mean": 0.4366725196838379,
-            "stdev": 0.35969741439819336,
-            "p50": 0.4366725196838379,
-            "p90": 0.7244304512023926,
-            "p95": 0.7604001926422119,
-            "p99": 0.7891759857940674,
+            "total": 0.7727442131042481,
+            "mean": 0.38637210655212406,
+            "stdev": 0.33549569129943846,
+            "p50": 0.38637210655212406,
+            "p90": 0.6547686595916749,
+            "p95": 0.6883182287216186,
+            "p99": 0.7151578840255738,
             "values": [
-                0.7963699340820313,
-                0.07697510528564454
+                0.7218677978515625,
+                0.05087641525268555
             ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 9.160182561742385
+            "value": 10.35271421556508
         },
         "energy": null,
         "efficiency": null
@@ -172,8 +172,8 @@
     "train": {
         "memory": {
             "unit": "MB",
-            "max_ram": 1106.747392,
-            "max_global_vram": 3376.939008,
+            "max_ram": 1734.729728,
+            "max_global_vram": 3379.03616,
             "max_process_vram": 0.0,
             "max_reserved": 2730.491904,
             "max_allocated": 2516.23424
@@ -181,22 +181,22 @@
         "latency": {
             "unit": "s",
             "count": 3,
-            "total": 0.23231795501708985,
-            "mean": 0.07743931833902995,
-            "stdev": 0.0007833653511584085,
-            "p50": 0.07699967956542969,
-            "p90": 0.0782317596435547,
-            "p95": 0.07838576965332031,
-            "p99": 0.07850897766113281,
+            "total": 0.14858240127563477,
+            "mean": 0.049527467091878254,
+            "stdev": 0.00035766145789978623,
+            "p50": 0.049320960998535154,
+            "p90": 0.04988866729736328,
+            "p95": 0.049959630584716794,
+            "p99": 0.050016401214599604,
             "values": [
-                0.07853977966308594,
-                0.07699967956542969,
-                0.07677849578857422
+                0.05003059387207031,
+                0.049320960998535154,
+                0.0492308464050293
             ]
         },
         "throughput": {
             "unit": "samples/s",
-            "value": 77.48002085622645
+            "value": 121.1448990288443
         },
         "energy": null,
         "efficiency": null