IlyasMoutawwakil committed
Commit 9fbb833 · verified · Parent: 742a96d

Upload cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub
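
As the message says, the file was pushed with huggingface_hub. A minimal sketch of such an upload via HfApi.upload_file, with the target repo_id left as a placeholder since the destination repository is not named on this page:

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result file produced by the benchmark run
    path_in_repo="cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json",
    repo_id="<namespace>/<repo>",      # placeholder: the destination repository is not shown here
    repo_type="dataset",               # assumption: benchmark results are commonly stored in a dataset repo
    commit_message="Upload cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub",
)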

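For anyone consuming these reports, the summary statistics in each latency block of the updated file can be recomputed from its raw values list; the new overall numbers in the diff below are consistent with a plain sum, mean, median, and a population standard deviation. A minimal sketch, locating the overall section by key search rather than assuming the exact nesting of the JSON (which the diff does not show in full):

import json
import statistics

def find_section(obj, key):
    # Recursively return the first value stored under `key` anywhere in the document.
    if isinstance(obj, dict):
        if key in obj:
            return obj[key]
        for value in obj.values():
            found = find_section(value, key)
            if found is not None:
                return found
    elif isinstance(obj, list):
        for item in obj:
            found = find_section(item, key)
            if found is not None:
                return found
    return None

with open("benchmark.json") as f:
    report = json.load(f)

latency = find_section(report, "overall")["latency"]
values = latency["values"]

total = sum(values)                # ~0.9414779, matches the reported "total"
mean = total / len(values)         # ~0.1882956, matches the reported "mean"
p50 = statistics.median(values)    # 0.05070336151123047, matches the reported "p50"
stdev = statistics.pstdev(values)  # population stdev, ~0.2752986, matches the reported "stdev"
print(total, mean, p50, stdev)

Beyond the new timings, the configuration side of the diff moves "model" below "library", adds a "processor" entry, and replaces the single populated "hub_kwargs" block with empty "model_kwargs", "processor_kwargs", and "hub_kwargs" dicts, so tooling that parses older benchmark.json files may need to handle both layouts.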
cuda_training_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -6,19 +6,17 @@
  "version": "2.3.0+cu121",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "text-generation",
- "model": "openai-community/gpt2",
  "library": "transformers",
+ "model": "openai-community/gpt2",
+ "processor": "openai-community/gpt2",
  "device": "cuda",
  "device_ids": "0",
  "seed": 42,
  "inter_op_num_threads": null,
  "intra_op_num_threads": null,
- "hub_kwargs": {
- "revision": "main",
- "force_download": false,
- "local_files_only": false,
- "trust_remote_code": false
- },
+ "model_kwargs": {},
+ "processor_kwargs": {},
+ "hub_kwargs": {},
  "no_weights": true,
  "device_map": null,
  "torch_dtype": null,
@@ -107,7 +105,7 @@
  "overall": {
  "memory": {
  "unit": "MB",
- "max_ram": 1126.240256,
+ "max_ram": 1126.281216,
  "max_global_vram": 3563.585536,
  "max_process_vram": 0.0,
  "max_reserved": 2915.04128,
@@ -116,24 +114,24 @@
  "latency": {
  "unit": "s",
  "count": 5,
- "total": 0.8612689819335938,
- "mean": 0.17225379638671875,
- "stdev": 0.24553052044267784,
- "p50": 0.0493568000793457,
- "p90": 0.4180201599121095,
- "p95": 0.5406673065185547,
- "p99": 0.6387850238037109,
+ "total": 0.9414779014587402,
+ "mean": 0.18829558029174803,
+ "stdev": 0.27529863191806336,
+ "p50": 0.05070336151123047,
+ "p90": 0.4639477813720704,
+ "p95": 0.6014197830200194,
+ "p99": 0.711397384338379,
  "values": [
- 0.663314453125,
- 0.050078720092773435,
- 0.04928204727172852,
- 0.0492369613647461,
- 0.0493568000793457
+ 0.7388917846679688,
+ 0.05070336151123047,
+ 0.05153177642822265,
+ 0.05051289749145508,
+ 0.049838081359863284
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 58.05387288852246
+ "value": 53.10799108776663
  },
  "energy": null,
  "efficiency": null
@@ -141,7 +139,7 @@
  "warmup": {
  "memory": {
  "unit": "MB",
- "max_ram": 1126.240256,
+ "max_ram": 1126.281216,
  "max_global_vram": 3563.585536,
  "max_process_vram": 0.0,
  "max_reserved": 2915.04128,
@@ -150,21 +148,21 @@
  "latency": {
  "unit": "s",
  "count": 2,
- "total": 0.7133931732177735,
- "mean": 0.35669658660888676,
- "stdev": 0.30661786651611334,
- "p50": 0.35669658660888676,
- "p90": 0.6019908798217773,
- "p95": 0.6326526664733887,
- "p99": 0.6571820957946778,
+ "total": 0.7895951461791992,
+ "mean": 0.3947975730895996,
+ "stdev": 0.34409421157836917,
+ "p50": 0.3947975730895996,
+ "p90": 0.670072942352295,
+ "p95": 0.7044823635101318,
+ "p99": 0.7320099004364014,
  "values": [
- 0.663314453125,
- 0.050078720092773435
+ 0.7388917846679688,
+ 0.05070336151123047
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 11.214012553436483
+ "value": 10.131774541309545
  },
  "energy": null,
  "efficiency": null
@@ -172,7 +170,7 @@
  "train": {
  "memory": {
  "unit": "MB",
- "max_ram": 1126.240256,
+ "max_ram": 1126.281216,
  "max_global_vram": 3563.585536,
  "max_process_vram": 0.0,
  "max_reserved": 2915.04128,
@@ -181,22 +179,22 @@
  "latency": {
  "unit": "s",
  "count": 3,
- "total": 0.14787580871582032,
- "mean": 0.049291936238606775,
- "stdev": 4.942113669717215e-05,
- "p50": 0.04928204727172852,
- "p90": 0.049341849517822264,
- "p95": 0.049349324798583984,
- "p99": 0.04935530502319336,
+ "total": 0.151882755279541,
+ "mean": 0.050627585093180334,
+ "stdev": 0.0006961875612401831,
+ "p50": 0.05051289749145508,
+ "p90": 0.05132800064086914,
+ "p95": 0.051429888534545894,
+ "p99": 0.0515113988494873,
  "values": [
- 0.04928204727172852,
- 0.0492369613647461,
- 0.0493568000793457
+ 0.05153177642822265,
+ 0.05051289749145508,
+ 0.049838081359863284
  ]
  },
  "throughput": {
  "unit": "samples/s",
- "value": 121.72376371980775
+ "value": 118.5124668489909
  },
  "energy": null,
  "efficiency": null