IlyasMoutawwakil HF staff committed on
Commit
a604fe0
1 Parent(s): b537ff9

Upload cpu_inference_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json with huggingface_hub

Browse files
cpu_inference_transformers_multiple-choice_FacebookAI/roberta-base/benchmark.json CHANGED
@@ -3,7 +3,7 @@
3
  "name": "cpu_inference_transformers_multiple-choice_FacebookAI/roberta-base",
4
  "backend": {
5
  "name": "pytorch",
6
- "version": "2.3.0+cpu",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "multiple-choice",
9
  "library": "transformers",
@@ -73,23 +73,23 @@
73
  "environment": {
74
  "cpu": " AMD EPYC 7763 64-Core Processor",
75
  "cpu_count": 4,
76
- "cpu_ram_mb": 16757.354496,
77
  "system": "Linux",
78
  "machine": "x86_64",
79
- "platform": "Linux-6.5.0-1021-azure-x86_64-with-glibc2.35",
80
  "processor": "x86_64",
81
  "python_version": "3.10.14",
82
  "optimum_benchmark_version": "0.2.1",
83
- "optimum_benchmark_commit": "347e13ca9f7f904f55669603cfb9f0b6c7e8672c",
84
- "transformers_version": "4.41.1",
85
  "transformers_commit": null,
86
- "accelerate_version": "0.30.1",
87
  "accelerate_commit": null,
88
- "diffusers_version": "0.27.2",
89
  "diffusers_commit": null,
90
  "optimum_version": null,
91
  "optimum_commit": null,
92
- "timm_version": "1.0.3",
93
  "timm_commit": null,
94
  "peft_version": null,
95
  "peft_commit": null
@@ -99,7 +99,7 @@
99
  "forward": {
100
  "memory": {
101
  "unit": "MB",
102
- "max_ram": 940.077056,
103
  "max_global_vram": null,
104
  "max_process_vram": null,
105
  "max_reserved": null,
@@ -107,52 +107,56 @@
107
  },
108
  "latency": {
109
  "unit": "s",
110
- "count": 21,
111
- "total": 1.0166593470000294,
112
- "mean": 0.04841234985714425,
113
- "stdev": 0.0030332976676242475,
114
- "p50": 0.04873118300002943,
115
- "p90": 0.05087690900001007,
116
- "p95": 0.05198777800001153,
117
- "p99": 0.05426029240002208,
118
  "values": [
119
- 0.05001850899998317,
120
- 0.04921639200000527,
121
- 0.047019502000011926,
122
- 0.04811772599998676,
123
- 0.048939699000015935,
124
- 0.05074964400000681,
125
- 0.04783162699999366,
126
- 0.04958036699997592,
127
- 0.05482842100002472,
128
- 0.05087690900001007,
129
- 0.04873118300002943,
130
- 0.05198777800001153,
131
- 0.04780527799999845,
132
- 0.05031911600002559,
133
- 0.048522746999992705,
134
- 0.04742359100004023,
135
- 0.049348117000022285,
136
- 0.04793970699995498,
137
- 0.04478786800001444,
138
- 0.04163343199996916,
139
- 0.040981733999956305
 
 
 
 
140
  ]
141
  },
142
  "throughput": {
143
  "unit": "samples/s",
144
- "value": 20.655886420527242
145
  },
146
  "energy": {
147
  "unit": "kWh",
148
- "cpu": 1.6200559933980308e-06,
149
- "ram": 6.770467025683048e-08,
150
  "gpu": 0.0,
151
- "total": 1.6877606636548612e-06
152
  },
153
  "efficiency": {
154
  "unit": "samples/kWh",
155
- "value": 592501.0705217473
156
  }
157
  }
158
  }
 
3
  "name": "cpu_inference_transformers_multiple-choice_FacebookAI/roberta-base",
4
  "backend": {
5
  "name": "pytorch",
6
+ "version": "2.3.1+cpu",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "multiple-choice",
9
  "library": "transformers",
 
73
  "environment": {
74
  "cpu": " AMD EPYC 7763 64-Core Processor",
75
  "cpu_count": 4,
76
+ "cpu_ram_mb": 16757.338112,
77
  "system": "Linux",
78
  "machine": "x86_64",
79
+ "platform": "Linux-6.5.0-1022-azure-x86_64-with-glibc2.35",
80
  "processor": "x86_64",
81
  "python_version": "3.10.14",
82
  "optimum_benchmark_version": "0.2.1",
83
+ "optimum_benchmark_commit": "d920fe9626db1e7915f6d3574b5b54b0159cd100",
84
+ "transformers_version": "4.42.3",
85
  "transformers_commit": null,
86
+ "accelerate_version": "0.31.0",
87
  "accelerate_commit": null,
88
+ "diffusers_version": "0.29.2",
89
  "diffusers_commit": null,
90
  "optimum_version": null,
91
  "optimum_commit": null,
92
+ "timm_version": "1.0.7",
93
  "timm_commit": null,
94
  "peft_version": null,
95
  "peft_commit": null
 
99
  "forward": {
100
  "memory": {
101
  "unit": "MB",
102
+ "max_ram": 942.379008,
103
  "max_global_vram": null,
104
  "max_process_vram": null,
105
  "max_reserved": null,
 
107
  },
108
  "latency": {
109
  "unit": "s",
110
+ "count": 25,
111
+ "total": 1.0359930630000918,
112
+ "mean": 0.04143972252000367,
113
+ "stdev": 0.0007665022351241194,
114
+ "p50": 0.04148488999999245,
115
+ "p90": 0.042040176200021054,
116
+ "p95": 0.042152350600008506,
117
+ "p99": 0.04375422708000542,
118
  "values": [
119
+ 0.04216107100000954,
120
+ 0.04161525299997493,
121
+ 0.04161107599998104,
122
+ 0.04146869900000638,
123
+ 0.04137253999999757,
124
+ 0.0418630149999899,
125
+ 0.04137262000000419,
126
+ 0.04148488999999245,
127
+ 0.04148896700002069,
128
+ 0.04149640100001761,
129
+ 0.041924237000046105,
130
+ 0.041202303000034135,
131
+ 0.04158726100001786,
132
+ 0.04110388999998804,
133
+ 0.041526998999984244,
134
+ 0.04211746900000435,
135
+ 0.04160354200001848,
136
+ 0.040779235000002245,
137
+ 0.04143717099998412,
138
+ 0.04425732900000412,
139
+ 0.04049497500000143,
140
+ 0.040192391000005045,
141
+ 0.04087252800002261,
142
+ 0.04044404000001123,
143
+ 0.04051516099997343
144
  ]
145
  },
146
  "throughput": {
147
  "unit": "samples/s",
148
+ "value": 24.131435714061134
149
  },
150
  "energy": {
151
  "unit": "kWh",
152
+ "cpu": 1.5882180002000598e-06,
153
+ "ram": 6.63744527190905e-08,
154
  "gpu": 0.0,
155
+ "total": 1.6545924529191503e-06
156
  },
157
  "efficiency": {
158
  "unit": "samples/kWh",
159
+ "value": 604378.436657153
160
  }
161
  }
162
  }