lvkaokao committed
Commit dca5dbd • 1 Parent(s): d033aa4
update.
src/leaderboard/read_evals.py
CHANGED
@@ -59,10 +59,16 @@ class EvalResult:
         quant_type = QuantType.from_str(config.get("quant_type", "GPTQ"))
         weight_dtype = WeightDtype.from_str(data["task_info"].get("weight_dtype", "int4"))
         compute_dtype = ComputeDtype.from_str(data["task_info"].get("compute_dtype", "bfloat16"))
-        double_quant = data["quantization_config"].get("bnb_4bit_use_double_quant", False)
+        # double_quant = data["quantization_config"].get("bnb_4bit_use_double_quant", False)
         model_params = config["model_params"]
         model_size = config["model_size"]
-        group_size = data["quantization_config"].get("group_size", -1)
+        # group_size = data["quantization_config"].get("group_size", -1)
+        if data.get("quantization_config", None):
+            double_quant = data["quantization_config"].get("bnb_4bit_use_double_quant", False)
+            group_size = data["quantization_config"].get("group_size", -1)
+        else:
+            double_quant = False
+            group_size = -1
 
         local = config.get("local", False)
         if not local:
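The change above guards the "quantization_config" lookup so that results files without that section fall back to defaults instead of raising a KeyError. Below is a minimal, self-contained sketch of that fallback pattern; the helper name read_quant_fields and the sample dicts are illustrative only and not part of the repository.

def read_quant_fields(data: dict) -> tuple[bool, int]:
    """Return (double_quant, group_size), tolerating a missing quantization_config."""
    if data.get("quantization_config", None):
        double_quant = data["quantization_config"].get("bnb_4bit_use_double_quant", False)
        group_size = data["quantization_config"].get("group_size", -1)
    else:
        # No quantization_config section in the results file: assume defaults.
        double_quant = False
        group_size = -1
    return double_quant, group_size


if __name__ == "__main__":
    with_config = {"quantization_config": {"bnb_4bit_use_double_quant": True, "group_size": 128}}
    without_config = {}
    print(read_quant_fields(with_config))     # (True, 128)
    print(read_quant_fields(without_config))  # (False, -1)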