Upload folder using huggingface_hub
- README.md +7 -1
- results.json +60 -0
- results_arc.json +68 -0
- results_hellaswag.json +66 -0
- results_truthfulqa.json +60 -0
- results_winogrande.json +57 -0
README.md
CHANGED
@@ -52,4 +52,10 @@ print(decoded[0])
 This instruction model is based on Mistral-7B-v0.1, a transformer model with the following architecture choices:
 - Grouped-Query Attention
 - Sliding-Window Attention
-- Byte-fallback BPE tokenizer
+- Byte-fallback BPE tokenizer
+
+## Results
+
+| model_name       |  Average | arc_challenge | hellaswag | truthfulqa_mc2 | winogrande |
+|:-----------------|---------:|--------------:|----------:|---------------:|-----------:|
+| Zenith-7B-dpo-v3 | 0.707576 |      0.613481 |  0.848337 |       0.602897 |   0.765588 |
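The "Average" column is consistent with the unweighted mean of the four per-task scores reported in the `results_*.json` files below (acc_norm for ARC-Challenge and HellaSwag, acc for TruthfulQA MC2 and Winogrande). A minimal check:

```python
# Recompute the README's "Average" from the per-task scores below.
scores = {
    "arc_challenge": 0.613481,   # acc_norm, 25-shot
    "hellaswag": 0.848337,       # acc_norm, 10-shot
    "truthfulqa_mc2": 0.602897,  # acc, 0-shot
    "winogrande": 0.765588,      # acc, 5-shot
}
print(round(sum(scores.values()) / len(scores), 6))  # 0.707576
```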
results.json
ADDED
@@ -0,0 +1,60 @@
{
  "results": {
    "truthfulqa_mc2": {
      "acc,none": 0.602896952995968,
      "acc_stderr,none": 0.0158343852936674,
      "alias": "truthfulqa_mc2"
    }
  },
  "configs": {
    "truthfulqa_mc2": {
      "task": "truthfulqa_mc2",
      "group": [
        "truthfulqa"
      ],
      "dataset_path": "/lustre07/scratch/gagan30/arocr/meta-llama/self_rewarding_models/eval/truthful_qa",
      "dataset_name": "multiple_choice",
      "validation_split": "validation",
      "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}",
      "doc_to_target": 0,
      "doc_to_choice": "{{mc2_targets.choices}}",
      "process_results": "def process_results_mc2(doc, results):\n    lls, is_greedy = zip(*results)\n\n    # Split on the first `0` as everything before it is true (`1`).\n    split_idx = list(doc[\"mc2_targets\"][\"labels\"]).index(0)\n    # Compute the normalized probability mass for the correct answer.\n    ll_true, ll_false = lls[:split_idx], lls[split_idx:]\n    p_true, p_false = np.exp(np.array(ll_true)), np.exp(np.array(ll_false))\n    p_true = p_true / (sum(p_true) + sum(p_false))\n\n    return {\"acc\": sum(p_true)}\n",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "question",
      "metadata": {
        "version": 2.0
      }
    }
  },
  "versions": {
    "truthfulqa_mc2": 2.0
  },
  "n-shot": {
    "truthfulqa_mc2": 0
  },
  "config": {
    "model": "vllm",
    "model_args": "pretrained=/lustre07/scratch/gagan30/arocr/meta-llama/self_rewarding_models/Zenith-7B-dpo-3,tensor_parallel_size=1,dtype=auto,gpu_memory_utilization=0.9,data_parallel_size=1,max_model_len=4096",
    "batch_size": "auto:128",
    "batch_sizes": [],
    "device": "cuda",
    "use_cache": "/lustre07/scratch/gagan30/arocr/cache/",
    "limit": null,
    "bootstrap_iters": 100000,
    "gen_kwargs": null
  },
  "git_hash": "2d0a6460"
}
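The `process_results` callable above is stored as an escaped string. Unescaped for readability, it computes the MC2 score as the normalized probability mass on the true answers (the harness has `numpy` in scope as `np`; the import below is added for self-containment):

```python
import numpy as np

def process_results_mc2(doc, results):
    lls, is_greedy = zip(*results)

    # Split on the first `0` as everything before it is true (`1`).
    split_idx = list(doc["mc2_targets"]["labels"]).index(0)
    # Compute the normalized probability mass for the correct answer.
    ll_true, ll_false = lls[:split_idx], lls[split_idx:]
    p_true, p_false = np.exp(np.array(ll_true)), np.exp(np.array(ll_false))
    p_true = p_true / (sum(p_true) + sum(p_false))

    return {"acc": sum(p_true)}
```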
results_arc.json
ADDED
@@ -0,0 +1,68 @@
{
  "results": {
    "arc_challenge": {
      "acc,none": 0.5614334470989761,
      "acc_stderr,none": 0.014500682618212865,
      "acc_norm,none": 0.613481228668942,
      "acc_norm_stderr,none": 0.014230084761910473,
      "alias": "arc_challenge"
    }
  },
  "configs": {
    "arc_challenge": {
      "task": "arc_challenge",
      "group": [
        "ai2_arc"
      ],
      "dataset_path": "/lustre07/scratch/gagan30/arocr/meta-llama/self_rewarding_models/eval/ai2_arc",
      "dataset_name": "ARC-Challenge",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "doc_to_text": "Question: {{question}}\nAnswer:",
      "doc_to_target": "{{choices.label.index(answerKey)}}",
      "doc_to_choice": "{{choices.text}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 25,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        },
        {
          "metric": "acc_norm",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "Question: {{question}}\nAnswer:",
      "metadata": {
        "version": 1.0
      }
    }
  },
  "versions": {
    "arc_challenge": 1.0
  },
  "n-shot": {
    "arc_challenge": 25
  },
  "config": {
    "model": "vllm",
    "model_args": "pretrained=/lustre07/scratch/gagan30/arocr/meta-llama/self_rewarding_models/Zenith-7B-dpo-3,tensor_parallel_size=1,dtype=auto,gpu_memory_utilization=0.9,data_parallel_size=1,max_model_len=4096",
    "batch_size": "auto:128",
    "batch_sizes": [],
    "device": "cuda",
    "use_cache": "/lustre07/scratch/gagan30/arocr/cache/",
    "limit": null,
    "bootstrap_iters": 100000,
    "gen_kwargs": null
  },
  "git_hash": "2d0a6460"
}
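results_arc.json reports both `acc,none` and `acc_norm,none`; the README table uses the latter. By lm-evaluation-harness convention, `acc` picks the choice with the highest raw log-likelihood, while `acc_norm` first normalizes each log-likelihood by the choice's byte length so longer answers are not penalized. A sketch of that selection rule (an illustration of the assumed semantics, not the harness's own code):

```python
def pick_choice(lls, choices, normalize=False):
    """Return the index of the selected answer.

    lls     -- log-likelihood of each answer string under the model
    choices -- the answer strings themselves
    """
    if normalize:
        # acc_norm: divide by byte length before comparing.
        lls = [ll / len(c.encode("utf-8")) for ll, c in zip(lls, choices)]
    return max(range(len(lls)), key=lambda i: lls[i])
```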
results_hellaswag.json
ADDED
@@ -0,0 +1,66 @@
{
  "results": {
    "hellaswag": {
      "acc,none": 0.6504680342561243,
      "acc_stderr,none": 0.004758476684324035,
      "acc_norm,none": 0.8483369846644094,
      "acc_norm_stderr,none": 0.0035796087435066605,
      "alias": "hellaswag"
    }
  },
  "configs": {
    "hellaswag": {
      "task": "hellaswag",
      "group": [
        "multiple_choice"
      ],
      "dataset_path": "/lustre07/scratch/gagan30/arocr/meta-llama/self_rewarding_models/eval/hellaswag",
      "training_split": "train",
      "validation_split": "validation",
      "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n    def _process_doc(doc):\n        ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n        out_doc = {\n            \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n            \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n            \"gold\": int(doc[\"label\"]),\n        }\n        return out_doc\n\n    return dataset.map(_process_doc)\n",
      "doc_to_text": "{{query}}",
      "doc_to_target": "{{label}}",
      "doc_to_choice": "choices",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 10,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        },
        {
          "metric": "acc_norm",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false,
      "metadata": {
        "version": 1.0
      }
    }
  },
  "versions": {
    "hellaswag": 1.0
  },
  "n-shot": {
    "hellaswag": 10
  },
  "config": {
    "model": "vllm",
    "model_args": "pretrained=/lustre07/scratch/gagan30/arocr/meta-llama/self_rewarding_models/Zenith-7B-dpo-3,tensor_parallel_size=1,dtype=auto,gpu_memory_utilization=0.9,data_parallel_size=1,max_model_len=4096",
    "batch_size": "auto:128",
    "batch_sizes": [],
    "device": "cuda",
    "use_cache": "/lustre07/scratch/gagan30/arocr/cache/",
    "limit": null,
    "bootstrap_iters": 100000,
    "gen_kwargs": null
  },
  "git_hash": "2d0a6460"
}
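The `process_docs` string above, unescaped: it builds each HellaSwag query by joining the two context halves and prefixing the activity label. Note that `preprocess` is a text-cleanup helper defined elsewhere in the harness's HellaSwag task, not in this file:

```python
import datasets

def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
    def _process_doc(doc):
        # Join the two context halves and prefix the activity label.
        ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
        out_doc = {
            "query": preprocess(doc["activity_label"] + ": " + ctx),
            "choices": [preprocess(ending) for ending in doc["endings"]],
            "gold": int(doc["label"]),
        }
        return out_doc

    return dataset.map(_process_doc)
```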
results_truthfulqa.json
ADDED
@@ -0,0 +1,60 @@
{
  "results": {
    "truthfulqa_mc2": {
      "acc,none": 0.602896952995968,
      "acc_stderr,none": 0.0158343852936674,
      "alias": "truthfulqa_mc2"
    }
  },
  "configs": {
    "truthfulqa_mc2": {
      "task": "truthfulqa_mc2",
      "group": [
        "truthfulqa"
      ],
      "dataset_path": "/lustre07/scratch/gagan30/arocr/meta-llama/self_rewarding_models/eval/truthful_qa",
      "dataset_name": "multiple_choice",
      "validation_split": "validation",
      "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}",
      "doc_to_target": 0,
      "doc_to_choice": "{{mc2_targets.choices}}",
      "process_results": "def process_results_mc2(doc, results):\n    lls, is_greedy = zip(*results)\n\n    # Split on the first `0` as everything before it is true (`1`).\n    split_idx = list(doc[\"mc2_targets\"][\"labels\"]).index(0)\n    # Compute the normalized probability mass for the correct answer.\n    ll_true, ll_false = lls[:split_idx], lls[split_idx:]\n    p_true, p_false = np.exp(np.array(ll_true)), np.exp(np.array(ll_false))\n    p_true = p_true / (sum(p_true) + sum(p_false))\n\n    return {\"acc\": sum(p_true)}\n",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "question",
      "metadata": {
        "version": 2.0
      }
    }
  },
  "versions": {
    "truthfulqa_mc2": 2.0
  },
  "n-shot": {
    "truthfulqa_mc2": 0
  },
  "config": {
    "model": "vllm",
    "model_args": "pretrained=/lustre07/scratch/gagan30/arocr/meta-llama/self_rewarding_models/Zenith-7B-dpo-3,tensor_parallel_size=1,dtype=auto,gpu_memory_utilization=0.9,data_parallel_size=1,max_model_len=4096",
    "batch_size": "auto:128",
    "batch_sizes": [],
    "device": "cuda",
    "use_cache": "/lustre07/scratch/gagan30/arocr/cache/",
    "limit": null,
    "bootstrap_iters": 100000,
    "gen_kwargs": null
  },
  "git_hash": "2d0a6460"
}
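results_truthfulqa.json is identical in content to results.json above. The `config` block in these files matches lm-evaluation-harness (v0.4.x) output, so a run along these lines should reproduce it. This is a sketch assuming the harness's `simple_evaluate` entry point; the `pretrained` and `use_cache` paths are cluster-local and taken verbatim from `config.model_args` above:

```python
from lm_eval import simple_evaluate  # lm-evaluation-harness, v0.4.x assumed

results = simple_evaluate(
    model="vllm",
    model_args=(
        "pretrained=/lustre07/scratch/gagan30/arocr/meta-llama/"
        "self_rewarding_models/Zenith-7B-dpo-3,tensor_parallel_size=1,"
        "dtype=auto,gpu_memory_utilization=0.9,data_parallel_size=1,"
        "max_model_len=4096"
    ),
    tasks=["truthfulqa_mc2"],
    num_fewshot=0,
    batch_size="auto:128",
    device="cuda",
    use_cache="/lustre07/scratch/gagan30/arocr/cache/",
)
```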
results_winogrande.json
ADDED
@@ -0,0 +1,57 @@
{
  "results": {
    "winogrande": {
      "acc,none": 0.7655880031570639,
      "acc_stderr,none": 0.011906130106237992,
      "alias": "winogrande"
    }
  },
  "configs": {
    "winogrande": {
      "task": "winogrande",
      "dataset_path": "/lustre07/scratch/gagan30/arocr/meta-llama/self_rewarding_models/eval/winogrande",
      "dataset_name": "winogrande_xl",
      "training_split": "train",
      "validation_split": "validation",
      "doc_to_text": "def doc_to_text(doc):\n    answer_to_num = {\"1\": 0, \"2\": 1}\n    return answer_to_num[doc[\"answer\"]]\n",
      "doc_to_target": "def doc_to_target(doc):\n    idx = doc[\"sentence\"].index(\"_\") + 1\n    return doc[\"sentence\"][idx:].strip()\n",
      "doc_to_choice": "def doc_to_choice(doc):\n    idx = doc[\"sentence\"].index(\"_\")\n    options = [doc[\"option1\"], doc[\"option2\"]]\n    return [doc[\"sentence\"][:idx] + opt for opt in options]\n",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 5,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "sentence",
      "metadata": {
        "version": 1.0
      }
    }
  },
  "versions": {
    "winogrande": 1.0
  },
  "n-shot": {
    "winogrande": 5
  },
  "config": {
    "model": "vllm",
    "model_args": "pretrained=/lustre07/scratch/gagan30/arocr/meta-llama/self_rewarding_models/Zenith-7B-dpo-3,tensor_parallel_size=1,dtype=auto,gpu_memory_utilization=0.9,data_parallel_size=1,max_model_len=4096",
    "batch_size": "auto:128",
    "batch_sizes": [],
    "device": "cuda",
    "use_cache": "/lustre07/scratch/gagan30/arocr/cache/",
    "limit": null,
    "bootstrap_iters": 100000,
    "gen_kwargs": null
  },
  "git_hash": "2d0a6460"
}
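The three callables in results_winogrande.json, unescaped. Winogrande is scored by splicing each option in before the blank (`doc_to_choice`) and comparing the model's likelihood of the continuation shared by both choices (`doc_to_target`); `doc_to_text` maps the gold label to a choice index:

```python
def doc_to_text(doc):
    # Map the gold label ("1" or "2") to a 0-based choice index.
    answer_to_num = {"1": 0, "2": 1}
    return answer_to_num[doc["answer"]]

def doc_to_target(doc):
    # The continuation shared by both choices: everything after the blank.
    idx = doc["sentence"].index("_") + 1
    return doc["sentence"][idx:].strip()

def doc_to_choice(doc):
    # Each choice is the sentence prefix with an option spliced in for "_".
    idx = doc["sentence"].index("_")
    options = [doc["option1"], doc["option2"]]
    return [doc["sentence"][:idx] + opt for opt in options]
```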