AppleSwing committed on
Commit 900a631
1 Parent(s): f9311f7
backend-cli.py CHANGED
@@ -28,6 +28,8 @@ import time
 import pprint
 import logging
 
+from lm_eval.filters.extraction import RegexFilter
+
 
 # Configure the root logger
 logging.basicConfig(
@@ -42,6 +44,20 @@ eval_logger = logging.getLogger("lm-eval")
 # Explicitly set the level for 'lm-eval' logger to WARNING
 eval_logger.setLevel(logging.WARNING)
 
+def tuple_input_decorator(func):
+    def wrapper(self, resps, docs):
+        stripped_resps = [[resp_data[0] for resp_data in group] for group in resps]
+
+        filtered_resps = func(self, stripped_resps, docs)
+
+        combined_resps = []
+        for original_group, new_group in zip(resps, filtered_resps):
+            combined_group = [(new_resp,) + rest_of_data[1:] for new_resp, rest_of_data in zip(new_group, original_group)]
+            combined_resps.append(combined_group)
+
+        return combined_resps
+    return wrapper
+
 
 def my_set_eval_request(api, eval_request, set_to_status, hf_repo, local_dir):
     for i in range(10):
@@ -137,6 +153,12 @@ def process_evaluation(task: Task, eval_request: EvalRequest, limit: Optional[in
     stop_event = threading.Event()
     monitor_thread = threading.Thread(target=monitor_gpus, args=(stop_event, 5, gpu_stats_list))
     monitor_thread.start()
+
+    original_apply = RegexFilter.apply
+    if task.benchmark == "gsm8k":
+        RegexFilter.apply = tuple_input_decorator(RegexFilter.apply)
+    else:
+        RegexFilter.apply = original_apply
 
     try:
         results = run_evaluation(
@@ -443,7 +465,7 @@ def get_args():
     parser = argparse.ArgumentParser(description="Run the backend")
     parser.add_argument("--debug", action="store_true", help="Run in debug mode")
     # debug parameters
-    parser.add_argument("--task", type=str, default="selfcheckgpt,mmlu", help="Task to debug")
+    parser.add_argument("--task", type=str, default="selfcheckgpt,mmlu,gsm8k", help="Task to debug")
     parser.add_argument("--model", type=str, default="mistralai/Mixtral-8x7B-Instruct-v0.1,mistralai/Mixtral-8x7B-v0.1", help="Model to debug")
     parser.add_argument("--precision", type=str, default="float32,float16,8bit,4bit", help="Precision to debug")
     parser.add_argument("--inference-framework", type=str, default="hf-chat", help="Inference framework to debug")
@@ -471,23 +493,23 @@ if __name__ == "__main__":
         task_name = task.benchmark
         if task_name not in debug_task_name:
             continue
-        try:
-            eval_request = EvalRequest(
-                model=debug_model_name,
-                private=False,
-                status="",
-                json_filepath="",
-                precision=precision,  # Use precision from arguments
-                inference_framework=args.inference_framework,  # Use inference framework from arguments
-                gpu_type=args.gpu_type
-            )
-            curr_gpu_type = get_gpu_details()
-            if eval_request.gpu_type != curr_gpu_type:
-                print(f"GPU type mismatch: {eval_request.gpu_type} vs {curr_gpu_type}")
-                raise Exception("GPU type mismatch")
-            results = process_evaluation(task, eval_request, limit=args.limit)
-        except Exception as e:
-            print(f"debug running error: {e}")
+        # try:
+        eval_request = EvalRequest(
+            model=debug_model_name,
+            private=False,
+            status="",
+            json_filepath="",
+            precision=precision,  # Use precision from arguments
+            inference_framework=args.inference_framework,  # Use inference framework from arguments
+            gpu_type=args.gpu_type
+        )
+        curr_gpu_type = get_gpu_details()
+        if eval_request.gpu_type != curr_gpu_type:
+            print(f"GPU type mismatch: {eval_request.gpu_type} vs {curr_gpu_type}")
+            raise Exception("GPU type mismatch")
+        results = process_evaluation(task, eval_request, limit=args.limit)
+        # except Exception as e:
+        #     print(f"debug running error: {e}")
     else:
         while True:
             res = False
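Editor's note: `tuple_input_decorator` exists because `RegexFilter.apply` expects plain strings, while this backend's generation path hands each response around as a tuple whose first element is the text. A minimal sketch of the same wrapping against a stand-in filter; the `(text, score)` tuple shape and `UpperFilter` are illustrative assumptions, not this repo's types:

```python
def tuple_input_decorator(func):
    def wrapper(self, resps, docs):
        # Keep only the text (first element) of each response tuple for filtering
        stripped_resps = [[resp_data[0] for resp_data in group] for group in resps]
        filtered_resps = func(self, stripped_resps, docs)
        # Re-attach the remaining tuple fields to the filtered text
        combined_resps = []
        for original_group, new_group in zip(resps, filtered_resps):
            combined_group = [(new_resp,) + rest_of_data[1:]
                              for new_resp, rest_of_data in zip(new_group, original_group)]
            combined_resps.append(combined_group)
        return combined_resps
    return wrapper


class UpperFilter:
    """Stand-in for RegexFilter: operates on plain strings only."""
    def apply(self, resps, docs):
        return [[r.upper() for r in group] for group in resps]


UpperFilter.apply = tuple_input_decorator(UpperFilter.apply)
resps = [[("the answer is 6", 0.9), ("the answer is 7", 0.1)]]
print(UpperFilter().apply(resps, docs=None))
# [[('THE ANSWER IS 6', 0.9), ('THE ANSWER IS 7', 0.1)]]
```

One caveat worth flagging: `process_evaluation` re-reads `RegexFilter.apply` into `original_apply` on every call, so after one gsm8k run the saved "original" is already wrapped; a second gsm8k run would double-wrap, and the inner wrapper would then index into strings instead of tuples.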
src/backend/envs.py CHANGED
@@ -57,6 +57,7 @@ class Tasks(Enum):
 
     # task20 = Task("race", "acc", "RACE", 0)
     task21 = Task("mmlu", "acc", "MMLU", 5)
+    task22 = Task("gsm8k", "exact_match", "GSM8K", 5)
 
 
 EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
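Editor's note: the new `task22` entry follows the same shape as its neighbors; judging from the call sites, the `Task` tuple appears to carry (benchmark, metric, leaderboard column, few-shot count). A hedged sketch of that layout; the field names below are assumptions, not the repo's actual definition:

```python
from dataclasses import dataclass

@dataclass
class Task:
    benchmark: str     # lm-eval task name, e.g. "gsm8k" (field name assumed)
    metric: str        # metric key to report, e.g. "exact_match" (assumed)
    col_name: str      # leaderboard column label, e.g. "GSM8K" (assumed)
    num_fewshot: int   # few-shot examples at evaluation time (assumed)

task22 = Task("gsm8k", "exact_match", "GSM8K", 5)
```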
src/backend/tasks/gsm8k/README.md ADDED
@@ -0,0 +1,59 @@
+# GSM8k
+
+## Paper
+Training Verifiers to Solve Math Word Problems
+https://arxiv.org/abs/2110.14168
+
+State-of-the-art language models can match human performance on many tasks, but
+they still struggle to robustly perform multi-step mathematical reasoning. To
+diagnose the failures of current models and support research, we introduce GSM8K,
+a dataset of 8.5K high-quality, linguistically diverse grade school math word problems.
+We find that even the largest transformer models fail to achieve high test performance,
+despite the conceptual simplicity of this problem distribution.
+
+NOTE: See the official implementation of the task:
+https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py
+for how to make use of the dataset's calculator annotations in your language
+model's sample/generation function.
+
+Homepage: https://github.com/openai/grade-school-math
+
+
+## Citation
+```
+@misc{cobbe2021training,
+      title={Training Verifiers to Solve Math Word Problems},
+      author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman},
+      year={2021},
+      eprint={2110.14168},
+      archivePrefix={arXiv},
+      primaryClass={cs.LG}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+- `math_word_problems`
+- `chain_of_thought`
+- `self_consistency`
+
+#### Tasks
+
+- `gsm8k_yaml`
+- `gsm8k_cot`: GSM8K with Chain-of-Thought
+- `gsm8k_cot_self_consistency`: GSM8K with Chain-of-Thought and Self-Consistency
+
+### Checklist
+
+- [x] Is in Eval-harness v1.0?
+- [ ] Has been checked for regression from v1.0?
+- [ ] Has been checked for equivalence with original paper methodology?
+- [ ] "Main" checked variant clearly denoted?
+
+### Variant Wishlist
+
+- [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation)
+- [ ] Using Verifiers
+- [ ] Majority voting "without CoT"
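Editor's note: as a quick smoke test outside this backend, the new task should also run through the harness's own Python entry point. A sketch assuming an lm-eval v0.4-style `simple_evaluate` and an illustrative small model (both placeholders, not repo defaults):

```python
import lm_eval

# Tiny debug run of the gsm8k task defined in this directory.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["gsm8k"],
    num_fewshot=5,
    limit=8,  # evaluate only 8 documents for a fast sanity check
)
print(results["results"]["gsm8k"])
```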
src/backend/tasks/gsm8k/gsm8k-cot-self-consistency.yaml ADDED
@@ -0,0 +1,34 @@
+include: gsm8k-cot.yaml
+group:
+  - chain_of_thought
+  - self_consistency
+task: gsm8k_cot_self_consistency
+generation_kwargs:
+  until:
+    - "Q:"
+    - "\n\n"
+  do_sample: true
+  temperature: 0.2
+repeats: 64
+filter_list:
+  - name: "score-first" # pick only the first response, and report metrics on that
+    filter:
+      - function: "regex"
+        regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
+      - function: "take_first"
+  - name: "maj@64"
+    filter:
+      - function: "regex"
+        regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
+      - function: "majority_vote"
+      - function: "take_first"
+  - name: "maj@8" # maj@8 via the first 8 responses; a better subsample estimator would be preferable
+    filter:
+      - function: "take_first_k"
+        k: 8
+      - function: "regex"
+        regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
+      - function: "majority_vote"
+      - function: "take_first"
+metadata:
+  version: 2.0
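Editor's note: the `maj@64` / `maj@8` chains implement self-consistency: extract the final answer from every sampled completion, then report the most common value. A rough Python equivalent of that extract-then-vote pipeline, on made-up completions:

```python
import re
from collections import Counter

# Same extraction regex as the filter_list above
PATTERN = re.compile(r"The answer is (\-?[0-9\.\,]*[0-9]+)")

samples = [
    "There were 21 - 15 = 6 trees planted. The answer is 6.",
    "21 minus 15 gives 6. The answer is 6.",
    "I think 21 - 15 = 7. The answer is 7.",
]
extracted = [m.group(1) for s in samples if (m := PATTERN.search(s))]
print(Counter(extracted).most_common(1)[0][0])  # "6", winning the vote 2 to 1
```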
src/backend/tasks/gsm8k/gsm8k-cot-zeroshot.yaml ADDED
@@ -0,0 +1,44 @@
+group:
+  - math_word_problems
+task: gsm8k_cot_zeroshot
+dataset_path: gsm8k
+dataset_name: main
+output_type: generate_until
+training_split: train
+fewshot_split: train
+test_split: test
+doc_to_text: "Q: {{question}}\nA: Let's think step by step."
+doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}"
+metric_list:
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+    ignore_case: true
+    ignore_punctuation: false
+    regexes_to_ignore:
+      - ","
+      - "\\$"
+      - "(?s).*#### "
+      - "\\.$"
+generation_kwargs:
+  until:
+    - "Q:"
+    - "</s>"
+    - "<|im_end|>"
+  do_sample: false
+repeats: 1
+num_fewshot: 0
+filter_list:
+  - name: "strict-match"
+    filter:
+      - function: "regex"
+        regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)."
+      - function: "take_first"
+  - name: "flexible-extract"
+    filter:
+      - function: "regex"
+        group_select: -1
+        regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
+      - function: "take_first"
+metadata:
+  version: 3.0
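Editor's note: the two filters differ in how forgiving they are: `strict-match` requires the canonical "The answer is N" phrasing, while `flexible-extract` takes the last number-like span in the completion (`group_select: -1`). Illustrated on a made-up completion:

```python
import re

completion = "5 bagels cost 5 x 3 = 15, so 23 - 15 = 8. The answer is 8."

strict = re.search(r"The answer is (\-?[0-9\.\,]+).", completion)
print(strict.group(1))  # "8"

matches = re.findall(r"(-?[$0-9.,]{2,})|(-?[0-9]+)", completion)
last = next(g for g in matches[-1] if g)  # group_select: -1 -> last match wins
print(last)  # "8." (the trailing "." is stripped later by regexes_to_ignore's "\.$")
```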
src/backend/tasks/gsm8k/gsm8k-cot.yaml ADDED
@@ -0,0 +1,51 @@
+group:
+  - chain_of_thought
+task: gsm8k_cot
+dataset_path: gsm8k
+dataset_name: main
+output_type: generate_until
+test_split: test
+doc_to_text: "Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\nA: There are 15 trees originally. Then there were 21 trees after some more were planted. So there must have been 21 - 15 = 6. The answer is 6.\n\n\
+  Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\nA: There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5. The answer is 5.\n\n\
+  Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\nA: Originally, Leah had 32 chocolates. Her sister had 42. So in total they had 32 + 42 = 74. After eating 35, they had 74 - 35 = 39. The answer is 39.\n\n\
+  Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\nA: Jason started with 20 lollipops. Then he had 12 after giving some to Denny. So he gave Denny 20 - 12 = 8. The answer is 8.\n\n\
+  Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\nA: Shawn started with 5 toys. If he got 2 toys each from his mom and dad, then that is 4 more toys. 5 + 4 = 9. The answer is 9.\n\n\
+  Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\nA: There were originally 9 computers. For each of 4 days, 5 more computers were added. So 5 * 4 = 20 computers were added. 9 + 20 is 29. The answer is 29.\n\n\
+  Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\nA: Michael started with 58 golf balls. After losing 23 on tuesday, he had 58 - 23 = 35. After losing 2 more, he had 35 - 2 = 33 golf balls. The answer is 33.\n\n\
+  Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\nA: Olivia had 23 dollars. 5 bagels for 3 dollars each will be 5 x 3 = 15 dollars. So she has 23 - 15 dollars left. 23 - 15 is 8. The answer is 8.\n\n\
+  Q: {{question}}\nA:"
+doc_to_target: "{{answer.split('####')[-1].strip()}}"
+metric_list:
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+    ignore_case: true
+    ignore_punctuation: false
+    regexes_to_ignore:
+      - ","
+      - "\\$"
+      - "(?s).*#### "
+      - "\\.$"
+generation_kwargs:
+  until:
+    - "Q:"
+    - "</s>"
+    - "<|im_end|>"
+  do_sample: false
+repeats: 1
+num_fewshot: 0
+filter_list:
+  - name: "strict-match"
+    filter:
+      - function: "regex"
+        regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)."
+      - function: "take_first"
+  - name: "flexible-extract"
+    filter:
+      - function: "regex"
+        group_select: -1
+        regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
+      - function: "take_first"
+metadata:
+  version: 3.0
+  num_fewshot: 8
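Editor's note: the `doc_to_target` template above reduces a GSM8K reference answer to the number after its `####` marker. The same expression in plain Python, on an abbreviated sample record:

```python
# Abbreviated GSM8K-style reference answer: reasoning steps, then "#### <number>"
answer = "Natalia sold 48/2 = 24 clips in May.\nAltogether she sold 48 + 24 = 72 clips.\n#### 72"

target = answer.split('####')[-1].strip()
print(target)  # "72"
```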
src/backend/tasks/gsm8k/gsm8k.yaml ADDED
@@ -0,0 +1,45 @@
+group:
+  - math_word_problems
+task: gsm8k
+dataset_path: gsm8k
+dataset_name: main
+output_type: generate_until
+training_split: train
+fewshot_split: train
+test_split: test
+doc_to_text: "Question: {{question}}\nAnswer:"
+doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}"
+metric_list:
+  - metric: exact_match
+    aggregation: mean
+    higher_is_better: true
+    ignore_case: true
+    ignore_punctuation: false
+    regexes_to_ignore:
+      - ","
+      - "\\$"
+      - "(?s).*#### "
+      - "\\.$"
+generation_kwargs:
+  until:
+    - "Question:"
+    - "</s>"
+    - "<|im_end|>"
+  do_sample: false
+  temperature: 0.0
+repeats: 1
+num_fewshot: 5
+filter_list:
+  - name: "strict-match"
+    filter:
+      - function: "regex"
+        regex_pattern: "#### (\\-?[0-9\\.\\,]+)"
+      - function: "take_first"
+  - name: "flexible-extract"
+    filter:
+      - function: "regex"
+        group_select: -1
+        regex_pattern: "(-?[$0-9.,]{2,})|(-?[0-9]+)"
+      - function: "take_first"
+metadata:
+  version: 3.0
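Editor's note: before `exact_match` is computed, the `regexes_to_ignore` entries strip commas, dollar signs, everything up to a `#### ` marker, and a trailing period from the compared strings. A paraphrase of that normalization in Python (the harness's real code path may differ in detail):

```python
import re

IGNORE_PATTERNS = [",", r"\$", r"(?s).*#### ", r"\.$"]

def normalize(text: str) -> str:
    for pattern in IGNORE_PATTERNS:
        text = re.sub(pattern, "", text)
    return text.lower()  # ignore_case: true

print(normalize("#### $1,234."))                 # "1234"
print(normalize("...reasoning...\n#### 1,234"))  # "1234"
```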
src/utils.py CHANGED
@@ -52,7 +52,7 @@ def parse_nvidia_smi():
         print("Failed to query GPU indices.")
         return []
     gpu_indices = result.stdout.strip().split('\n')
-    print(f"gpu_indices: {gpu_indices}")
+    # print(f"gpu_indices: {gpu_indices}")
     gpu_stats = []
 
     gpu_info_pattern = re.compile(r'(\d+)C\s+P\d+\s+(\d+)W / \d+W\s+\|\s+(\d+)MiB / \d+MiB\s+\|\s+(\d+)%')
@@ -80,7 +80,7 @@ def parse_nvidia_smi():
 
         if len(gpu_info) >= 4:
             gpu_stats.append(gpu_info)
-        print(f"gpu_stats: {gpu_stats}")
+        # print(f"gpu_stats: {gpu_stats}")
     gpu_name = f"{len(gpu_stats)}x{gpu_name}"
     gpu_stats_total = {
         GPU_TEMP: 0,
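Editor's note: the `gpu_info_pattern` above pulls temperature, power draw, memory use, and utilization out of each `nvidia-smi` table row. A quick demonstration on a fabricated row:

```python
import re

gpu_info_pattern = re.compile(
    r'(\d+)C\s+P\d+\s+(\d+)W / \d+W\s+\|\s+(\d+)MiB / \d+MiB\s+\|\s+(\d+)%'
)

row = "| 31%   45C    P2   170W / 350W |   9384MiB / 24576MiB |     87%      Default |"
print(gpu_info_pattern.search(row).groups())
# ('45', '170', '9384', '87') -> temp (C), power (W), memory (MiB), utilization (%)
```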