import argparse
import json
import re
from pathlib import Path
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
from datasets import load_dataset, load_from_disk
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizer
from transformers.generation import GenerationConfig

HistoryType = List[Tuple[str, str]]
TokensType = List[int]
BatchTokensType = List[List[int]]

def make_context(
    tokenizer: PreTrainedTokenizer,
    query: str,
    history: Optional[List[Tuple[str, str]]] = None,
    system: str = "",
    max_window_size: int = 6144,
    chat_format: str = "chatml",
):
    # Build a ChatML prompt (only the "chatml" format is handled here), returning
    # both the raw prompt text and the corresponding token ids.
    if history is None:
        history = []

    im_start, im_end = "<|im_start|>", "<|im_end|>"
    im_start_tokens = [tokenizer.im_start_id]
    im_end_tokens = [tokenizer.im_end_id]
    nl_tokens = tokenizer.encode("\n")

    def _tokenize_str(role, content):
        return f"{role}\n{content}", tokenizer.encode(
            role
        ) + nl_tokens + tokenizer.encode(content)

    system_text, system_tokens_part = _tokenize_str("system", system)
    system_tokens = im_start_tokens + system_tokens_part + im_end_tokens

    raw_text = ""
    context_tokens = []

    # Walk the history from the most recent turn backwards, prepending turns until
    # adding another turn would exceed max_window_size tokens.
    for turn_query, turn_response in reversed(history):
        query_text, query_tokens_part = _tokenize_str("user", turn_query)
        query_tokens = im_start_tokens + query_tokens_part + im_end_tokens
        response_text, response_tokens_part = _tokenize_str(
            "assistant", turn_response
        )
        response_tokens = im_start_tokens + response_tokens_part + im_end_tokens

        next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens
        prev_chat = (
            f"\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}"
        )

        current_context_size = (
            len(system_tokens) + len(next_context_tokens) + len(context_tokens)
        )
        if current_context_size < max_window_size:
            context_tokens = next_context_tokens + context_tokens
            raw_text = prev_chat + raw_text
        else:
            break

    context_tokens = system_tokens + context_tokens
    raw_text = f"{im_start}{system_text}{im_end}" + raw_text
    context_tokens += (
        nl_tokens
        + im_start_tokens
        + _tokenize_str("user", query)[1]
        + im_end_tokens
        + nl_tokens
        + im_start_tokens
        + tokenizer.encode("assistant")
        + nl_tokens
    )
    raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"

    return raw_text, context_tokens

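# For reference, the prompt assembled above follows the ChatML layout, e.g. for an
# empty history (a sketch; the actual token ids depend on the tokenizer):
#
#   <|im_start|>system
#   You are a helpful assistant.<|im_end|>
#   <|im_start|>user
#   {query}<|im_end|>
#   <|im_start|>assistant
#
# The model is expected to continue generating after the final "assistant" header.
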
def chat(
    model,
    tokenizer: PreTrainedTokenizer,
    query: str,
    history: Optional[HistoryType],
    system: str = "You are a helpful assistant.",
    append_history: bool = True,
) -> Tuple[str, HistoryType]:
    if history is None:
        history = []

    raw_text, context_tokens = make_context(
        tokenizer,
        query,
        history=history,
        system=system,
        max_window_size=6144,
        chat_format="chatml",
    )

    # Stop generation at the ChatML delimiters. Qwen's remote-code generate()
    # accepts stop_words_ids; stock HF models do not take this keyword.
    stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]]
    input_ids = torch.tensor([context_tokens]).cuda()
    outputs = model.generate(
        input_ids,
        stop_words_ids=stop_words_ids,
        return_dict_in_generate=False,
    )

    response = decode_tokens(
        outputs[0],
        tokenizer,
        raw_text_len=len(raw_text),
        context_length=len(context_tokens),
        chat_format="chatml",
        verbose=False,
    )

    if append_history:
        history.append((query, response))

    return response, history

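# Minimal usage sketch (assumes a Qwen chat checkpoint loaded with
# trust_remote_code=True, e.g. via load_models_tokenizer below):
#
#   model, tokenizer = load_models_tokenizer(args)
#   response, history = chat(model, tokenizer, "What is 12 * 7?", history=None)
#   response, history = chat(model, tokenizer, "And doubled?", history=history)
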
def decode_tokens(
    tokens: Union[torch.LongTensor, TokensType],
    tokenizer: PreTrainedTokenizer,
    raw_text_len: int,
    context_length: int,
    chat_format: str = "chatml",
    verbose: bool = False,
    return_end_reason: bool = False,
) -> str:
    if torch.is_tensor(tokens):
        tokens = tokens.cpu().numpy().tolist()

    return _decode_chatml(
        tokens,
        stop_words=[],
        eod_token_ids=[tokenizer.im_start_id, tokenizer.im_end_id],
        tokenizer=tokenizer,
        raw_text_len=raw_text_len,
        context_length=context_length,
        verbose=verbose,
        return_end_reason=return_end_reason,
    )

def _decode_chatml(
    tokens: List[int],
    *,
    stop_words: List[str],
    eod_token_ids: List[int],
    tokenizer: PreTrainedTokenizer,
    raw_text_len: int,
    context_length: int,
    verbose: bool = False,
    return_end_reason: bool = False,
    chat_format="chatml",
):
    end_reason = f"Gen length {len(tokens)}"
    # Find the first end-of-dialog token emitted after the prompt.
    eod_token_idx = context_length
    for eod_token_idx in range(context_length, len(tokens)):
        if tokens[eod_token_idx] in eod_token_ids:
            end_reason = f"Gen {tokenizer.decode([tokens[eod_token_idx]])!r}"
            break

    # Decode everything up to the end token and strip off the prompt text.
    trim_decode_tokens = tokenizer.decode(tokens[:eod_token_idx])[raw_text_len:]
    if verbose:
        print("\nRaw Generate w/o EOD:", tokenizer.decode(tokens)[raw_text_len:])
        print("\nRaw Generate:", trim_decode_tokens)
        print("\nEnd Reason:", end_reason)
    for stop_word in stop_words:
        trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
    trim_decode_tokens = trim_decode_tokens.strip()
    if verbose:
        print("\nGenerate:", trim_decode_tokens)

    if return_end_reason:
        return trim_decode_tokens, end_reason
    else:
        return trim_decode_tokens

def load_models_tokenizer(args):
    tokenizer = AutoTokenizer.from_pretrained(
        args.checkpoint_path, trust_remote_code=True
    )
    model = AutoModelForCausalLM.from_pretrained(
        args.checkpoint_path, device_map="auto", trust_remote_code=True
    ).eval()
    model.generation_config = GenerationConfig.from_pretrained(
        args.checkpoint_path, trust_remote_code=True
    )
    # Greedy decoding for a reproducible evaluation run.
    model.generation_config.do_sample = False
    return model, tokenizer


'''
python eval/evaluate_chat_gsm8k.py [--use-fewshot]
'''

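# Overview: the script below evaluates a Qwen chat checkpoint on the GSM8K test
# split, generating an answer per question, scoring it against the reference, and
# streaming per-sample JSONL records before reporting the mean accuracy.
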
INVALID_ANS = "[invalid]"
DEVICE = "cuda:0"


def doc_to_text(doc, use_fewshot):
    # Build the evaluation prompt: either the bare question (zero-shot) or a
    # 4-shot chain-of-thought prefix followed by the question.
    if use_fewshot:
        context = (
            "Question: Angelo and Melanie want to plan how many hours over the next week they should study together for their test next week. They have 2 chapters of their textbook to study and 4 worksheets to memorize. They figure out that they should dedicate 3 hours to each chapter of their textbook and 1.5 hours for each worksheet. If they plan to study no more than 4 hours each day, how many days should they plan to study total over the next week if they take a 10-minute break every hour, include 3 10-minute snack breaks each day, and 30 minutes for lunch each day?\nLet's think step by step\n"
            "Angelo and Melanie think they should dedicate 3 hours to each of the 2 chapters, 3 hours x 2 chapters = 6 hours total.\nFor the worksheets they plan to dedicate 1.5 hours for each worksheet, 1.5 hours x 4 worksheets = 6 hours total.\nAngelo and Melanie need to start with planning 12 hours to study, at 4 hours a day, 12 / 4 = 3 days.\nHowever, they need to include time for breaks and lunch. Every hour they want to include a 10-minute break, so 12 total hours x 10 minutes = 120 extra minutes for breaks.\nThey also want to include 3 10-minute snack breaks, 3 x 10 minutes = 30 minutes.\nAnd they want to include 30 minutes for lunch each day, so 120 minutes for breaks + 30 minutes for snack breaks + 30 minutes for lunch = 180 minutes, or 180 / 60 minutes per hour = 3 extra hours.\nSo Angelo and Melanie want to plan 12 hours to study + 3 hours of breaks = 15 hours total.\nThey want to study no more than 4 hours each day, 15 hours / 4 hours each day = 3.75\nThey will need to plan to study 4 days to allow for all the time they need.\nThe answer is 4\n\n"
            "Question: Mark's basketball team scores 25 2 pointers, 8 3 pointers and 10 free throws. Their opponents score double the 2 pointers but half the 3 pointers and free throws. What's the total number of points scored by both teams added together?\nLet's think step by step\n"
            "Mark's team scores 25 2 pointers, meaning they scored 25*2= 50 points in 2 pointers.\nHis team also scores 8 3 pointers, meaning they scored 8*3= 24 points in 3 pointers\nThey scored 10 free throws, and free throws count as one point so they scored 10*1=10 points in free throws.\nAll together his team scored 50+24+10= 84 points\nMark's opponents scored double his team's number of 2 pointers, meaning they scored 50*2=100 points in 2 pointers.\nHis opponents scored half his team's number of 3 pointers, meaning they scored 24/2= 12 points in 3 pointers.\nThey also scored half Mark's team's points in free throws, meaning they scored 10/2=5 points in free throws.\nAll together Mark's opponents scored 100+12+5=117 points\nThe total score for the game is both team's scores added together, so it is 84+117=201 points\nThe answer is 201\n\n"
            "Question: Bella has two times as many marbles as frisbees. She also has 20 more frisbees than deck cards. If she buys 2/5 times more of each item, what would be the total number of the items she will have if she currently has 60 marbles?\nLet's think step by step\n"
            "When Bella buys 2/5 times more marbles, she'll have increased the number of marbles by 2/5*60 = 24\nThe total number of marbles she'll have is 60+24 = 84\nIf Bella currently has 60 marbles, and she has two times as many marbles as frisbees, she has 60/2 = 30 frisbees.\nIf Bella buys 2/5 times more frisbees, she'll have 2/5*30 = 12 more frisbees.\nThe total number of frisbees she'll have will increase to 30+12 = 42\nBella also has 20 more frisbees than deck cards, meaning she has 30-20 = 10 deck cards\nIf she buys 2/5 times more deck cards, she'll have 2/5*10 = 4 more deck cards.\nThe total number of deck cards she'll have is 10+4 = 14\nTogether, Bella will have a total of 14+42+84 = 140 items\nThe answer is 140\n\n"
            "Question: A group of 4 fruit baskets contains 9 apples, 15 oranges, and 14 bananas in the first three baskets and 2 less of each fruit in the fourth basket. How many fruits are there?\nLet's think step by step\n"
            "For the first three baskets, the number of apples and oranges in one basket is 9+15=24\nIn total, together with bananas, the number of fruits in one basket is 24+14=38 for the first three baskets.\nSince there are three baskets each having 38 fruits, there are 3*38=114 fruits in the first three baskets.\nThe number of apples in the fourth basket is 9-2=7\nThere are also 15-2=13 oranges in the fourth basket\nThe combined number of oranges and apples in the fourth basket is 13+7=20\nThe fourth basket also contains 14-2=12 bananas.\nIn total, the fourth basket has 20+12=32 fruits.\nThe four baskets together have 32+114=146 fruits.\nThe answer is 146\n\n"
            f"Question: {doc['question']}\nLet's think step by step"
        )
    else:
        context = doc["question"]
    return context

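# Note: the few-shot context above ends with "Question: ...\nLet's think step by
# step", so the model is expected to continue with a worked solution that finishes
# in "The answer is <number>", which extract_answer() below then parses.
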
def decode(tokens_list, tokenizer, raw_text_len):
    # Not used by the chat evaluation path below. Assumes Qwen's tokenizer, which
    # exposes its underlying tiktoken encoding as `.tokenizer`.
    sents = []
    for tokens in tokens_list:
        tokens = tokens.cpu().numpy().tolist()
        sent = tokenizer.tokenizer.decode(tokens[raw_text_len:])
        sent = sent.split("<|endoftext|>")[0]
        sent = sent.split("\n\n\n")[0]
        sent = sent.split("\n\n")[0]
        sent = sent.split("Question:")[0]
        sents.append(sent)
    return sents

def generate_sample(model, tokenizer, question):
    response, _ = chat(
        model,
        tokenizer,
        question,
        history=None,
    )
    print(question)
    print("-------------")
    print(response)
    print("=============")
    return response

def extract_answer_hf(completion):
    def _get_last_digit(s):
        _PAT_LAST_DIGIT = re.compile(
            r"(?<=(\s|[\$%#{]))([+-])?(?=(\S))(0|([1-9](\d*|\d{0,2}(,\d{3})*)))?(\.\d*[1-9])?(?=(\s|[.,}]|$))"
        )
        match = list(_PAT_LAST_DIGIT.finditer(s))
        if match:
            # Take the last number in the string and normalise "+" signs and
            # thousands separators before evaluating it.
            last_digit = match[-1].group().replace(",", "").replace("+", "")
        else:
            last_digit = None
            print(f"No digits found in {s!r}")
        return last_digit

    job_gen = completion.strip(".").replace("\n", "\\n")
    last_digit = _get_last_digit(job_gen)
    if last_digit is not None:
        return eval(last_digit)
    return INVALID_ANS

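# Illustration (hedged, not exhaustive): extract_answer_hf("The answer is 201")
# returns 201, extract_answer_hf("It costs $3.5 per item") returns 3.5, and a
# completion with no parseable number returns INVALID_ANS.
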
def extract_answer(completion):
    # Take the last integer appearing in the text as the predicted answer.
    try:
        last_number = re.findall(r"\d+", completion)[-1]
        return eval(last_number)
    except Exception:
        return INVALID_ANS


def is_correct(completion, answer):
    gold = extract_answer(answer)
    assert gold != INVALID_ANS, "No ground truth answer found in the document."
    return extract_answer(completion) == gold

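# GSM8K reference answers end with a line of the form "#### <number>", so
# extract_answer(doc["answer"]) picks up that final number as the gold label,
# e.g. extract_answer("... #### 72") -> 72.
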
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Test HF checkpoint.")
    parser.add_argument(
        "-c",
        "--checkpoint-path",
        type=Path,
        help="Checkpoint path",
        default="Qwen/Qwen-7B-Chat",
    )
    parser.add_argument("-f", "--sample-input-file", type=str, default=None)
    parser.add_argument(
        "-o", "--sample-output-file", type=str, default="gsm8k_res.jsonl"
    )
    parser.add_argument("--use-fewshot", action="store_true")

    args = parser.parse_args()

    if args.sample_input_file is not None:
        dataset = load_from_disk(args.sample_input_file)
    else:
        dataset = load_dataset("gsm8k", "main")

    print("Loading tokenizer ...")
    tokenizer = AutoTokenizer.from_pretrained(
        args.checkpoint_path, trust_remote_code=True
    )

    print("Loading model ...")
    # bf16 and use_flash_attn are Qwen-specific loading options handled by the
    # remote code; they belong on the model, not the tokenizer.
    model = AutoModelForCausalLM.from_pretrained(
        args.checkpoint_path,
        device_map="auto",
        trust_remote_code=True,
        bf16=True,
        use_flash_attn=True,
    ).eval()
    model.generation_config = GenerationConfig.from_pretrained(
        args.checkpoint_path, trust_remote_code=True
    )
    # Greedy decoding for a deterministic evaluation run.
    model.generation_config.do_sample = False

    test = dataset["test"]

    f_output = open(args.sample_output_file, "w", encoding="utf-8")
    tot_length = test.num_rows
    acc_res = []
    for doc in tqdm(test):
        context = doc_to_text(doc, args.use_fewshot)
        print(context)
        completion = generate_sample(model, tokenizer, context)
        answer = doc["answer"]
        acc = is_correct(completion, answer)
        doc["completion"] = completion
        doc["acc"] = acc
        f_output.write(json.dumps(doc, ensure_ascii=False) + "\n")
        f_output.flush()
        acc_res.append(acc)

    f_output.close()
    print("4-shot Acc:" if args.use_fewshot else "Zero-shot Acc:", np.mean(acc_res))
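
# Example invocations (using the argparse options defined above):
#   python eval/evaluate_chat_gsm8k.py -c Qwen/Qwen-7B-Chat                 # zero-shot
#   python eval/evaluate_chat_gsm8k.py -c Qwen/Qwen-7B-Chat --use-fewshot   # 4-shot CoT
# Per-sample results are streamed to the file given by -o (default gsm8k_res.jsonl).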