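"""Run greedy vLLM inference over one conversational-QA eval dataset and write
one generation per line to <output_folder>/<eval_dataset>_output.txt.

CLI options come from the repo-local arguments.get_args; data loading and
prompt construction live in the repo-local dataset module.
"""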
import os

from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

from arguments import get_args
from dataset import load_data, get_inputs

def get_prompt_list(args):

    ## get tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_id)

    ## map each supported eval dataset to its input file path
    dataset_paths = {
        "doc2dial": args.doc2dial_path,
        "convfinqa": args.convfinqa_path,
        "quac": args.quac_path,
        "qrecc": args.qrecc_path,
        "doqa_cooking": args.doqa_cooking_path,
        "doqa_travel": args.doqa_travel_path,
        "doqa_movies": args.doqa_movies_path,
        "coqa": args.coqa_path,
        "sqa": args.sqa_path,
        "topiocqa": args.topiocqa_path,
        "inscit": args.inscit_path,
        "hybridial": args.hybridial_path,
    }
    if args.eval_dataset not in dataset_paths:
        raise ValueError("unknown eval_dataset %r; expected one of: %s"
                         % (args.eval_dataset, ", ".join(sorted(dataset_paths))))
    input_datapath = os.path.join(args.data_folder, dataset_paths[args.eval_dataset])

    ## load samples and build the formatted prompts
    data_list = load_data(input_datapath)
    print("number of samples in the dataset:", len(data_list))
    prompt_list = get_inputs(data_list, args.eval_dataset, tokenizer,
                             num_ctx=args.num_ctx, max_output_len=args.out_seq_len)

    return prompt_list
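## For orientation: get_inputs (defined in dataset.py) is expected to return
## fully formatted prompt strings. The rough shape sketched below is an
## assumption for illustration only; the actual template lives in dataset.py:
##
##   System: <task instruction>
##
##   <retrieved context passages>
##
##   User: <dialogue history and current question>
##
##   Assistant: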


def main():
    args = get_args()
    
    ## BOS token for Llama-3; prepended manually to each prompt before generation
    bos_token = "<|begin_of_text|>"

    ## get model_path
    model_path = os.path.join(args.model_folder, args.model_name)
    
    ## get prompt_list
    prompt_list = get_prompt_list(args)
    
    ## get output_datapath
    output_datapath = os.path.join(args.output_folder, "%s_output.txt" % args.eval_dataset)

    ## run inference with greedy decoding (temperature=0, top_k=1)
    sampling_params = SamplingParams(temperature=0, top_k=1, max_tokens=args.max_tokens)

    ## load the model with vLLM, sharded across 8 GPUs via tensor parallelism
    model_vllm = LLM(model_path, tensor_parallel_size=8)
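    ## note: tensor_parallel_size should equal the number of GPUs to use, and
    ## vLLM requires the model's attention-head count to be divisible by it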

    ## submit all prompts in one call so vLLM can batch them; outputs come back
    ## in the same order as the input prompts
    prompts = [bos_token + prompt for prompt in prompt_list]
    outputs = model_vllm.generate(prompts, sampling_params)

    output_list = []
    for output in outputs:
        ## flatten each generation onto a single line for the output file
        generated_text = output.outputs[0].text.strip().replace("\n", " ")
        output_list.append(generated_text)

    print("writing to %s" % output_datapath)
    with open(output_datapath, "w") as f:
        for output in output_list:
            f.write(output + "\n")


if __name__ == "__main__":
    main()
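
## Example invocation (illustrative only; the real script name, flag names, and
## values are defined in arguments.get_args and may differ):
##
##   python run_inference.py \
##       --eval_dataset doc2dial \
##       --data_folder /path/to/data \
##       --model_folder /path/to/checkpoints \
##       --model_name <checkpoint_dir> \
##       --model_id meta-llama/Meta-Llama-3-8B-Instruct \
##       --output_folder ./outputs \
##       --num_ctx 5 --out_seq_len 64 --max_tokens 64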