slz1 committed on
Commit 60d2674 · verified · 1 Parent(s): ea827ee

Upload inference_models.py

Files changed (1)
  1. inference_models.py +199 -0
inference_models.py ADDED
@@ -0,0 +1,199 @@
+ import os
+ import torch
+ import torch.nn as nn
+ from transformers import AutoModel, AutoTokenizer, AutoModelForCausalLM
+
+
+ # get model and tokenizer
+ def get_inference_model(model_dir):
+     inference_tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
+     inference_model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).half().cuda()
+     inference_model.eval()
+     return inference_tokenizer, inference_model
+
+
+ # get llama model and tokenizer
+ def get_inference_model_llama(model_dir):
+     inference_model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.bfloat16)
+     inference_tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
+     device = "cuda"
+     inference_model.to(device)
+     return inference_tokenizer, inference_model
+
+ # get mistral model and tokenizer
+ def get_inference_model_mistral(model_dir):
+     inference_model = AutoModelForCausalLM.from_pretrained(model_dir, trust_remote_code=True, torch_dtype=torch.bfloat16)
+     inference_tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
+     # inference_tokenizer.pad_token = inference_tokenizer.eos_token
+     device = "cuda"
+     inference_model.to(device)
+     return inference_tokenizer, inference_model
+
+
+ # get glm model response
+ def get_local_response(query, model, tokenizer, max_length=2048, truncation=True, do_sample=False, max_new_tokens=1024, temperature=0.7):
+     cnt = 2  # retry budget: try at most twice before giving up
+     all_response = ''
+     while cnt:
+         try:
+             inputs = tokenizer([query], return_tensors="pt", truncation=truncation, max_length=max_length).to('cuda')
+             output_ = model.generate(**inputs, do_sample=do_sample, max_new_tokens=max_new_tokens, temperature=temperature)
+             # keep only the newly generated tokens, dropping the prompt
+             output = output_.tolist()[0][len(inputs["input_ids"][0]):]
+             response = tokenizer.decode(output)
+
+             print(f'obtained response: {response}\n')
+             all_response = response
+             break
+         except Exception as e:
+             print(f'Error: {e}, obtaining response again...\n')
+             cnt -= 1
+     if not cnt:
+         return []
+     # return the response as a list of lines (one reasoning step per line)
+     split_response = all_response.strip().split('\n')
+     return split_response
+
+
+ # get llama model response
+ # (earlier Llama-3 chat-template version, kept commented out)
+ # def get_local_response_llama(query, model, tokenizer, max_length=2048, truncation=True, max_new_tokens=1024, temperature=0.7, do_sample=False):
+ #     cnt = 2
+ #     all_response = ''
+ #     # messages = [{"role": "user", "content": query}]
+ #     # data = tokenizer.apply_chat_template(messages, return_tensors="pt").cuda()
+ #     terminators = [
+ #         tokenizer.eos_token_id,
+ #         tokenizer.convert_tokens_to_ids("<|eot_id|>")
+ #     ]
+ #     message = '<|start_header_id|>user<|end_header_id|>\n\n{query}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n'.format(query=query)
+ #     data = tokenizer.encode_plus(message, max_length=max_length, truncation=truncation, return_tensors='pt')
+ #     input_ids = data['input_ids'].to('cuda')
+ #     attention_mask = data['attention_mask'].to('cuda')
+ #     while cnt:
+ #         try:
+ #             # query = "<s>Human: " + query + "</s><s>Assistant: "
+ #             # input_ids = tokenizer([query], return_tensors="pt", add_special_tokens=False).input_ids.to('cuda')
+ #             output = model.generate(input_ids, attention_mask=attention_mask, do_sample=do_sample, max_new_tokens=max_new_tokens, temperature=temperature, eos_token_id=terminators, pad_token_id=tokenizer.eos_token_id)
+ #             ori_string = tokenizer.decode(output[0], skip_special_tokens=False)
+ #             processed_string = ori_string.split('<|end_header_id|>')[2].strip().split('<|eot_id|>')[0].strip()
+ #             response = processed_string.split('<|end_of_text|>')[0].strip()
+
+ #             # print(f'obtained response: {response}\n')
+ #             all_response = response
+ #             break
+ #         except Exception as e:
+ #             print(f'Error: {e}, obtaining response again...\n')
+ #             cnt -= 1
+ #     if not cnt:
+ #         return []
+ #     # split_response = all_response.split("Assistant:")[-1].strip().split('\n')
+ #     split_response = all_response.split('\n')
+ #     return split_response
+
+ # (earlier Qwen chat-template version, kept commented out)
+ # def get_local_response_llama(query, model, tokenizer, max_length=2048, truncation=True, max_new_tokens=2048, temperature=0.7, do_sample=False):
+ #     cnt = 2
+ #     all_response = ''
+ #     # messages = [{"role": "user", "content": query}]
+ #     # data = tokenizer.apply_chat_template(messages, return_tensors="pt").cuda()
+ #     terminators = [
+ #         tokenizer.eos_token_id,
+ #         # tokenizer.convert_tokens_to_ids("<|eot_id|>")
+ #     ]
+ #     # message = '<|start_header_id|>user<|end_header_id|>\n\n{query}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n'.format(query=query)
+ #     message = '<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n'.format(query=query)
+ #     data = tokenizer.encode_plus(message, max_length=max_length, truncation=truncation, return_tensors='pt')
+ #     input_ids = data['input_ids'].to('cuda')
+ #     attention_mask = data['attention_mask'].to('cuda')
+ #     while cnt:
+ #         try:
+ #             # query = "<s>Human: " + query + "</s><s>Assistant: "
+ #             # input_ids = tokenizer([query], return_tensors="pt", add_special_tokens=False).input_ids.to('cuda')
+ #             output = model.generate(input_ids, attention_mask=attention_mask, do_sample=do_sample, max_new_tokens=max_new_tokens, temperature=temperature, eos_token_id=terminators, pad_token_id=tokenizer.eos_token_id)
+ #             ori_string = tokenizer.decode(output[0], skip_special_tokens=False)
+ #             # processed_string = ori_string.split('<|end_header_id|>')[2].strip().split('<|eot_id|>')[0].strip()
+ #             # response = processed_string.split('<|end_of_text|>')[0].strip()
+ #             response = ori_string.split('<|im_start|>assistant')[-1].strip()
+ #             # print(f'obtained response: {response}\n')
+ #             all_response = response.replace('<|im_end|>', '')
+ #             break
+ #         except Exception as e:
+ #             print(f'Error: {e}, obtaining response again...\n')
+ #             cnt -= 1
+ #     if not cnt:
+ #         return []
+ #     # split_response = all_response.split("Assistant:")[-1].strip().split('\n')
+ #     split_response = all_response.split('\n')
+ #     return split_response
+
+ # ================================ QwQ 32B preview version ================================
+ def get_local_response_llama(query, model, tokenizer, max_length=2048, truncation=True, max_new_tokens=2048, temperature=0.7, do_sample=False):
+     cnt = 2  # retry budget: try at most twice before giving up
+     all_response = ''
+     terminators = [
+         tokenizer.eos_token_id,
+     ]
+
+     messages = [
+         {"role": "system", "content": "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step."},
+         {"role": "user", "content": query}
+     ]
+     text = tokenizer.apply_chat_template(
+         messages,
+         tokenize=False,
+         add_generation_prompt=True
+     )
+     model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+
+     while cnt:
+         try:
+             generated_ids = model.generate(
+                 **model_inputs,
+                 do_sample=do_sample, max_new_tokens=3062, temperature=temperature, eos_token_id=terminators,  # note: max_new_tokens is hard-coded here and overrides the function argument
+             )
+             # strip the prompt tokens so only the newly generated completion remains
+             generated_ids = [
+                 output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+             ]
+             all_response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+             break
+         except Exception as e:
+             print(f'Error: {e}, obtaining response again...\n')
+             cnt -= 1
+     if not cnt:
+         return []
+
+     # return the response as a list of lines (one reasoning step per line)
+     split_response = all_response.split('\n')
+
+     return split_response
+
+
+ # get mistral model response
+ def get_local_response_mistral(query, model, tokenizer, max_length=1024, truncation=True, max_new_tokens=1024, temperature=0.7, do_sample=False):
+     cnt = 2  # retry budget: try at most twice before giving up
+     all_response = ''
+     # messages = [{"role": "user", "content": query}]
+     # data = tokenizer.apply_chat_template(messages, max_length=max_length, truncation=truncation, return_tensors="pt").cuda()
+     message = '[INST]' + query + '[/INST]'
+     data = tokenizer.encode_plus(message, max_length=max_length, truncation=truncation, return_tensors='pt')
+     input_ids = data['input_ids'].to('cuda')
+     attention_mask = data['attention_mask'].to('cuda')
+     while cnt:
+         try:
+             output = model.generate(input_ids, attention_mask=attention_mask, max_new_tokens=max_new_tokens, do_sample=do_sample, temperature=temperature, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id)
+             ori_string = tokenizer.decode(output[0])
+             # keep only the assistant completion between [/INST] and </s>
+             processed_string = ori_string.split('[/INST]')[1].strip()
+             response = processed_string.split('</s>')[0].strip()
+
+             print(f'obtained response: {response}\n')
+             all_response = response
+             break
+         except Exception as e:
+             print(f'Error: {e}, obtaining response again...\n')
+             cnt -= 1
+     if not cnt:
+         return []
+     all_response = all_response.split('The answer is:')[0].strip()  # intermediate steps should not always include a final answer
+     ans_count = all_response.split('####')
+     if len(ans_count) >= 2:
+         all_response = ans_count[0] + 'Therefore, the answer is:' + ans_count[1]
+     # remove Mistral-specific answer markers
+     all_response = all_response.replace('[SOL]', '').replace('[ANS]', '').replace('[/ANS]', '').replace('[INST]', '').replace('[/INST]', '').replace('[ANSW]', '').replace('[/ANSW]', '')
+     split_response = all_response.split('\n')
+     return split_response
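
Usage sketch (not part of the uploaded file): a minimal example of how the loader and response helpers above are intended to be paired, assuming a CUDA-capable GPU; the checkpoint path and the query are placeholders.

    # hypothetical example: load a local Qwen/QwQ-style checkpoint and query it
    tokenizer, model = get_inference_model_llama("/path/to/checkpoint")  # placeholder path
    steps = get_local_response_llama("What is 17 * 24? Think step by step.", model, tokenizer)
    for step in steps:  # one line of the model's reasoning per list entry
        print(step)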