Add chat template

#6
by Rocketknight1
Files changed (2)
  1. README.md +20 -22
  2. tokenizer_config.json +1 -0
README.md CHANGED
@@ -120,27 +120,25 @@ tokenizer = transformers.AutoTokenizer.from_pretrained(
     use_fast=False,
 )

-system_message = "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."
-user_message = "How can you determine if a restaurant is popular among locals or mainly attracts tourists, and why might this information be useful?"
+messages = [
+    {"role": "system", "content": "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."},
+    {"role": "user", "content": "How can you determine if a restaurant is popular among locals or mainly attracts tourists, and why might this information be useful?"}
+]
+inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")

-prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
-
-inputs = tokenizer(prompt, return_tensors='pt')
-output_ids = model.generate(inputs["input_ids"],)
-answer = tokenizer.batch_decode(output_ids)[0]
+output_ids = model.generate(inputs)
+answer = tokenizer.decode(output_ids[0])

 print(answer)

 # This example continues showing how to add a second turn message by the user to the conversation
-second_turn_user_message = "Give me a list of the key points of your first answer."
-
-# we set add_special_tokens=False because we dont want to automatically add a bos_token between messages
-second_turn_message_in_markup = f"\n<|im_start|>user\n{second_turn_user_message}<|im_end|>\n<|im_start|>assistant"
-second_turn_tokens = tokenizer(second_turn_message_in_markup, return_tensors='pt', add_special_tokens=False)
-second_turn_input = torch.cat([output_ids, second_turn_tokens['input_ids']], dim=1)
+messages.append(
+    {"role": "user", "content": "Give me a list of the key points of your first answer."}
+)
+second_turn_input = tokenizer.apply_chat_template(messages, return_tensors='pt')

-output_ids_2 = model.generate(second_turn_input,)
-second_turn_answer = tokenizer.batch_decode(output_ids_2)[0]
+output_ids_2 = model.generate(second_turn_input)
+second_turn_answer = tokenizer.decode(output_ids_2[0])

 print(second_turn_answer)
 ```
@@ -209,16 +207,16 @@ tokenizer = transformers.AutoTokenizer.from_pretrained(
     add_special_tokens=False,
 )

-system_message = "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."
-user_message = "\" \n :You can't just say, \"\"that's crap\"\" and remove it without gaining a consensus. You already know this, based on your block history. —/ \" \nIs the comment obscene? \nOptions : Yes, No."
-
-prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
+messages = [
+    {"role": "system", "content": "You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior."},
+    {"role": "user", "content": "\" \n :You can't just say, \"\"that's crap\"\" and remove it without gaining a consensus. You already know this, based on your block history. —/ \" \nIs the comment obscene? \nOptions : Yes, No."}
+]

-inputs = tokenizer(prompt, return_tensors='pt')
+inputs = tokenizer.apply_chat_template(messages, return_tensors='pt')
 inputs = inputs.to(device)

-output_ids = model.generate(inputs["input_ids"], max_length=4096, do_sample=False, temperature=0.0, use_cache=True)
-sequence_length = inputs["input_ids"].shape[1]
+output_ids = model.generate(inputs, max_length=4096, do_sample=False, temperature=0.0, use_cache=True)
+sequence_length = inputs.shape[1]
 new_output_ids = output_ids[:, sequence_length:]
 answers = tokenizer.batch_decode(new_output_ids, skip_special_tokens=True)
 final_output = answers[0] if not should_filter_out(answers[0]) else "[Content Filtered]"
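
For reviewers who want to confirm the template matches the markup it replaces, the rendered string can be compared against the old hand-built prompt. A minimal sketch, assuming the `microsoft/Orca-2-13b` checkpoint (the target repo is not named in this diff) and `transformers >= 4.34` for `apply_chat_template`:

```python
# Sketch: compare the old hand-assembled ChatML prompt with the output of
# the new chat template. The checkpoint name below is an assumption.
import transformers

tokenizer = transformers.AutoTokenizer.from_pretrained(
    "microsoft/Orca-2-13b",  # assumed checkpoint; not confirmed by this diff
    use_fast=False,
)

system_message = "You are Orca, an AI language model created by Microsoft."
user_message = "How can you determine if a restaurant is popular among locals?"

# Old approach: ChatML markup assembled by hand with an f-string.
manual_prompt = (
    f"<|im_start|>system\n{system_message}<|im_end|>\n"
    f"<|im_start|>user\n{user_message}<|im_end|>\n"
    f"<|im_start|>assistant"
)

# New approach: the chat template renders the same structure from a message list.
messages = [
    {"role": "system", "content": system_message},
    {"role": "user", "content": user_message},
]
templated = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, tokenize=False
)

# The strings differ only at the edges: the template emits the BOS token
# (followed by a space) itself and ends with "<|im_start|>assistant\n",
# while the old code relied on the tokenizer to prepend BOS and left off
# the trailing newline.
print(manual_prompt)
print(templated)
```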
tokenizer_config.json CHANGED
@@ -9,6 +9,7 @@
     "rstrip": false,
     "single_word": false
   },
+  "chat_template": "{{ bos_token }} {% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": {
     "__type": "AddedToken",