rasyosef committed on
Commit
fd20f2c
1 Parent(s): 4695c36

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -9,6 +9,7 @@ checkpoint = "microsoft/phi-2"
9
  # Download and load model and tokenizer
10
  tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
11
  model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.float32, device_map="cpu", trust_remote_code=True)
 
12
 
13
  # Text generation pipeline
14
  phi2 = pipeline("text-generation", tokenizer=tokenizer, model=model)
@@ -27,10 +28,10 @@ def generate(prompt, chat_history, max_new_tokens):
27
  final_prompt += "Output:"
28
 
29
  generated_text = phi2(final_prompt, max_new_tokens=max_new_tokens)[0]["generated_text"]
30
- response = generated_text.split("Output:")[1]
31
 
32
  if "User:" in response:
33
- response = response.split("User:")[0]
34
 
35
  if "Assistant:" in response:
36
  response = response.split("Assistant:")[1].strip()
 
9
  # Download and load model and tokenizer
10
  tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
11
  model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.float32, device_map="cpu", trust_remote_code=True)
12
+ model.config.eos_token_id = tokenizer.eos_token_id
13
 
14
  # Text generation pipeline
15
  phi2 = pipeline("text-generation", tokenizer=tokenizer, model=model)
 
28
  final_prompt += "Output:"
29
 
30
  generated_text = phi2(final_prompt, max_new_tokens=max_new_tokens)[0]["generated_text"]
31
+ response = generated_text[len(final_prompt):].strip()
32
 
33
  if "User:" in response:
34
+ response = response.split("User:")[0].strip()
35
 
36
  if "Assistant:" in response:
37
  response = response.split("Assistant:")[1].strip()