rasyosef committed on
Commit
a13b06e
1 Parent(s): 45ea516

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -6
app.py CHANGED
@@ -11,15 +11,11 @@ checkpoint = "microsoft/phi-2"
11
  tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
12
  model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.float32, device_map="cpu", trust_remote_code=True)
13
 
14
- # Streamer
15
- streamer = TextIteratorStreamer(tokenizer=tokenizer, skip_prompt=True)
16
-
17
  # Text generation pipeline
18
  phi2 = pipeline(
19
  "text-generation",
20
  tokenizer=tokenizer,
21
- model=model,
22
- streamer=streamer,
23
  pad_token_id=tokenizer.eos_token_id,
24
  eos_token_id=tokenizer.eos_token_id,
25
  device_map="cpu"
@@ -38,7 +34,9 @@ def generate(prompt, chat_history, max_new_tokens):
38
  final_prompt += "User: " + prompt + "\n"
39
  final_prompt += "Output:"
40
 
41
- thread = Thread(target=phi2, kwargs={"text_inputs":final_prompt, "max_new_tokens":max_new_tokens})
 
 
42
  thread.start()
43
 
44
  generated_text = ""
 
11
  tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
12
  model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.float32, device_map="cpu", trust_remote_code=True)
13
 
 
 
 
14
  # Text generation pipeline
15
  phi2 = pipeline(
16
  "text-generation",
17
  tokenizer=tokenizer,
18
+ model=model,
 
19
  pad_token_id=tokenizer.eos_token_id,
20
  eos_token_id=tokenizer.eos_token_id,
21
  device_map="cpu"
 
34
  final_prompt += "User: " + prompt + "\n"
35
  final_prompt += "Output:"
36
 
37
+ # Streamer
38
+ streamer = TextIteratorStreamer(tokenizer=tokenizer, skip_prompt=True)
39
+ thread = Thread(target=phi2, kwargs={"text_inputs":final_prompt, "max_new_tokens":max_new_tokens, "streamer":streamer})
40
  thread.start()
41
 
42
  generated_text = ""