freddyaboulton HF staff committed on
Commit
175b3a3
·
verified ·
1 Parent(s): 4d39435

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -2
app.py CHANGED
@@ -39,8 +39,11 @@ def generate(user_message: tuple[int, np.ndarray],
39
  history: list[dict],
40
  code: str):
41
 
42
- msg_text = whisper({"array": audio[1].astype(np.float32) / 32768.0, "sampling_rate": user_message[0]})["text"]
43
- history.append({"role": "user", "content": user_prompt.format(user_message=msg_text, code=code)})
 
 
 
44
  input_text = tokenizer.apply_chat_template(history, tokenize=False)
45
  inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
46
  outputs = model.generate(inputs, max_new_tokens=500, temperature=0.2, top_p=0.9, do_sample=True)
 
39
  history: list[dict],
40
  code: str):
41
 
42
+ msg_text = whisper({"array": user_message[1].astype(np.float32) / 32768.0, "sampling_rate": user_message[0]})["text"]
43
+ print("msg_text", msg_text)
44
+ user_msg_formatted = user_prompt.format(user_message=msg_text, code=code)
45
+ print("user_msg_formatted", user_msg_formatted)
46
+ history.append({"role": "user", "content": user_msg_formatted})
47
  input_text = tokenizer.apply_chat_template(history, tokenize=False)
48
  inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
49
  outputs = model.generate(inputs, max_new_tokens=500, temperature=0.2, top_p=0.9, do_sample=True)