karthik committed on
Commit
b31a175
1 Parent(s): 283994a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -9,12 +9,12 @@ client = InferenceClient(
9
def format_prompt(message, history):
    """Build a GPT4-Correct-template prompt for the model.

    Args:
        message: The new user message to append as the final turn.
        history: Iterable of (user_prompt, bot_response) pairs from prior turns.

    Returns:
        A single prompt string ending with an open "GPT4 Correct Assistant:"
        turn for the model to complete.
    """
    prompt = ""
    for user_prompt, bot_response in history:
        # BUG FIX: the original interpolated the undefined name `response`
        # (NameError on any non-empty history); use the loop variable.
        prompt += f"GPT4 Correct User: {user_prompt}<|end_of_turn|>GPT4 Correct Assistant: {bot_response}<|end_of_turn|>"
    prompt += f"GPT4 Correct User: {message}<|end_of_turn|>GPT4 Correct Assistant:"
    return prompt
15
 
16
  def generate(
17
- prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
18
  ):
19
  temperature = float(temperature)
20
  if temperature < 1e-2:
@@ -35,6 +35,8 @@ def generate(
35
  output = ""
36
 
37
  for response in stream:
 
 
38
  output += response.token.text
39
  yield output
40
  return output
 
9
def format_prompt(message, history):
    """Assemble a GPT4-Correct-template prompt from chat history plus the new message.

    Args:
        message: The latest user message, appended as an open final turn.
        history: Iterable of (user, assistant) message pairs from earlier turns.

    Returns:
        The full prompt string, ending with "GPT4 Correct Assistant:" so the
        model generates the next assistant reply.
    """
    # Render each completed exchange as a closed user/assistant turn pair.
    turns = [
        f"GPT4 Correct User: {user_msg}<|end_of_turn|>GPT4 Correct Assistant: {assistant_msg}<|end_of_turn|>"
        for user_msg, assistant_msg in history
    ]
    # Leave the final assistant turn open for the model to complete.
    turns.append(f"GPT4 Correct User: {message}<|end_of_turn|>GPT4 Correct Assistant:")
    return "".join(turns)
15
 
16
  def generate(
17
+ prompt, history, temperature=0.9, max_new_tokens=1024, top_p=0.95, repetition_penalty=1.0,
18
  ):
19
  temperature = float(temperature)
20
  if temperature < 1e-2:
 
35
  output = ""
36
 
37
  for response in stream:
38
+ if response.token.text=="<|end_of_turn|>":
39
+ break
40
  output += response.token.text
41
  yield output
42
  return output