ysharma (HF staff) committed on
Commit 6caa93d · 1 Parent(s): 59e004a

update stream code

Files changed (1): app.py (+17 −6)
app.py CHANGED
@@ -31,7 +31,7 @@ def translate_Chinese_English(chinese_text):
     return trans_eng_text[0]
 
 # Define generator to stream model predictions
-def predict_glm_stream(input, history=[]): #, top_p, temperature):
+def predict_glm_stream_old(input, history=[]): #, top_p, temperature):
     top_p = 1.0
     temperature = 1.0
     for response, history in model_glm.stream_chat(tokenizer_glm, input, history, top_p=1.0, temperature=1.0): #max_length=max_length,
@@ -42,7 +42,18 @@ def predict_glm_stream(input, history=[]): #, top_p, temperature):
         print(f"In for loop translated history is ^^- {history}")
         yield history, history #[history] + updates
 
-
+# Define function to generate model predictions and update the history
+def predict_glm_stream(input, history=[]): #, top_p, temperature):
+    for response, updates in model_glm.stream_chat(tokenizer_glm, input, history[-1] if history else history, top_p=1.0, temperature=1.0): #history
+        print(f"In for loop response is ^^- {response}")
+        print(f"In for loop updates is ^^- {updates}")
+        # translate Chinese to English
+        #history = [(query, translate_Chinese_English(response)) for query, response in history]
+        print(f"In for loop OG history is ^^- {history}")
+        print(f"In for loop translated history is ^^- {history+updates}")
+        yield history+updates
+
+
 """
 def predict(input, max_length, top_p, temperature, history=None):
     if history is None:
@@ -111,11 +122,11 @@ with gr.Blocks(css="""#col_container {margin-left: auto; margin-right: auto;}
 
 
     inputs.submit( predict_glm_stream,
-                   [inputs, state_glm, ],
-                   [chatbot_glm, state_glm],)
+                   [inputs, chatbot_glm, ], #[inputs, state_glm, ],
+                   [chatbot_glm],) #[chatbot_glm, state_glm],)
     b1.click( predict_glm_stream,
-              [inputs, state_glm, ],
-              [chatbot_glm, state_glm],)
+              [inputs, chatbot_glm, ], #[inputs, state_glm, ],
+              [chatbot_glm],) #[chatbot_glm, state_glm],)
 
     #b2.click(reset_chat, [chatbot_chatgpt, state_chatgpt], [chatbot_chatgpt, state_chatgpt])
     b2.click(reset_chat, [chatbot_glm, state_glm], [chatbot_glm, state_glm])
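
For context on the pattern this commit adopts: Gradio re-renders the bound output components on every yield of a generator event handler, which is why the rewritten predict_glm_stream can stream straight into chatbot_glm and drop the gr.State component from the submit/click wiring. Below is a minimal, self-contained sketch of that wiring; fake_stream_chat is a hypothetical stub standing in for model_glm.stream_chat, not ChatGLM's actual API.

import gradio as gr

# Hypothetical stand-in for model_glm.stream_chat: yields (response, history)
# pairs with a progressively longer response, mimicking a streaming chat model.
def fake_stream_chat(query, history):
    response = ""
    for token in ["Stream", "ing ", "reply", "..."]:
        response += token
        yield response, history + [(query, response)]

# Generator event handler: each yield pushes the partial history straight
# to the Chatbot component, so no separate gr.State is needed.
def predict_stream(user_input, history):
    history = history or []
    for _, updated in fake_stream_chat(user_input, history):
        yield updated

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    inputs = gr.Textbox()
    # The Chatbot is both an input (current history) and the streamed output,
    # mirroring the commit's switch from state_glm to chatbot_glm.
    inputs.submit(predict_stream, [inputs, chatbot], [chatbot])

demo.queue().launch()  # generator handlers require the queue in Gradio 3.x

Note that the sketch passes the full history back to the model on each turn, whereas the committed code passes only history[-1] (the most recent exchange); that truncation is the commit author's choice and is left out of the sketch for clarity.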