ysharma HF staff committed on
Commit
d5ae503
·
1 Parent(s): 11a43f0
Files changed (1) hide show
  1. app.py +29 -2
app.py CHANGED
@@ -26,7 +26,7 @@ from langchain.tools import AIPluginTool
26
  # Example system message : system = SystemMessage(content = "You are a helpful AI assistant")
27
 
28
  # driver
29
- def predict(user_input, chatbot):
30
 
31
  chat = ChatOpenAI(temperature=1.0, streaming=True, model='gpt-3.5-turbo-0613')
32
  messages=[]
@@ -43,14 +43,41 @@ def predict(user_input, chatbot):
43
  gpt_response = chat(messages)
44
  return gpt_response.content
45
 
 
46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
def echo_stream(message, history):
    """Demo handler: stream *message* back one character at a time.

    *history* is accepted to satisfy the ChatInterface handler signature
    but is unused.
    """
    for i in range(len(message)):
        time.sleep(0.3)
        # BUG FIX: slice to i + 1 so the stream starts with the first
        # character and the final yield contains the whole message
        # (the original `message[:i]` began with "" and dropped the
        # last character).
        yield message[:i + 1]
52
 
53
- ChatInterface(echo_stream).queue().launch(debug=True)
54
 
55
  #chatbot = gr.Chatbot()
56
  #gr.ChatInterface(predict, delete_last_btn="del").queue().launch(share=False, debug=True) #examples=["How are you?", "What's up?"],
 
26
  # Example system message : system = SystemMessage(content = "You are a helpful AI assistant")
27
 
28
  # driver
29
+ def predict_old(user_input, chatbot):
30
 
31
  chat = ChatOpenAI(temperature=1.0, streaming=True, model='gpt-3.5-turbo-0613')
32
  messages=[]
 
43
  gpt_response = chat(messages)
44
  return gpt_response.content
45
 
46
def predict(inputs, chatbot):
    """Stream a ChatGPT reply for *inputs*, given the prior conversation.

    Parameters
    ----------
    inputs : str
        The user's newest message.
    chatbot : list
        Gradio chat history as [(user, assistant), ...] pairs.

    Yields
    ------
    str
        The accumulated response text so far, so the Gradio ChatInterface
        shows the message growing as tokens stream in.
    """
    print(f'inputs is - {inputs}')
    print(f'chatbot is - {chatbot}')

    # Rebuild the OpenAI-style message list from Gradio's history pairs.
    messages = []
    for user_turn, assistant_turn in chatbot:
        messages.append({"role": "user", "content": user_turn})
        messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": inputs})
    print(f'messages is - {messages}')

    # a ChatCompletion request
    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=messages,
        temperature=1.0,
        stream=True  # for streaming the output to chatbot
    )
    print(f'response is - {response}')

    # BUG FIX: the original yielded only each delta fragment, so the UI
    # displayed just the newest token. ChatInterface expects every yield
    # to be the full message so far — accumulate and yield the running text.
    partial_message = ""
    for chunk in response:
        delta = chunk['choices'][0]['delta']
        if len(delta) != 0:
            partial_message += delta['content']
            yield partial_message
74
 
75
def echo_stream(message, history):
    """Demo handler: stream *message* back one character at a time.

    *history* is accepted to satisfy the ChatInterface handler signature
    but is unused.
    """
    for i in range(len(message)):
        time.sleep(0.3)
        # BUG FIX: slice to i + 1 so the stream starts with the first
        # character and the final yield contains the whole message
        # (the original `message[:i]` began with "" and dropped the
        # last character).
        yield message[:i + 1]
79
 
80
+ ChatInterface(predict).queue().launch(debug=True)
81
 
82
  #chatbot = gr.Chatbot()
83
  #gr.ChatInterface(predict, delete_last_btn="del").queue().launch(share=False, debug=True) #examples=["How are you?", "What's up?"],