import os
import time

import openai
import gradio as gr
from gradio import ChatInterface

# Read the OpenAI API key from the environment
openai.api_key = os.getenv("OPENAI_API_KEY")
# LangChain imports (only the pieces the code below actually uses)
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage,
)
# Question: how can one set up a system message for their chatbot while using ChatInterface?
# Example system message: system = SystemMessage(content="You are a helpful AI assistant")
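# One possible answer (a minimal sketch; the function name and prompt text here are
# illustrative, not from the original code): ChatInterface only passes
# (message, history) to the callback, so the system message can simply be
# prepended inside the callback before replaying the history.
def predict_with_system(user_input, chatbot):
    chat = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')
    # System message first, then the visible history, then the new user turn
    messages = [SystemMessage(content="You are a helpful AI assistant")]
    for conv in chatbot:
        messages.append(HumanMessage(content=conv[0]))
        messages.append(AIMessage(content=conv[1]))
    messages.append(HumanMessage(content=user_input))
    return chat(messages).content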
# Earlier LangChain-based driver, kept for reference
def predict_old(user_input, chatbot):
    chat = ChatOpenAI(temperature=1.0, streaming=True, model='gpt-3.5-turbo-0613')
    # Replay the visible chat history as alternating Human/AI messages
    messages = []
    for conv in chatbot:
        messages.append(HumanMessage(content=conv[0]))
        messages.append(AIMessage(content=conv[1]))
    messages.append(HumanMessage(content=user_input))
    # Get gpt-3.5-turbo's response
    gpt_response = chat(messages)
    return gpt_response.content
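# Note: predict_old returns the whole reply at once, so the UI does not stream
# even though ChatOpenAI was built with streaming=True; the generator-based
# predict below is what actually streams tokens into the chatbot.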
def predict(inputs, chatbot):
    print(f'inputs is - {inputs}')
    print(f'chatbot is - {chatbot}')
    # Rebuild the OpenAI message list from the history. In this Gradio version the
    # new message already arrives as the last history pair with a None response,
    # e.g. [["hi there", None]], so there is no need to append `inputs` separately.
    messages = []
    for conv in chatbot:
        messages.append({"role": "user", "content": conv[0]})
        if conv[1] is None:  # pending turn - the assistant has not replied yet
            break
        messages.append({"role": "assistant", "content": conv[1]})
    print(f'messages is - {messages}')
    # a streaming ChatCompletion request
    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=messages,  # e.g. [{'role': 'user', 'content': "What is life? Answer in three words."}]
        temperature=1.0,
        stream=True  # stream the output back to the chatbot token by token
    )
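    # For reference, each streamed chunk is a dict shaped roughly like this
    # (other fields omitted; based on the openai-python 0.27-era streaming format):
    # {"choices": [{"delta": {"content": "Hi"}, "index": 0, "finish_reason": None}]}
    # The final chunk carries an empty "delta" and a "stop" finish_reason.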
    # Accumulate the deltas and yield the growing reply so the UI streams
    partial_message = ""
    for chunk in response:
        # the final chunk has an empty delta, so guard before reading 'content'
        if len(chunk['choices'][0]['delta']) != 0:
            partial_message = partial_message + chunk['choices'][0]['delta']['content']
            yield partial_message
# Simple character-by-character echo, handy for testing ChatInterface streaming
def echo_stream(message, history):
    for i in range(len(message)):
        time.sleep(0.3)
        yield message[: i + 1]
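# Usage sketch: to try the echo demo instead of the OpenAI-backed one, swap the
# callback passed to ChatInterface below, e.g.
# ChatInterface(echo_stream).queue().launch(debug=True)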
ChatInterface(predict).queue().launch(debug=True)
#chatbot = gr.Chatbot()
#gr.ChatInterface(predict, delete_last_btn="del").queue().launch(share=False, debug=True)  #examples=["How are you?", "What's up?"]