####################################################
# With streaming: OpenAI ChatCompletion + Gradio ChatInterface
import os

import gradio as gr
import openai  # legacy pre-1.0 SDK interface (openai.ChatCompletion)

openai.api_key = os.environ["OPENAI_API_KEY"]

def predict(message, history):
    # Convert Gradio's [user, assistant] history pairs into OpenAI's message format
    history_openai_format = []
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})

    response = openai.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=history_openai_format,
        temperature=1.0,
        stream=True,
    )

    # Accumulate streamed tokens; yielding makes Gradio render the partial reply
    partial_message = ""
    for chunk in response:
        delta = chunk['choices'][0]['delta']
        if 'content' in delta:  # some chunks carry only role metadata, no content
            partial_message = partial_message + delta['content']
            yield partial_message

# .queue() is required so the generator can stream responses to the UI
gr.ChatInterface(predict).queue().launch()
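# A minimal sketch of the same streaming loop on the openai >= 1.0 client,
# shown for comparison (an assumption, not part of the original snippet;
# `predict_v1` is a hypothetical name, and this variant requires openai >= 1.0):
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def predict_v1(message, history):
    history_openai_format = []
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})

    stream = client.chat.completions.create(
        model='gpt-3.5-turbo',
        messages=history_openai_format,
        temperature=1.0,
        stream=True,
    )

    partial_message = ""
    for chunk in stream:
        # delta.content is None for role-only and final stop chunks
        if chunk.choices[0].delta.content is not None:
            partial_message += chunk.choices[0].delta.content
            yield partial_message

# To use this variant: gr.ChatInterface(predict_v1).queue().launch()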
##########################################################
# LangChain example: OpenAI via LangChain's ChatOpenAI
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage
import gradio as gr
import os

os.environ["OPENAI_API_KEY"] = "sk-..."  # Replace with your key

llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')

def predict(message, history):
    # Convert Gradio's [user, assistant] history pairs into LangChain messages
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    history_langchain_format.append(HumanMessage(content=message))
    gpt_response = llm(history_langchain_format)
    return gpt_response.content

gr.ChatInterface(predict).launch()
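# Note: this LangChain version returns the whole reply at once, so no .queue()
# is needed; Gradio only streams when the chat function yields partial strings.
# On newer LangChain releases the equivalent call would look like the following
# (an assumption, requires the langchain-openai package; not part of the
# original snippet):
#
#   from langchain_openai import ChatOpenAI
#   llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')
#   gpt_response = llm.invoke(history_langchain_format)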