import os
import time

import gradio as gr
import openai

# Read the OpenAI API key from the OPENAI_API_KEY environment variable
openai.api_key = os.getenv("OPENAI_API_KEY")

# LangChain pieces used by the legacy predict_old() path below
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage,
)

# Question: how can one set up a system message for a chatbot while using ChatInterface?
# Example system message: system = SystemMessage(content="You are a helpful AI assistant")
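
# One possible answer (a sketch, not an official ChatInterface feature):
# prepend a {"role": "system", ...} entry to the OpenAI-format message list
# before calling the API. The function name and default prompt below are
# illustrative assumptions, not part of the original app.
def predict_with_system(message, history, system_prompt="You are a helpful AI assistant"):
    # ChatInterface calls fn(message, history), so the third parameter
    # falls back to its default when wired into the UI.
    messages = [{"role": "system", "content": system_prompt}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg is not None:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo", messages=messages, temperature=1.0
    )
    return response["choices"][0]["message"]["content"]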

# Legacy driver: rebuilds the history as LangChain messages and calls
# ChatOpenAI directly. Kept for reference; the app uses predict() below.
def predict_old(user_input, chatbot):
    chat = ChatOpenAI(temperature=1.0, streaming=True, model="gpt-3.5-turbo-0613")

    # Rebuild history as alternating Human/AI messages
    messages = []
    for conv in chatbot:
        messages.append(HumanMessage(content=conv[0]))
        messages.append(AIMessage(content=conv[1]))

    messages.append(HumanMessage(content=user_input))

    # Get gpt-3.5-turbo's response
    gpt_response = chat(messages)
    return gpt_response.content

def predict(inputs, chatbot):
    # Rebuild the conversation history in the OpenAI message format. A turn
    # whose assistant reply is still None (a pending turn) is skipped.
    messages = []
    for conv in chatbot:
        messages.append({"role": "user", "content": conv[0]})
        if conv[1] is not None:
            messages.append({"role": "assistant", "content": conv[1]})

    # ChatInterface passes the history without the current message, so it
    # has to be appended here or the model never sees the new input.
    messages.append({"role": "user", "content": inputs})

    # A streaming ChatCompletion request
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=1.0,
        stream=True,  # stream the output to the chatbot
    )

    # Accumulate the streamed delta tokens and yield the growing reply so
    # that ChatInterface updates the chatbot incrementally.
    partial_message = ""
    for chunk in response:
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:
            partial_message += delta["content"]
            yield partial_message

def echo_stream(message, history):
    # Demo handler: stream the user's message back one character at a time
    for i in range(len(message)):
        time.sleep(0.3)
        yield message[: i + 1]
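
# To try the character-streaming echo demo instead of the OpenAI-backed
# handler, it could be wired up the same way (untested sketch):
# gr.ChatInterface(echo_stream).queue().launch(debug=True)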

# Wire the streaming handler into a ChatInterface; queue() is required for
# generator-based (streaming) functions.
gr.ChatInterface(predict).queue().launch(debug=True)

# Alternative wiring kept from earlier experiments:
# chatbot = gr.Chatbot()
# gr.ChatInterface(predict, delete_last_btn="del").queue().launch(share=False, debug=True)  # examples=["How are you?", "What's up?"]