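# Gradio chatbot experiments: earlier attempts are kept below as commented-out
# code; the active implementation (LangChain + OpenAI) is at the bottom.
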
# import gradio as gr
# from huggingface_hub import InferenceClient

# """
# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
# """
# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


# def respond(
#     message,
#     history: list[tuple[str, str]],
#     system_message,
#     max_tokens,
#     temperature,
#     top_p,
# ):
#     messages = [{"role": "system", "content": system_message}]

#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})

#     messages.append({"role": "user", "content": message})

#     response = ""

#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         # delta.content can be None on some stream chunks; guard against it
#         token = message.choices[0].delta.content or ""

#         response += token
#         yield response

# """
# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
# """
# demo = gr.ChatInterface(
#     respond,
#     additional_inputs=[
#         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
#         gr.Slider(
#             minimum=0.1,
#             maximum=1.0,
#             value=0.95,
#             step=0.05,
#             label="Top-p (nucleus sampling)",
#         ),
#     ],
# )


# if __name__ == "__main__":
#     demo.launch()




# import gradio as gr

# def fake(message, history):
#     if message.strip():
#         # Instead of returning audio directly, return a message
#         return "Playing sample audio...", gr.Audio("https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav")
#     else:
#         return "Please provide the name of an artist", None

# with gr.Blocks() as demo:
#     chatbot = gr.Chatbot(placeholder="Play music by any artist!")
#     textbox = gr.Textbox(placeholder="Which artist's music do you want to listen to?", scale=7)
#     audio_player = gr.Audio()

#     def chat_interface(message, history):
#         response, audio = fake(message, history)
#         return history + [(message, response)], audio

#     textbox.submit(chat_interface, [textbox, chatbot], [chatbot, audio_player])

# demo.launch()
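
# Simpler alternative (a minimal sketch of the pattern shown in the Gradio
# ChatInterface docs): the chat function can return a gr.Audio component
# directly and ChatInterface renders it inside the chatbot, so no separate
# player needs to be wired up. `play_music` is an illustrative name.
# import gradio as gr

# def play_music(message, history):
#     if message.strip():
#         return gr.Audio("https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav")
#     return "Please provide the name of an artist"

# gr.ChatInterface(play_music).launch()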

# import gradio as gr
# import random
# def random_response(message, history):
#     return random.choice(["Yes", "No"])

# gr.ChatInterface(random_response).launch()



# import gradio as gr

# def yes_man(message, history):
#     if message.endswith("?"):
#         return "Yes"
#     else:
#         return "Ask me anything!"

# gr.ChatInterface(
#     yes_man,
#     chatbot=gr.Chatbot(placeholder="<strong>Ask me a yes or no question</strong><br>Ask me anything"),
#     textbox=gr.Textbox(placeholder="Ask me a yes or no question", container=False, scale=15),
#     title="Yes Man",
#     description="Ask Yes Man any question",
#     theme="soft",
#     examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],  # plain strings; the dict form is only for multimodal ChatInterface
#     cache_examples=True,
#     retry_btn=None,
#     undo_btn="Delete Previous",
#     clear_btn="Clear",
# ).launch()

# below code was not working: gr.Chatbot expects a list of (user, bot) message
# pairs, not a plain string, so returning an f-string raised an error. Fixed
# by appending the message to the existing chat history.
# import gradio as gr

# def count_files(files, history):
#     num_files = len(files) if files else 0
#     history = history or []
#     return history + [(None, f"You uploaded {num_files} file(s)")]

# with gr.Blocks() as demo:
#     with gr.Row():
#         chatbot = gr.Chatbot()
#         file_input = gr.Files(label="Upload Files")
#     file_input.change(count_files, inputs=[file_input, chatbot], outputs=chatbot)

# demo.launch()


# new code
import os

from langchain.chat_models import ChatOpenAI  # moved to langchain_openai in newer LangChain releases
from langchain.schema import AIMessage, HumanMessage
import gradio as gr

os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"  # Replace with your key; never commit a real key to source

llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')

def predict(message, history):
    # Convert Gradio's messages-format history (dicts with "role"/"content")
    # into LangChain message objects.
    history_langchain_format = []
    for msg in history:
        if msg['role'] == "user":
            history_langchain_format.append(HumanMessage(content=msg['content']))
        elif msg['role'] == "assistant":
            history_langchain_format.append(AIMessage(content=msg['content']))
    history_langchain_format.append(HumanMessage(content=message))
    gpt_response = llm(history_langchain_format)
    return gpt_response.content

# type="messages" makes Gradio pass history as role/content dicts,
# matching what predict() expects above.
gr.ChatInterface(predict, type="messages").launch()
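
# Optional streaming variant (a minimal sketch, assuming the installed
# LangChain version exposes the Runnable .stream() API on ChatOpenAI):
# yielding growing partial text lets gr.ChatInterface render the reply
# token by token instead of all at once. `predict_stream` is an
# illustrative name, not part of the original app.
# def predict_stream(message, history):
#     msgs = []
#     for m in history:
#         if m['role'] == "user":
#             msgs.append(HumanMessage(content=m['content']))
#         elif m['role'] == "assistant":
#             msgs.append(AIMessage(content=m['content']))
#     msgs.append(HumanMessage(content=message))
#     partial = ""
#     for chunk in llm.stream(msgs):
#         partial += chunk.content or ""
#         yield partial

# gr.ChatInterface(predict_stream, type="messages").launch()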