# gpt_demo / app.py
# Gradio chat demo: wraps g4f providers in a LangChain LLMChain behind
# a gr.ChatInterface, with selectable prompt sets loaded from ./prompt_set.
import g4f
import gradio as gr
from g4f.Provider import (
Ails,
You,
Bing,
Yqcloud,
Theb,
Aichat,
Bard,
Vercel,
Forefront,
Lockchat,
Liaobots,
H2o,
ChatgptLogin,
DeepAi,
GetGpt
)
import os
import json
import pandas as pd
from models_for_langchain.model import CustomLLM
from langchain.memory import ConversationBufferWindowMemory, ConversationTokenBufferMemory
from langchain import LLMChain, PromptTemplate
from langchain.prompts import (
ChatPromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# Name -> g4f Provider class, used to resolve the provider chosen in the
# UI dropdown (bot() copies the selected key onto llm.provider_name).
provider_dict = {
    'Ails': Ails,
    'You': You,
    'Bing': Bing,
    'Yqcloud': Yqcloud,
    'Theb': Theb,
    'Aichat': Aichat,
    'Bard': Bard,
    'Vercel': Vercel,
    'Forefront': Forefront,
    'Lockchat': Lockchat,
    'Liaobots': Liaobots,
    'H2o': H2o,
    'ChatgptLogin': ChatgptLogin,
    'DeepAi': DeepAi,
    'GetGpt': GetGpt
}
def change_prompt_set(prompt_set_name):
    """Refresh the prompt-name dropdown after a new prompt set is chosen.

    Args:
        prompt_set_name: Key into the module-level ``prompt_set_list`` dict.

    Returns:
        A Gradio update object replacing the dropdown's ``choices`` with the
        prompt names available in the selected set.
    """
    # gr.Dropdown.update is deprecated (removed in Gradio 4); gr.update is
    # the supported form and matches change_prompt below.
    return gr.update(choices=list(prompt_set_list[prompt_set_name].keys()))
def change_prompt(prompt_set_name, prompt_name):
    """Push the selected prompt's text into the system-message textbox.

    Args:
        prompt_set_name: Key of the active prompt set in ``prompt_set_list``.
        prompt_name: Key of the chosen prompt within that set.

    Returns:
        A Gradio update object carrying the prompt text as the new value.
    """
    selected_text = prompt_set_list[prompt_set_name][prompt_name]
    return gr.update(value=selected_text)
def user(user_message, history):
    """Record the user's message and lock/clear the input box.

    Args:
        user_message: Text the user just submitted.
        history: Chat history as a list of [user, bot] pairs.

    Returns:
        Tuple of (textbox update clearing and disabling the input,
        history extended with the new message and a pending bot slot).
    """
    cleared_input = gr.update(value="", interactive=False)
    extended_history = history + [[user_message, None]]
    return cleared_input, extended_history
def bot(message, history, model_name, provider_name, system_msg):
    """Produce a reply via the LangChain chain, streamed one character at a time.

    Args:
        message: Latest user message.
        history: Chat history from gr.ChatInterface (unused here; the
            conversation state lives in the module-level LangChain memory).
        model_name: g4f model identifier, e.g. 'gpt-3.5-turbo'.
        provider_name: Key selecting a g4f backend (see ``provider_dict``).
        system_msg: System instruction substituted into the prompt template.

    Yields:
        Progressively longer prefixes of the full reply, simulating streaming.
    """
    global template, memory
    # Bound over-long system prompts: keep the first 2000 and last 1000 chars.
    if len(system_msg) > 3000:
        system_msg = system_msg[:2000] + system_msg[-1000:]
    llm.model_name = model_name
    llm.provider_name = provider_name
    chain_prompt = PromptTemplate(
        input_variables=["chat_history", "human_input"],
        template=template.format(system_instruction=system_msg),
    )
    chain = LLMChain(
        llm=llm,
        prompt=chain_prompt,
        verbose=False,
        memory=memory,
    )
    full_reply = chain.run(message)
    # Yield one extra character per step, from the first char to the whole reply.
    for end in range(1, len(full_reply) + 1):
        yield full_reply[:end]
def empty_chat():
    """Reset the conversation by swapping in a fresh LangChain memory buffer.

    Returns:
        None, which clears the chatbot component when wired as an output.
    """
    global memory
    # A new window memory (last 10 exchanges) discards the old transcript.
    fresh_memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
    memory = fresh_memory
    return None
# Load every prompt collection from ./prompt_set into
# prompt_set_list: {file name: {act/title: prompt text}}.
# CSV files must carry 'act' and 'prompt' columns; any other file is parsed
# as JSON: a list of objects with 'act' and 'prompt' keys.
prompt_set_list = {}
for prompt_file in os.listdir("prompt_set"):
    path = os.path.join("prompt_set", prompt_file)
    # endswith, not substring: a name like "a.csv.json" must go to the
    # JSON branch, which the original `'.csv' in key` test got wrong.
    if prompt_file.endswith('.csv'):
        df = pd.read_csv(path)
        prompt_dict = dict(zip(df['act'], df['prompt']))
    else:
        with open(path, encoding='utf-8') as f:
            ds = json.load(f)
        prompt_dict = {item["act"]: item["prompt"] for item in ds}
    prompt_set_list[prompt_file] = prompt_dict
with gr.Blocks() as demo:
    # Single shared LLM wrapper; bot() mutates its model/provider per request.
    llm = CustomLLM()
    # Outer template: {system_instruction} is filled by str.format in bot();
    # the doubled braces {{chat_history}}/{{human_input}} survive that format
    # call and become LangChain prompt variables.
    template = """
Chat with human based on following instructions:
```
{system_instruction}
```
The following is a conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
{{chat_history}}
Human: {{human_input}}
Chatbot:"""
    # Conversation memory shared with bot()/empty_chat(); last 10 exchanges.
    memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
    with gr.Row():
        # User-facing labels are Chinese: 模型=model, 提供者=provider,
        # 系统提示=system prompt.
        model_name = gr.Dropdown(['gpt-3.5-turbo', 'gpt-4'], value='gpt-3.5-turbo', label='模型')
        provider = gr.Dropdown(provider_dict.keys(), value='GetGpt', label='提供者', min_width=20)
        system_msg = gr.Textbox(value="你是一名助手,可以解答问题。", label='系统提示')
    # Chat UI: the three components above are appended to bot()'s arguments
    # after (message, history).
    gr.ChatInterface(bot,
                     additional_inputs=[
                         model_name,
                         provider,
                         system_msg]
                     )
    with gr.Row():
        # Prompt-set pickers: 提示词集合=prompt set, 提示词=prompt.
        default_prompt_set = "1 中文提示词.json"
        prompt_set_name = gr.Dropdown(prompt_set_list.keys(), value=default_prompt_set, label='提示词集合')
        prompt_name = gr.Dropdown(prompt_set_list[default_prompt_set].keys(), label='提示词', min_width=20)
        # Selecting a set refreshes the prompt dropdown; selecting a prompt
        # writes its text into the system-message textbox.
        prompt_set_name.select(change_prompt_set, prompt_set_name, prompt_name)
        prompt_name.select(change_prompt, [prompt_set_name, prompt_name], system_msg)

demo.title = "AI Chat"
demo.queue()  # enable queuing so the generator-based bot() can stream
demo.launch()