|
import gradio as gr |
|
from langchain.agents import initialize_agent |
|
|
|
|
|
|
|
from langchain.tools import BaseTool, StructuredTool, Tool, tool |
|
from PIL import Image |
|
from demotool import * |
|
from loader import * |
|
|
|
import re |
|
from gradio_tools.tools import (StableDiffusionTool, ImageCaptioningTool, StableDiffusionPromptGeneratorTool, |
|
TextToVideoTool) |
|
|
|
from langchain.memory import ConversationBufferMemory |
|
|
|
|
|
from langchain import PromptTemplate, HuggingFaceHub, LLMChain |
|
|
|
def init_model_config():
    """Build and return a loaded ChatGLM language model.

    Instantiates ``ChatLLM`` (from loader), points it at the
    int4-quantized ChatGLM-6B checkpoint registered in
    ``llm_model_dict``, loads the weights, and returns the ready model.
    """
    model = ChatLLM()
    model.model_type = 'chatglm'
    # Checkpoint path comes from the project-level model registry.
    model.model_name_or_path = llm_model_dict['chatglm']['ChatGLM-6B-int4']
    model.load_llm()
    return model
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Conversation memory shared with the agent; prior turns are stored under
# the "chat_history" key that the conversational-react prompt expects.
memory = ConversationBufferMemory(memory_key="chat_history")

# Tools exposed to the agent. Currently empty — tools imported from
# demotool / gradio_tools could be registered here.
tools = []

# Conversational ReAct agent driven by the local ChatGLM model built above.
# verbose=True prints the agent's reasoning trace to stdout.
agent = initialize_agent(tools, init_model_config(), memory=memory, agent="conversational-react-description", verbose=True)
|
|
|
def run_text(text, state):
    """Run one chat turn through the agent and append it to the history.

    Parameters
    ----------
    text : str
        The user's input message from the textbox.
    state : list[tuple[str, str]]
        Gradio chat history as (user, bot) message pairs.

    Returns
    -------
    tuple
        ``(new_state, new_state)`` — the updated history twice, wired to
        the Chatbot display and the State component respectively.
    """
    res = agent.run(input=text)
    # Wrap any image file paths ("image/...png") the agent emits in
    # Markdown so Gradio renders them inline. Raw string keeps "\S" a
    # regex escape rather than an invalid Python string escape.
    response = re.sub(
        r'(image/\S*png)',
        lambda m: f'![](/file={m.group(0)})*{m.group(0)}*',
        res,
    )
    # Build a new list (rather than mutating in place) so Gradio sees a
    # fresh state object.
    state = state + [(text, response)]
    return state, state
|
|
|
# --- Gradio UI: a single-column chat interface over run_text ------------
# The css caps the chat area at 500px with its own scrollbar.
with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:

    chatbot = gr.Chatbot(elem_id="chatbot",show_label=False)

    # Chat history carried between calls as a list of (user, bot) pairs.
    state = gr.State([])

    with gr.Row() as input_raws:

        with gr.Column(scale=0.6):

            txt = gr.Textbox(show_label=False).style(container=False)

        with gr.Column(scale=0.20, min_width=0):

            run = gr.Button("🏃♂️Run")

        with gr.Column(scale=0.20, min_width=0):

            # NOTE(review): this Clear button is never wired to a callback
            # below — clicking it does nothing. Confirm whether a
            # clear.click(...) handler was intended.
            clear = gr.Button("🔄Clear️")

    # Enter in the textbox runs the agent, then a second handler empties
    # the textbox (handlers fire in registration order).
    txt.submit(run_text, [txt, state], [chatbot,state])

    txt.submit(lambda: "", None, txt)

    # The Run button triggers the same handler as pressing Enter.
    run.click(run_text, [txt, state], [chatbot,state])

# Queue requests (up to 10 concurrent) and serve on all interfaces, port 7865.
demo.queue(concurrency_count=10).launch(server_name="0.0.0.0", server_port=7865)