"""Gradio chat UI backed by a LangChain conversational agent.

A local chat model is loaded via ``ModelLoader`` and wrapped in a LangChain
``conversational-react-description`` agent with conversation-buffer memory.
The Gradio Blocks UI feeds user text to the agent and renders any image
paths in the reply as inline markdown images.
"""

import re

import gradio as gr
from PIL import Image
from gradio_tools.tools import (StableDiffusionTool, ImageCaptioningTool,
                                StableDiffusionPromptGeneratorTool,
                                TextToVideoTool)
from langchain import PromptTemplate, HuggingFaceHub, LLMChain
from langchain.agents import initialize_agent
from langchain.memory import ConversationBufferMemory
from langchain.tools import BaseTool, StructuredTool, Tool, tool

from demotool import *
from loader import *
from llmLoader import *

# NOTE(review): earlier revisions of this file carried commented-out code
# containing a HuggingFace API token and an OpenAI API key.  Secrets must
# never be committed to source — if those keys are still active, revoke them
# and load replacements from environment variables instead.

# Load the local chat model.  ModelLoader is provided by one of the star
# imports above — presumably llmLoader; verify against that module.
chatLLMm = ModelLoader()
chatLLMm.load_model()

# Conversation memory shared across all turns of the agent.
memory = ConversationBufferMemory(memory_key="chat_history")

# No tools enabled for now; earlier experiments (search, text-to-image,
# prompt generation, optimizationProblem) were disabled.
tools = []

agent = initialize_agent(
    tools,
    chatLLMm,
    memory=memory,
    agent="conversational-react-description",
    verbose=True,
)

# Matches tool-emitted image paths like "image/xxx.png" so they can be
# rendered inline.  Raw string avoids the invalid "\S" escape warning;
# compiled once instead of on every chat turn.
_IMAGE_PATH_RE = re.compile(r'(image/\S*png)')


def run_text(text, state):
    """Run one chat turn through the agent.

    Args:
        text: The user's input message.
        state: Chat history as a list of ``(user, bot)`` tuples.

    Returns:
        The updated history twice — Gradio writes the same value to both
        the Chatbot component and the State.
    """
    res = agent.run(input=text)
    # Rewrite any "image/....png" path into markdown so Gradio shows the
    # image inline, followed by the path in italics.
    response = _IMAGE_PATH_RE.sub(
        lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res)
    state = state + [(text, response)]
    return state, state


with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:
    chatbot = gr.Chatbot(elem_id="chatbot", show_label=False)
    state = gr.State([])
    with gr.Row() as input_raws:
        with gr.Column(scale=0.6):
            txt = gr.Textbox(show_label=False).style(container=False)
        with gr.Column(scale=0.20, min_width=0):
            run = gr.Button("🏃‍♂️Run")
        with gr.Column(scale=0.20, min_width=0):
            clear = gr.Button("🔄Clear️")
    # Enter key and Run button both submit a turn; the textbox is cleared
    # after submit.
    txt.submit(run_text, [txt, state], [chatbot, state])
    txt.submit(lambda: "", None, txt)
    run.click(run_text, [txt, state], [chatbot, state])
    # Fix: the Clear button previously had no callback wired — reset both
    # the visible chat log and the stored history.
    clear.click(lambda: ([], []), None, [chatbot, state])

demo.queue(concurrency_count=10).launch(server_name="0.0.0.0",
                                        server_port=7865)