File size: 3,028 Bytes
b5e593e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import gradio as gr
from langchain.agents import initialize_agent
# from langchain.llms import OpenAI
# from langchain.chat_models import ChatOpenAI

from langchain.tools import BaseTool, StructuredTool, Tool, tool
from PIL import Image
from demotool import *
from loader import *
# from llmLoader import *
import re
from gradio_tools.tools import (StableDiffusionTool, ImageCaptioningTool, StableDiffusionPromptGeneratorTool,
                                TextToVideoTool)

from langchain.memory import ConversationBufferMemory
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

from langchain import PromptTemplate, HuggingFaceHub, LLMChain

def init_model_config():
    """Construct and load the local ChatGLM model that powers the agent.

    Returns:
        A `ChatLLM` instance (from `loader`) with the ChatGLM-6B-int4
        checkpoint loaded and ready for inference.
    """
    model = ChatLLM()
    model.model_type = 'chatglm'
    # Resolve the checkpoint path from the loader's model registry.
    model.model_name_or_path = llm_model_dict['chatglm']['ChatGLM-6B-int4']
    model.load_llm()
    return model

# initialize HF LLM
# flan_t5 = HuggingFaceHub(
#     repo_id="google/flan-t5-xl",
#     model_kwargs={"temperature":1e-10},
#     huggingfacehub_api_token="<HF_API_TOKEN>"  # SECURITY: a real token was committed here — revoke it and load from an env var instead
# )


# llm = ChatOpenAI(openai_api_key="<OPENAI_API_KEY>",temperature=0,streaming=True,callbacks=[StreamingStdOutCallbackHandler()])  # SECURITY: a real key was committed here — revoke it and load from an env var instead

# llm = ModelLoader()
# llm.loader()
# chatLLM = ModelLoader()
# chatLLM.loader()
# Conversation memory shared across turns; "chat_history" is the prompt
# variable name expected by the conversational-react-description agent.
memory = ConversationBufferMemory(memory_key="chat_history")

# No tools registered: the agent falls back to plain LLM conversation.
# (Earlier experiments wired Text2Image, a Search tool, optimizationProblem,
# and StableDiffusionPromptGeneratorTool here — re-add to this list to enable.)
tools = []

# Conversational ReAct agent driven by the locally loaded ChatGLM model.
agent = initialize_agent(
    tools,
    init_model_config(),
    memory=memory,
    agent="conversational-react-description",
    verbose=True,
)

def run_text(text, state):
    """Send user text to the agent and append the exchange to the chat state.

    Args:
        text: the user's message from the textbox.
        state: accumulated list of (user, bot) message pairs.

    Returns:
        (new_state, new_state) — duplicated because the Gradio wiring routes
        the same value to both the Chatbot display and the State component.
    """
    res = agent.run(input=text)
    # Rewrite any "image/<name>.png" path the agent emitted into Markdown
    # (inline image plus italic caption) so Gradio renders it. Raw string
    # fixes the invalid "\S" escape in the original non-raw pattern, which
    # raises a DeprecationWarning and will become a SyntaxError.
    response = re.sub(
        r'(image/\S*png)',
        lambda m: f'![](/file={m.group(0)})*{m.group(0)}*',
        res,
    )
    # Build a new list rather than mutating the incoming state in place.
    state = state + [(text, response)]
    return state, state

# --- Gradio UI ---------------------------------------------------------------
# Single-page chat layout: scrollable chatbot pane on top, one row below with
# the text input and Run/Clear buttons.
with gr.Blocks(css="#chatbot {overflow:auto; height:500px;}") as demo:
    chatbot = gr.Chatbot(elem_id="chatbot",show_label=False)
    # Per-session chat history: list of (user, bot) tuples passed to run_text.
    state = gr.State([])
    with gr.Row() as input_raws:
        with gr.Column(scale=0.6):
            # NOTE(review): .style() is deprecated in newer Gradio releases;
            # pass container=False to the Textbox constructor when upgrading.
            txt = gr.Textbox(show_label=False).style(container=False)
        with gr.Column(scale=0.20, min_width=0):
            run = gr.Button("🏃‍♂️Run")
        with gr.Column(scale=0.20, min_width=0):
            clear = gr.Button("🔄Clear️")

    # Both pressing Enter in the textbox and clicking Run invoke the agent;
    # a second submit handler clears the textbox afterwards.
    txt.submit(run_text, [txt, state], [chatbot,state])
    txt.submit(lambda: "", None, txt)
    run.click(run_text, [txt, state], [chatbot,state])
    # NOTE(review): the Clear button has no click handler wired — presumably
    # it should reset `state` and `chatbot`; confirm intent.
 
# Queue up to 10 concurrent requests; listen on all interfaces, port 7865.
# NOTE(review): queue(concurrency_count=...) is removed in Gradio 4.x — use
# default_concurrency_limit when upgrading.
demo.queue(concurrency_count=10).launch(server_name="0.0.0.0", server_port=7865)