import uuid
import gradio as gr
import re
from diffusers.utils import load_image
import requests
from awesome_chat import chat_huggingface
import os

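# Local folders where referenced and downloaded media are cached so Gradio can serve them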
os.makedirs("public/images", exist_ok=True)
os.makedirs("public/audios", exist_ok=True)
os.makedirs("public/videos", exist_ok=True)

HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
OPENAI_KEY = os.environ.get("OPENAI_KEY")

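# Per-session client: holds the user's API credentials and the full chat history sent to HuggingGPT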
class Client:
    def __init__(self) -> None:
        self.OPENAI_KEY = OPENAI_KEY
        self.HUGGINGFACE_TOKEN = HUGGINGFACE_TOKEN
        self.all_messages = []

    def set_key(self, openai_key):
        self.OPENAI_KEY = openai_key
        return self.OPENAI_KEY

    def set_token(self, huggingface_token):
        self.HUGGINGFACE_TOKEN = huggingface_token
        return self.HUGGINGFACE_TOKEN
    
    def add_message(self, content, role):
        message = {"role":role, "content":content}
        self.all_messages.append(message)

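    # Scan a message for image, audio, and video references (URLs or local paths) by file extension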
    def extract_medias(self, message):
        # url_pattern = re.compile(r"(http(s?):|\/)?([\.\/_\w:-])*?")
        urls = []
        # for match in url_pattern.finditer(message):
        #     if match.group(0) not in urls:
        #         urls.append(match.group(0))

        image_pattern = re.compile(r"(http(s?):|\/)?([\.\/_\w:-])*?\.(jpg|jpeg|tiff|gif|png)")
        image_urls = []
        for match in image_pattern.finditer(message):
            if match.group(0) not in image_urls:
                image_urls.append(match.group(0))

        audio_pattern = re.compile(r"(http(s?):|\/)?([\.\/_\w:-])*?\.(flac|wav)")
        audio_urls = []
        for match in audio_pattern.finditer(message):
            if match.group(0) not in audio_urls:
                audio_urls.append(match.group(0))

        video_pattern = re.compile(r"(http(s?):|\/)?([\.\/_\w:-])*?\.(mp4)")
        video_urls = []
        for match in video_pattern.finditer(message):
            if match.group(0) not in video_urls:
                video_urls.append(match.group(0))

        return urls, image_urls, audio_urls, video_urls

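    # Append the user's message to the history and copy any referenced media under public/ so the chatbot can display them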
    def add_text(self, messages, message):
        if not self.OPENAI_KEY or not self.OPENAI_KEY.startswith("sk-") or not self.HUGGINGFACE_TOKEN or not self.HUGGINGFACE_TOKEN.startswith("hf_"):
            return messages, "Please set your OpenAI API key and Hugging Face token first!!!"
        self.add_message(message, "user")
        messages = messages + [(message, None)]
        urls, image_urls, audio_urls, video_urls = self.extract_medias(message)

        for image_url in image_urls:
            if not image_url.startswith("http") and not image_url.startswith("public"):
                image_url = "public/" + image_url
            image = load_image(image_url)
            name = f"public/images/{str(uuid.uuid4())[:4]}.jpg" 
            image.save(name)
            messages = messages + [((f"{name}",), None)]
        for audio_url in audio_urls:
            if audio_url.startswith("http"):
                # remote audio: download it into the public folder
                ext = audio_url.split(".")[-1]
                name = f"public/audios/{str(uuid.uuid4())[:4]}.{ext}"
                response = requests.get(audio_url)
                with open(name, "wb") as f:
                    f.write(response.content)
            else:
                # local file: just make sure the path is rooted under public/
                name = audio_url if audio_url.startswith("public") else "public/" + audio_url
            messages = messages + [((f"{name}",), None)]
        for video_url in video_urls:
            if video_url.startswith("http"):
                # remote video: download it into the public folder
                ext = video_url.split(".")[-1]
                name = f"public/videos/{str(uuid.uuid4())[:4]}.{ext}"
                response = requests.get(video_url)
                with open(name, "wb") as f:
                    f.write(response.content)
            else:
                # local file: just make sure the path is rooted under public/
                name = video_url if video_url.startswith("public") else "public/" + video_url
            messages = messages + [((f"{name}",), None)]
        return messages, ""

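    # Run one HuggingGPT turn via chat_huggingface and attach any media paths found in the reply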
    def bot(self, messages):
        if not self.OPENAI_KEY or not self.OPENAI_KEY.startswith("sk-") or not self.HUGGINGFACE_TOKEN or not self.HUGGINGFACE_TOKEN.startswith("hf_"):
            return messages, {}
        message, results = chat_huggingface(self.all_messages, self.OPENAI_KEY, self.HUGGINGFACE_TOKEN)
        urls, image_urls, audio_urls, video_urls = self.extract_medias(message)
        self.add_message(message, "assistant")
        messages[-1][1] = message
        for image_url in image_urls:
            if not image_url.startswith("http"):
                image_url = image_url.replace("public/", "")
                messages = messages + [(None, (f"public/{image_url}",))]
            # else:
            #     messages = messages + [((None, (f"{image_url}",)))]
        for audio_url in audio_urls:
            if not audio_url.startswith("http"):
                audio_url = audio_url.replace("public/", "")
                messages = messages + [(None, (f"public/{audio_url}",))]
            # else:
            #     messages = messages + [((None, (f"{audio_url}",)))]
        for video_url in video_urls:
            if not video_url.startswith("http"):
                video_url = video_url.replace("public/", "")
                messages = messages + [(None, (f"public/{video_url}",))]
            # else:
            #     messages = messages + [((None, (f"{video_url}",)))]
        # convert integer keys to strings so the results render correctly in the JSON component
        results = {str(k): v for k, v in results.items()}
        return messages, results
    
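# Build the Gradio UI: optional key/token inputs, a chat pane, a JSON pane for intermediate results, and a text box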
css = ".json {height: 527px; overflow: scroll;} .json-holder {height: 527px; overflow: scroll;}"
with gr.Blocks(css=css) as demo:
    state = gr.State(value={"client": Client()})
    gr.Markdown("<h1><center>HuggingGPT</center></h1>")
    gr.Markdown("<p align='center'><img src='https://i.ibb.co/qNH3Jym/logo.png' height='25' width='95'></p>")
    gr.Markdown("<p align='center' style='font-size: 20px;'>A system to connect LLMs with the ML community. See our <a href='https://github.com/microsoft/JARVIS'>Project</a> and <a href='http://arxiv.org/abs/2303.17580'>Paper</a>.</p>")
    gr.HTML('''<center><a href="https://huggingface.co/spaces/microsoft/HuggingGPT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key and Hugging Face Token</center>''')
    gr.HTML('''<center>Note: Only a few models are deployed on the local inference endpoint due to hardware limitations, and online Hugging Face inference endpoints may occasionally be unavailable, so the capability of HuggingGPT here is limited.</center>''')
    if not OPENAI_KEY:
        with gr.Row().style():
            with gr.Column(scale=0.85):
                openai_api_key = gr.Textbox(
                    show_label=False,
                    placeholder="Set your OpenAI API key here and press Enter",
                    lines=1,
                    type="password"
                ).style(container=False)
            with gr.Column(scale=0.15, min_width=0):
                btn1 = gr.Button("Submit").style(full_height=True)

    if not HUGGINGFACE_TOKEN:
        with gr.Row().style():
            with gr.Column(scale=0.85):
                hugging_face_token = gr.Textbox(
                    show_label=False,
                    placeholder="Set your Hugging Face Token here and press Enter",
                    lines=1,
                    type="password"
                ).style(container=False)
            with gr.Column(scale=0.15, min_width=0):
                btn3 = gr.Button("Submit").style(full_height=True)
    

    with gr.Row().style():
        with gr.Column(scale=0.6):
            chatbot = gr.Chatbot([], elem_id="chatbot").style(height=500)
        with gr.Column(scale=0.4):
            results = gr.JSON(elem_classes="json")


    with gr.Row().style():
        with gr.Column(scale=0.85):
            txt = gr.Textbox(
                show_label=False,
                placeholder="Enter text and press Enter. Media URLs must include the file extension, e.g., https://example.com/example.jpg",
                lines=1,
            ).style(container=False)
        with gr.Column(scale=0.15, min_width=0):
            btn2 = gr.Button("Send").style(full_height=True)
        
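    # Thin wrappers forwarding Gradio events to the per-session Client stored in gr.State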
    def set_key(state, openai_api_key):
        return state["client"].set_key(openai_api_key)

    def add_text(state, chatbot, txt):
        return state["client"].add_text(chatbot, txt)
    
    def set_token(state, hugging_face_token):
        return state["client"].set_token(hugging_face_token)
    
    def bot(state, chatbot):
        return state["client"].bot(chatbot)

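    # Wire events: submitting a key/token stores it on the Client; sending text appends it to the chat, then runs the bot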
    if not OPENAI_KEY:
        openai_api_key.submit(set_key, [state, openai_api_key], [openai_api_key])
        btn1.click(set_key, [state, openai_api_key], [openai_api_key])

    if not HUGGINGFACE_TOKEN:
        hugging_face_token.submit(set_token, [state, hugging_face_token], [hugging_face_token])
        btn3.click(set_token, [state, hugging_face_token], [hugging_face_token])
    
    txt.submit(add_text, [state, chatbot, txt], [chatbot, txt]).then(bot, [state, chatbot], [chatbot, results])
    btn2.click(add_text, [state, chatbot, txt], [chatbot, txt]).then(bot, [state, chatbot], [chatbot, results])
    

    gr.Examples(
        examples=["Given a collection of images A: /examples/a.jpg, B: /examples/b.jpg, C: /examples/c.jpg, please tell me how many zebras are in these pictures?",
                "Please generate a canny image based on /examples/f.jpg",
                "show me a joke and an image of a cat",
                "what is in the examples/a.jpg",
                "based on /examples/a.jpg, please generate a video and audio",
                "based on the pose of /examples/d.jpg and the content of /examples/e.jpg, please show me a new image",
                ],
        inputs=txt
    )

demo.launch()