Update app.py
app.py CHANGED
@@ -1,5 +1,3 @@
-from __future__ import annotations
-
 import os
 import string
 
@@ -19,6 +17,7 @@ quantization_config = BitsAndBytesConfig(
 pipe = pipeline("image-to-text", model=model_id, model_kwargs={"quantization_config": quantization_config})
 
 
+
 def extract_response_pairs(text):
     pattern = re.compile(r'(USER:.*?)ASSISTANT:(.*?)(?:$|USER:)', re.DOTALL)
     matches = pattern.findall(text)
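Note: the hunk header shows this sits right after `quantization_config = BitsAndBytesConfig(`. For readers who want to reproduce the setup, here is a minimal sketch of a 4-bit LLaVA pipeline; the checkpoint name and config arguments are not visible in the diff, so the values below are assumptions:

    import torch
    from transformers import BitsAndBytesConfig, pipeline

    # Assumed 4-bit settings; the diff only shows the opening of BitsAndBytesConfig(
    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.float16,
    )

    model_id = "llava-hf/llava-1.5-7b-hf"  # assumption: the real model_id is defined earlier in app.py

    # Same call as in the hunk above
    pipe = pipeline("image-to-text", model=model_id,
                    model_kwargs={"quantization_config": quantization_config})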
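`extract_response_pairs` turns the flat USER/ASSISTANT transcript into (user, assistant) tuples for the chatbot. A quick demo on a hypothetical single-turn transcript:

    import re

    def extract_response_pairs(text):
        # Same pattern as app.py: lazily capture USER ... ASSISTANT ... up to the next USER: or end
        pattern = re.compile(r'(USER:.*?)ASSISTANT:(.*?)(?:$|USER:)', re.DOTALL)
        return pattern.findall(text)

    text = "USER: <image>\nHow to make this pastry? ASSISTANT: Layer phyllo, nuts and syrup."
    print(extract_response_pairs(text))
    # [('USER: <image>\nHow to make this pastry? ', ' Layer phyllo, nuts and syrup.')]

One caveat: the trailing `USER:` is consumed rather than matched with a lookahead, so on longer transcripts every other pair can be skipped; `(?:$|(?=USER:))` would keep them all.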
@@ -36,11 +35,10 @@ def postprocess_output(output: str) -> str:
 
 
 def chat(image, text, temperature, length_penalty,
-         repetition_penalty, max_length, min_length,
+         repetition_penalty, max_length, min_length, top_p,
          history_chat):
 
-    prompt = " ".join(history_chat)
-    prompt = f"USER: <image>\n{text}\nASSISTANT:"
+    prompt = " ".join(history_chat) + f"USER: <image>\n{text}\nASSISTANT:"
 
     outputs = pipe(image, prompt=prompt,
                    generate_kwargs={"temperature":temperature,
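This is the substantive fix in the hunk above: the old code assigned `prompt` twice, so the f-string silently threw away the joined history. A small illustration with hypothetical history values:

    history_chat = ["USER: <image>\nWhat is this? ASSISTANT: Baklava. "]
    text = "How is it made?"

    # Before: the second assignment overwrote the joined history
    # prompt = " ".join(history_chat)
    # prompt = f"USER: <image>\n{text}\nASSISTANT:"

    # After: the history stays as the prefix of the new prompt
    prompt = " ".join(history_chat) + f"USER: <image>\n{text}\nASSISTANT:"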
@@ -48,13 +46,14 @@ def chat(image, text, temperature, length_penalty,
                                     "repetition_penalty":repetition_penalty,
                                     "max_length":max_length,
                                     "min_length":min_length,
-                                    "num_beams":num_beams,
                                     "top_p":top_p})
 
-
-    history_chat.append(
+
+    history_chat.append(outputs[0]["generated_text"])
+    print(f"history_chat is {history_chat}")
 
     chat_val = extract_response_pairs(" ".join(history_chat))
+    print(f"chat_val is {chat_val}")
     return chat_val, history_chat
 
 
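Two things happen in this hunk: the undefined-name bug goes away (`num_beams` was passed in `generate_kwargs` but was never a parameter of `chat`, so the call either raised a `NameError` or picked up an unintended global), and the model's reply is now appended to `history_chat`. A hedged sketch of the fixed call path, continuing the pipeline sketch above; the image path and sampling values are arbitrary:

    from PIL import Image

    image = Image.open("baklava.png")  # assumed local test image
    prompt = "USER: <image>\nHow to make this pastry?\nASSISTANT:"

    outputs = pipe(image, prompt=prompt,
                   generate_kwargs={"temperature": 0.7,
                                    "repetition_penalty": 1.2,
                                    "max_length": 200,
                                    "min_length": 1,
                                    "top_p": 0.9})

    # The image-to-text pipeline returns [{"generated_text": ...}], which is
    # what the new code appends to history_chat
    print(outputs[0]["generated_text"])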
@@ -67,8 +66,8 @@ css = """
 """
 with gr.Blocks(css="style.css") as demo:
   gr.Markdown(DESCRIPTION)
-  gr.Markdown("
-  gr.Markdown("
+  gr.Markdown("## LLaVA, one of the greatest multimodal chat models is now available in transformers with 4-bit quantization! ⚡️")
+  gr.Markdown("## Try it 4-bit quantized LLaVA this demo 🤗")
 
   chatbot = gr.Chatbot(label="Chat", show_label=False)
   gr.Markdown("Input image and text and start chatting 👇")
@@ -185,8 +184,12 @@ with gr.Blocks(css="style.css") as demo:
                      chatbot,
                      history_chat
                    ],
-                   queue=False
-
+                   queue=False)
+
+  examples = [["/content/baklava.png", "How to make this pastry?"],["/content/bee.png","Describe this image."]]
+  gr.Examples(examples=examples, inputs=[image, text_input, chat_inputs])
+
+
 
 
 if __name__ == "__main__":