Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -52,16 +52,17 @@ def stream_chat(message, history: list, temperature: float, max_new_tokens: int)
|
|
52 |
print(message)
|
53 |
conversation = []
|
54 |
for prompt, answer in history:
|
55 |
-
conversation.extend([{"role": "user", "content":
|
56 |
-
conversation.append({"role": "user", "content": message['text']})
|
57 |
|
58 |
if message["files"]:
|
59 |
-
image = Image.open(message["files"][-1])
|
|
|
60 |
else:
|
61 |
if len(history) == 0:
|
62 |
gr.Error("Please upload an image first.")
|
63 |
image = None
|
64 |
-
|
|
|
65 |
prompt = processor.tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
|
66 |
inputs = processor(prompt, images=image, return_tensors="pt").to(0)
|
67 |
|
|
|
52 |
print(message)
|
53 |
conversation = []
|
54 |
for prompt, answer in history:
|
55 |
+
conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])
|
|
|
56 |
|
57 |
if message["files"]:
|
58 |
+
image = Image.open(message["files"][-1])
|
59 |
+
conversation.append({"role": "user", "content": f"<|image_1|>\n{message['text']}"})
|
60 |
else:
|
61 |
if len(history) == 0:
|
62 |
gr.Error("Please upload an image first.")
|
63 |
image = None
|
64 |
+
conversation.append({"role": "user", "content": message['text']})
|
65 |
+
|
66 |
prompt = processor.tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
|
67 |
inputs = processor(prompt, images=image, return_tensors="pt").to(0)
|
68 |
|