vilarin committed
Commit 3193581
1 Parent(s): 2692054

Update app.py

Files changed (1)
  1. app.py +5 -4
app.py CHANGED
@@ -52,16 +52,17 @@ def stream_chat(message, history: list, temperature: float, max_new_tokens: int)
     print(message)
     conversation = []
     for prompt, answer in history:
-        conversation.extend([{"role": "user", "content": f"<|image_1|>\n{prompt}"}, {"role": "assistant", "content": answer}])
-    conversation.append({"role": "user", "content": message['text']})
+        conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])
 
     if message["files"]:
-        image = Image.open(message["files"][-1]).convert('RGB')
+        image = Image.open(message["files"][-1])
+        conversation.append({"role": "user", "content": f"<|image_1|>\n{message['text']}"})
     else:
         if len(history) == 0:
             gr.Error("Please upload an image first.")
             image = None
-
+        conversation.append({"role": "user", "content": message['text']})
+
     prompt = processor.tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
     inputs = processor(prompt, images=image, return_tensors="pt").to(0)
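For context, here is a minimal, self-contained sketch of the input-building flow after this change. The standalone build_inputs helper and its signature are illustrative only and not part of the commit; processor is assumed to be the multimodal processor that app.py loads elsewhere, and message is assumed to follow Gradio's multimodal textbox format ({"text": ..., "files": [...]}).

# Illustrative sketch, not code from this commit.
from PIL import Image

def build_inputs(message: dict, history: list, processor):
    conversation = []
    # Prior turns are replayed as plain text; after this commit the
    # <|image_1|> tag is no longer prepended to every historical prompt.
    for prompt, answer in history:
        conversation.extend([
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": answer},
        ])

    if message["files"]:
        # The image placeholder now appears only in the current turn, and the
        # image is opened without forcing an RGB conversion.
        image = Image.open(message["files"][-1])
        conversation.append({"role": "user", "content": f"<|image_1|>\n{message['text']}"})
    else:
        # Text-only turn: no image is passed to the processor.
        image = None
        conversation.append({"role": "user", "content": message["text"]})

    prompt = processor.tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=True
    )
    # Mirrors the app: model inputs are moved to GPU 0.
    return processor(prompt, images=image, return_tensors="pt").to(0)

The net effect of the commit is that the <|image_1|> placeholder appears only on the current user turn, matching the single image handed to the processor, instead of being repeated on every historical user message.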