vilarin committed on
Commit 29c4e7f
Parent: 4234c1d

Update app.py

Files changed (1):
  app.py  +3 -2
app.py CHANGED
@@ -59,13 +59,14 @@ def stream_chat(message, history: list, temperature: float, max_new_tokens: int)
     if len(history) == 0:
         raise gr.Error("Please upload an image first.")
         image = None
+        conversation.append({"role": "user", "content": f"<|image_1|>\n{message['text']}"})
     elif len(history):
         image = Image.open(history[0][0][0])
         for prompt, answer in history:
             conversation[0]['content'] = f"<|image_1|>\n{prompt}"
             conversation.extend([{"role": "assistant", "content": answer}])
-
-    conversation.append({"role": "user", "content": f"<|image_1|>\n{message['text']}"})
+        conversation.append({"role": "user", "content": message['text']})
+
     print(f"Conversation is -\n{conversation}")
     inputs = processor.tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
     inputs_ids = processor(inputs, image, return_tensors="pt").to(0)
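In effect, the patch moves the final user-message append into the branches: on the first turn the new message is added together with the <|image_1|> placeholder, while on follow-up turns the placeholder stays on the first user turn of the history and the new message is appended as plain text. Below is a minimal, illustrative sketch of the history branch only (not part of the commit); build_conversation, the example history, and the assumption that conversation starts as a single empty user turn are hypothetical.

# Illustrative sketch -- mirrors the patched history branch of stream_chat.
# Assumes `conversation` starts as one empty user turn and `history` holds
# (prompt, answer) pairs, as suggested by the diff; names here are made up.
def build_conversation(message_text: str, history: list) -> list:
    conversation = [{"role": "user", "content": ""}]
    for prompt, answer in history:
        # Only the first user turn carries the <|image_1|> placeholder.
        conversation[0]['content'] = f"<|image_1|>\n{prompt}"
        conversation.extend([{"role": "assistant", "content": answer}])
    # The new user message is appended without repeating the image token.
    conversation.append({"role": "user", "content": message_text})
    return conversation

print(build_conversation("And what color is it?",
                         [("What is in the image?", "A cat on a couch.")]))
# [{'role': 'user', 'content': '<|image_1|>\nWhat is in the image?'},
#  {'role': 'assistant', 'content': 'A cat on a couch.'},
#  {'role': 'user', 'content': 'And what color is it?'}]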