vilarin committed on
Commit
999df98
1 Parent(s): 3090ac5

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -66,7 +66,8 @@ def stream_chat(message, history: list, temperature: float, max_new_tokens: int)
 
     inputs = processor.tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
     inputs_ids = processor(inputs, image, return_tensors="pt").to(0)
-
+    streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True, "skip_prompt": True, 'clean_up_tokenization_spaces': False})
+
     generate_kwargs = dict(
         streamer=streamer,
         max_new_tokens=max_new_tokens,
@@ -78,7 +79,6 @@ def stream_chat(message, history: list, temperature: float, max_new_tokens: int)
         generate_kwargs["do_sample"] = False
     generate_kwargs = {**inputs_ids, **generate_kwargs}
 
-    streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True, "skip_prompt": True, 'clean_up_tokenization_spaces': False})
 
     thread = Thread(target=model.generate, kwargs=generate_kwargs)
     thread.start()
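
The commit moves the TextIteratorStreamer construction above the generate_kwargs dict, so that `streamer` exists before `generate_kwargs = dict(streamer=streamer, ...)` references it; in the previous ordering the name was used before it was assigned. Below is a minimal sketch of the corrected ordering, not the full app.py: the setup of `processor`, `model`, `image`, and `conversation` is assumed to come from the rest of the file, the temperature/do_sample handling is reconstructed from the function signature and the context lines, and the trailing yield loop is an assumed consumption pattern for the streamer that is not shown in this diff.

```python
# Sketch of the corrected ordering in stream_chat (assumptions noted above):
# `processor`, `model`, `image`, and `conversation` are taken from the
# surrounding app.py; the final yield loop is assumed, not shown in the diff.
from threading import Thread
from transformers import TextIteratorStreamer


def stream_chat_excerpt(processor, model, image, conversation,
                        temperature: float, max_new_tokens: int):
    inputs = processor.tokenizer.apply_chat_template(
        conversation, tokenize=False, add_generation_prompt=True
    )
    inputs_ids = processor(inputs, image, return_tensors="pt").to(0)

    # Create the streamer before generate_kwargs so the reference below is valid.
    streamer = TextIteratorStreamer(
        processor,
        skip_special_tokens=True,
        skip_prompt=True,
        clean_up_tokenization_spaces=False,
    )

    generate_kwargs = dict(
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        temperature=temperature,  # reconstructed from the signature, not shown in the diff
    )
    if temperature == 0:  # assumed guard around the do_sample context line
        generate_kwargs["do_sample"] = False
    generate_kwargs = {**inputs_ids, **generate_kwargs}

    # model.generate runs in a background thread and pushes tokens into the
    # streamer, which this generator drains incrementally.
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()

    buffer = ""
    for new_text in streamer:  # assumed consumption pattern for a streaming UI
        buffer += new_text
        yield buffer
```

Building the streamer first is the whole fix: the generate_kwargs dict captures the streamer object at construction time, so it must already exist when the dict is built, and model.generate then writes into it from the background thread while the caller reads from it.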