sflindrs committed
Commit 5bfb146 · verified · 1 Parent(s): 53afe5a

Update app.py

Files changed (1)
  1. app.py +4 -2
app.py CHANGED
@@ -3,6 +3,7 @@ from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
 from PIL import Image
 import torch
 import spaces
+import pprint
 
 # Load the processor and model
 processor = AutoProcessor.from_pretrained(
@@ -34,15 +35,16 @@ def process_image_and_text(image, text):
     # Generate output
     output = model.generate_from_batch(
         inputs,
-        GenerationConfig(max_new_tokens=200, stop_strings="<|endoftext|>"),
+        GenerationConfig(max_new_tokens=1024, stop_strings="<|endoftext|>"),
         tokenizer=processor.tokenizer
     )
 
     # Only get generated tokens; decode them to text
     generated_tokens = output[0, inputs['input_ids'].size(1):]
     generated_text = processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)
+    pretty_text = pprint.pp(generated_text)
 
-    return generated_text
+    return pretty_text
 
 def chatbot(image, text, history):
     if image is None:
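Note on the new return path: pprint.pp prints to stdout and returns None, so as committed process_image_and_text hands None back to its caller rather than the decoded text. If the intent was to return a pretty-printed string, a minimal sketch (the prettify helper below is hypothetical, not part of app.py) would use pprint.pformat, which returns the formatted string instead of printing it:

import pprint

def prettify(generated_text: str) -> str:
    # pprint.pformat returns the formatted representation as a string,
    # so the decoded model output still reaches the caller; pprint.pp
    # would print it and return None.
    return pprint.pformat(generated_text)

# Example: prints the formatted repr of the decoded string, not None.
print(prettify("The image shows a cat sitting on a windowsill."))

Keep in mind that pformat of a plain string yields its repr (including quotes), so for chat display the raw generated_text may already be the more natural value to return.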