merve HF staff committed on
Commit 5e58d7c · verified · 1 Parent(s): 28d9bf0

refactor a bit

Files changed (1):
  1. app.py (+13 -14)
app.py CHANGED
@@ -20,10 +20,10 @@ def bot_streaming(message, history):
 
     if message.files:
         if len(message.files) == 1:
-            image = [message.files[0].path]
-        # interleaved images or video
+            img = [message.files[0].path]
+        # interleaved images
         elif len(message.files) > 1:
-            image = [msg.path for msg in message.files]
+            img = [msg.path for msg in message.files]
     else:
 
         def has_file_data(lst):
@@ -38,32 +38,31 @@ def bot_streaming(message, history):
             if all(isinstance(sub_item, str) for sub_item in item):
                 latest_text_only_index = i
 
-        image = [path for i, item in enumerate(history) if i < latest_text_only_index and has_file_data(item) for path in extract_paths(item)]
+        img = [path for i, item in enumerate(history) if i < latest_text_only_index and has_file_data(item) for path in extract_paths(item)]
 
     if message.files is None:
-        gr.Error("You need to upload an image or video for LLaVA to work.")
+        gr.Error("You need to upload an image or multiple images at least once for LLaVA to work.")
 
     image_extensions = Image.registered_extensions()
     image_extensions = tuple([ex for ex, f in image_extensions.items()])
-    if len(image) == 1:
-        image = Image.open(image[0]).convert("RGB")
+    if len(img) == 1:
+        image = Image.open(img[0]).convert("RGB")
         prompt = f"{message.text}<image>"
 
-    elif len(image) > 1:
+    elif len(img) > 1:
         image_list = []
         user_prompt = message.text
 
-        for img in image:
-            img = Image.open(img).convert("RGB")
-            image_list.append(img)
+        for im in img:
+            image_list.append(Image.open(im).convert("RGB"))
 
         toks = "<image>" * len(image_list)
         prompt = user_prompt + toks
 
-        image = image_list
+        img = image_list
 
 
-    inputs = processor(prompt, image, return_tensors="pt").to("cuda", torch.float16)
+    inputs = processor(prompt, img, return_tensors="pt").to("cuda", torch.float16)
     streamer = TextIteratorStreamer(processor, {"skip_special_tokens": True})
     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=250)
     generated_text = ""
@@ -78,7 +77,7 @@ def bot_streaming(message, history):
 
         buffer += new_text
 
-        generated_text_without_prompt = buffer#[len(ext_buffer):]
+        generated_text_without_prompt = buffer
        time.sleep(0.01)
         yield buffer
 
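The part of app.py this commit refactors collects file paths into `img`, opens them as PIL images, and appends one `<image>` placeholder per image to the user text before calling the processor. The sketch below isolates that step under stated assumptions: the helper name `build_inputs` is hypothetical and not part of app.py, and the single-image branch mirrors the app by returning one PIL image rather than a list.

```python
# Minimal sketch of the path-to-prompt step touched by this commit.
# `build_inputs` is a hypothetical helper, not a function in app.py.
from PIL import Image


def build_inputs(paths, text):
    if len(paths) == 1:
        # Single upload: one PIL image and one <image> token.
        images = Image.open(paths[0]).convert("RGB")
        prompt = f"{text}<image>"
    else:
        # Interleaved uploads: a list of PIL images and one token per image.
        images = [Image.open(p).convert("RGB") for p in paths]
        prompt = text + "<image>" * len(images)
    return prompt, images


# Example usage (paths are placeholders):
# prompt, images = build_inputs(["cat.png", "dog.png"], "What do these share?")
# inputs = processor(prompt, images, return_tensors="pt").to("cuda", torch.float16)
```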
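The unchanged context lines (TextIteratorStreamer, generation_kwargs, the buffer/yield loop) follow the usual transformers streaming pattern: generate() runs in a background thread while the streamer yields decoded text. This is a hedged sketch of that pattern, not a copy of the rest of app.py; it assumes `model` and `processor` are the already-loaded LLaVA interleave model and processor, and it passes `processor.tokenizer` and `skip_special_tokens=True` explicitly.

```python
# Hedged sketch of the threaded streaming pattern used around this diff.
from threading import Thread

from transformers import TextIteratorStreamer


def stream_reply(model, processor, inputs, max_new_tokens=250):
    streamer = TextIteratorStreamer(processor.tokenizer, skip_special_tokens=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens)
    # Run generation in the background; the streamer yields text as it decodes.
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    buffer = ""
    for new_text in streamer:
        buffer += new_text
        yield buffer
```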