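# app.py for a Gradio Space: a streaming chat demo for Meta's Chameleon-30B
# multimodal model, which accepts one or more images interleaved with text.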
import time
from threading import Thread

import gradio as gr
import spaces
import torch
import transformers
from gradio import FileData
from PIL import Image
from transformers import (
    ChameleonForConditionalGeneration,
    ChameleonProcessor,
    TextIteratorStreamer,
)

print("transformers version", transformers.__version__)

# Load the processor and the fp16 model weights onto the GPU.
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-30b")
model = ChameleonForConditionalGeneration.from_pretrained(
    "facebook/chameleon-30b", torch_dtype=torch.float16
).to("cuda")
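# Note: the fp16 checkpoint is roughly 60 GB (30B params x 2 bytes), so this
# assumes a GPU (or ZeroGPU allocation) with enough memory to hold it.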

@spaces.GPU
def bot_streaming(message, history):
  txt = message.text

  if message.files:
    # Use the image(s) attached to the current message (interleaved images are supported).
    image = [f.path for f in message.files]
  else:
    # No new upload: fall back to images sent in earlier turns of the conversation.
    def has_file_data(lst):
      return any(isinstance(item, FileData) for sublist in lst if isinstance(sublist, tuple) for item in sublist)

    def extract_paths(lst):
      return [item.path for sublist in lst if isinstance(sublist, tuple) for item in sublist if isinstance(item, FileData)]

    # Index of the most recent text-only turn; images from turns before it are reused.
    latest_text_only_index = -1
    for i, item in enumerate(history):
      if all(isinstance(sub_item, str) for sub_item in item):
        latest_text_only_index = i

    image = [path for i, item in enumerate(history) if i < latest_text_only_index and has_file_data(item) for path in extract_paths(item)]

  if not image:
    raise gr.Error("You need to upload an image or video for Chameleon to work.")

  if len(image) == 1:
    image = Image.open(image[0]).convert("RGB")
    prompt = f"{txt}<image>"
  else:
    # Multiple images: append one <image> placeholder per image to the prompt.
    image = [Image.open(img).convert("RGB") for img in image]
    prompt = txt + "<image>" * len(image)

  # Cast the pixel values to fp16 to match the model weights.
  inputs = processor(prompt, images=image, return_tensors="pt").to("cuda", torch.float16)

  # Stream decoded tokens as they are produced; skip the echoed prompt and special tokens.
  streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
  generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=100)

  # Run generation on a background thread so this generator can yield partial output.
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
  thread.start()

  buffer = ""
  for new_text in streamer:
    buffer += new_text
    time.sleep(0.01)
    yield buffer


demo = gr.ChatInterface(
    fn=bot_streaming,
    title="Chameleon 🦎",
    examples=[
        {"text": "There are two images in the input. What is the relationship between this image and this image?", "files": ["./bee.jpg", "./depth-bee.png"]},
        {"text": "What is on the flower?", "files": ["./bee.jpg"]},
    ],
    textbox=gr.MultimodalTextbox(file_count="multiple"),
    description="Try Chameleon-30B by Meta in this demo. Upload one or more images and start chatting about them, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
    stop_btn="Stop Generation",
    multimodal=True,
)
demo.launch(debug=True)
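# A rough sketch of running this outside Spaces (package names assumed, and
# the example images ./bee.jpg and ./depth-bee.png must exist locally):
#   pip install torch transformers gradio spaces pillow
#   python app.py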