from transformers import ChameleonProcessor, ChameleonForConditionalGeneration, TextIteratorStreamer, BitsAndBytesConfig
import torch
from PIL import Image
import spaces
from threading import Thread
import gradio as gr
from gradio import FileData
import time

processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")

model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.float16).to("cuda") 
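
# Optional sketch (an assumption, not part of the original demo): the imported
# BitsAndBytesConfig could instead load the model in 4-bit to cut VRAM use:
#
#   model = ChameleonForConditionalGeneration.from_pretrained(
#       "facebook/chameleon-7b",
#       quantization_config=BitsAndBytesConfig(load_in_4bit=True),
#       device_map="auto",
#   )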

@spaces.GPU
def bot_streaming(message, history):

  # Gather image paths: either from files attached to the current message,
  # or from earlier uploads in the chat history.
  if message.files:
    # One or more attached files (interleaved images).
    image = [f.path for f in message.files]
  else:
    # No files on this turn: fall back to images uploaded earlier in the
    # history. Gradio history entries are tuples; an upload turn contains
    # FileData objects, a plain text turn contains only strings.

    def has_file_data(lst):
      return any(isinstance(item, FileData) for sublist in lst if isinstance(sublist, tuple) for item in sublist)

    def extract_paths(lst):
      return [item.path for sublist in lst if isinstance(sublist, tuple) for item in sublist if isinstance(item, FileData)]

    # Find the most recent text-only turn; only images uploaded before it are
    # treated as context for the current question.
    latest_text_only_index = -1
    for i, item in enumerate(history):
      if all(isinstance(sub_item, str) for sub_item in item):
        latest_text_only_index = i

    image = [path for i, item in enumerate(history)
             if i < latest_text_only_index and has_file_data(item)
             for path in extract_paths(item)]

  # Without at least one image there is nothing to condition on, so fail early.
  if not image:
    raise gr.Error("You need to upload an image for Chameleon to work.")
  # Open each image as RGB and append one <image> placeholder per image; the
  # processor replaces every placeholder with that image's tokens, so e.g.
  # two uploads yield "some question<image><image>".
  image = [Image.open(path).convert("RGB") for path in image]
  prompt = message.text + "<image>" * len(image)

  inputs = processor(prompt, images=image, return_tensors="pt").to("cuda", torch.float16)
  # Stream decoded tokens as they arrive; skip the echoed prompt and special tokens.
  streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
  generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=250)

  # Run generation on a background thread so partial output can be yielded
  # from the streamer while the model is still decoding.
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
  thread.start()

  buffer = ""
  for new_text in streamer:
    buffer += new_text
    time.sleep(0.01)
    yield buffer


demo = gr.ChatInterface(
    fn=bot_streaming,
    title="Chameleon 🦎",
    examples=[
        {"text": "Where to find this monument? Can you give me other recommendations around the area?", "files": ["./wat_arun.jpg"]},
        {"text": "Do these two pieces belong to the same era and if so, which era is it?", "files": ["./rococo_1.jpg", "./rococo_2.jpg"]},
        {"text": "What art style is this and which century?", "files": ["./rococo_1.jpg"]},
        {"text": "What is on the flower?", "files": ["./bee.jpg"]},
    ],
    textbox=gr.MultimodalTextbox(file_count="multiple"),
    description="Try [Chameleon-7B](https://huggingface.co/facebook/chameleon-7b) by Meta with transformers in this demo. Upload image(s) and start chatting about them, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
    stop_btn="Stop Generation",
    multimodal=True,
)
demo.launch(debug=True)
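# Note (assumption about intent): debug=True blocks the main thread and prints
# server-side tracebacks to the console, which helps while iterating on a Space.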