HuanjinYao committed on
Commit 86cffc0
1 Parent(s): 970607e

Update app.py

Files changed (1)
  1. app.py +113 -56
app.py CHANGED
@@ -1,62 +1,119 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
 
 
 if __name__ == "__main__":
 
 import gradio as gr
 from huggingface_hub import InferenceClient
+import spaces
 
+import os
+import warnings
+import shutil
+import time
+from threading import Thread
+
+from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, AutoProcessor
+from transformers import TextIteratorStreamer
+import torch
+from dc.model import *
+from dc.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from dc.conversation import conv_templates, SeparatorStyle
+# assumes the dc package mirrors the llava layout and exposes these helpers in dc.mm_utils
+from dc.mm_utils import tokenizer_image_token, process_images
+from PIL import Image
+
+
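+# load the DenseConnector-v1.5-8B checkpoint, its vision tower, and the tokenizer once at startup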
+processor = AutoProcessor.from_pretrained('HuanjinYao/DenseConnector-v1.5-8B')
+tokenizer = AutoTokenizer.from_pretrained('HuanjinYao/DenseConnector-v1.5-8B', use_fast=False)
+model = LlavaLlamaForCausalLM.from_pretrained('HuanjinYao/DenseConnector-v1.5-8B', low_cpu_mem_usage=True, torch_dtype=torch.float16)
+vision_tower = model.get_vision_tower()
+if not vision_tower.is_loaded:
+    vision_tower.load_model()
+vision_tower.to(device='cuda', dtype=torch.float16)
+image_processor = vision_tower.image_processor
+
+model.to('cuda')
+
+# model.generation_config.eos_token_id = 128009
+tokenizer.unk_token = "<|reserved_special_token_0|>"
+tokenizer.pad_token = tokenizer.unk_token
+# stop on both the regular EOS token and Llama-3's <|eot_id|> turn terminator
+terminators = [
+    tokenizer.eos_token_id,
+    tokenizer.convert_tokens_to_ids("<|eot_id|>")
+]
+
+
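+# streaming chat handler: resolves the input image, rebuilds the llama_3 conversation, and yields partial responses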
+@spaces.GPU
+def bot_streaming(message, history):
+    print(message)
+    image = None
+    if message["files"]:
+        # message["files"][-1] is a dict or just a string path
+        if type(message["files"][-1]) == dict:
+            image = message["files"][-1]["path"]
+        else:
+            image = message["files"][-1]
+    else:
+        # if there's no image uploaded for this turn, look for images in the past turns
+        # (kept inside tuples) and take the last one
+        for hist in history:
+            if type(hist[0]) == tuple:
+                image = hist[0][0]
+    if image is None:
+        raise gr.Error("You need to upload an image for LLaVA to work.")
+
+    conv = conv_templates['llama_3'].copy()
+    if len(history) == 0:
+        user = DEFAULT_IMAGE_TOKEN + '\n' + message['text']
+    else:
+        for idx, (user, assistant) in enumerate(history):
+            if idx == 0:
+                user = DEFAULT_IMAGE_TOKEN + '\n' + user
+            conv.append_message(conv.roles[0], user)
+            conv.append_message(conv.roles[1], assistant)
+        user = message['text']
+    conv.append_message(conv.roles[0], user)
+    conv.append_message(conv.roles[1], None)
+    prompt = conv.get_prompt()
+
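+    # encode the image and prompt for generation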
+    image = Image.open(image).convert('RGB')
+    # assumes process_images follows the llava.mm_utils signature: (images, image_processor, model_config)
+    image_tensor = process_images([image], image_processor, model.config)[0]
+
+    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
+
+    streamer = TextIteratorStreamer(tokenizer, **{"skip_special_tokens": False, "skip_prompt": True})
+    # LLaVA-style generate receives the pixel values through the `images` keyword
+    generation_kwargs = dict(
+        inputs=input_ids,
+        images=image_tensor.unsqueeze(0).half().to(model.device),
+        streamer=streamer,
+        max_new_tokens=1024,
+        do_sample=False,
+        eos_token_id=terminators,
+    )
+
+    # generate in a background thread so the streamer can be consumed as tokens arrive
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+
+    buffer = ""
+    # time.sleep(0.5)
+    for new_text in streamer:
+        if "<|eot_id|>" in new_text:
+            new_text = new_text.split("<|eot_id|>")[0]
+        buffer += new_text
+
+        generated_text_without_prompt = buffer
+        # time.sleep(0.06)
+        yield generated_text_without_prompt
+
+
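+# Gradio UI: a multimodal ChatInterface wired to bot_streaming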
+# placeholder shown in the empty chatbot (assumed plain text; any markdown/HTML string works here)
+PLACEHOLDER = "Upload an image and ask a question about it."
+
+chatbot = gr.Chatbot(placeholder=PLACEHOLDER, scale=1)
+chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)
+with gr.Blocks(fill_height=True) as demo:
+    gr.ChatInterface(
+        fn=bot_streaming,
+        title="LLaVA Llama-3-8B",
+        examples=[{"text": "What is on the flower?", "files": ["./bee.jpg"]},
+                  {"text": "How to make this pastry?", "files": ["./baklava.png"]}],
+        description="Try [LLaVA Llama-3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers). Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
+        stop_btn="Stop Generation",
+        multimodal=True,
+        textbox=chat_input,
+        chatbot=chatbot,
+    )
 
 
 if __name__ == "__main__":