JayGhiya committed on
Commit
77b0e7b
1 Parent(s): 5bc51f2

feat: demo for 34b llava on zero gpu

Files changed (2)
  1. app.py +61 -59
  2. requirements.txt +5 -1
app.py CHANGED
@@ -1,63 +1,65 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
+ from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration, TextIteratorStreamer
+ from threading import Thread
+ import time
+ from PIL import Image
+ import torch
+ import spaces
  """
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
  """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
+ processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-34b-hf")
+
+ model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-34b-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True)
+ model.to("cuda:0")
+
+ @spaces.GPU
+ def bot_streaming(message, history):
+     print(message)
+     image = None
+     if message["files"]:
+         image = message["files"][-1]["path"]
+     else:
+         # if there's no image uploaded for this turn, look for images in the past
+         # turns (kept inside tuples) and take the last one
+         for hist in history:
+             if isinstance(hist[0], tuple):
+                 image = hist[0][0]
+
+     if image is None:
+         raise gr.Error("You need to upload an image for LLaVA to work.")
+     prompt = f"[INST] <image>\n{message['text']} [/INST]"
+     image = Image.open(image).convert("RGB")
+     inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
+
+     streamer = TextIteratorStreamer(processor, skip_special_tokens=True)
+     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=300)
+
+     # run generation in a background thread so tokens can be yielded as they arrive
+     thread = Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+
+     # with skip_special_tokens=True the streamer omits the <image> token, so this
+     # is the exact prompt prefix to strip from the decoded output
+     text_prompt = f"[INST] \n{message['text']} [/INST]"
+
+     buffer = ""
+     for new_text in streamer:
+         buffer += new_text
+         generated_text_without_prompt = buffer[len(text_prompt):]
+         time.sleep(0.04)
+         yield generated_text_without_prompt
+
+
+ demo = gr.ChatInterface(
+     fn=bot_streaming,
+     title="LLaVA NeXT",
+     examples=[
+         {"text": "What is on the flower?", "files": ["./bee.jpg"]},
+         {"text": "How to make this pastry?", "files": ["./baklava.png"]},
+     ],
+     description="Try [LLaVA 1.6 34B](https://huggingface.co/llava-hf/llava-v1.6-34b-hf) in this demo. Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
+     stop_btn="Stop Generation",
+     multimodal=True,
+ )
+ demo.launch(debug=True)
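
The handler above assumes the message format that `gr.ChatInterface(multimodal=True)` passes in: a dict with a `text` string and a `files` list whose entries carry a `path`. For reference, a minimal local smoke test might look like the sketch below (assumptions: a CUDA GPU is available locally, `@spaces.GPU` passes through as a no-op outside a Space, and `./bee.jpg` is the example image shipped with the Space):

```python
# Hypothetical local smoke test for bot_streaming; not part of the commit.
# The message dict mirrors the shape the handler expects from ChatInterface.
message = {"text": "What is on the flower?", "files": [{"path": "./bee.jpg"}]}
for partial in bot_streaming(message, history=[]):
    last = partial  # each yield is the response so far, with the prompt stripped
print(last)
```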
requirements.txt CHANGED
@@ -1 +1,5 @@
- huggingface_hub==0.22.2
+ spaces
+ pillow
+ accelerate
+ torch
+ transformers
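
Note that every dependency here is unpinned, so each Space rebuild pulls the latest release of each library. If reproducibility matters, a pinned variant could look like the following sketch (the transformers floor is an assumption based on when LLaVA-NeXT support landed; verify against the release notes before relying on it):

```
spaces
pillow
accelerate
torch
transformers>=4.39.0  # assumed minimum for LlavaNextForConditionalGeneration
```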