kavaliha committed on
Commit 6752e3f · verified · 1 Parent(s): 47b4ee5
Files changed (1)
  1. app.py +84 -93
app.py CHANGED
@@ -15,106 +15,97 @@ processor = LlavaOnevisionProcessor.from_pretrained(model_id)
 model = LlavaOnevisionForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16)
 model.to("cuda")
 
-def sample_frames(video_file, num_frames):
-    video = cv2.VideoCapture(video_file)
-    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
-    interval = total_frames // num_frames
+# Function to capture frames from the camera
+def capture_camera_frames(num_frames):
+    camera = cv2.VideoCapture(0)  # Accessing the camera (0 is the default camera)
     frames = []
-    for i in range(total_frames):
-        ret, frame = video.read()
-        pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+    for _ in range(num_frames):
+        ret, frame = camera.read()
         if not ret:
-            continue
-        if i % interval == 0:
-            frames.append(pil_img)
-    video.release()
+            break
+        pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+        frames.append(pil_img)
+    camera.release()
     return frames
 
 @spaces.GPU
 def bot_streaming(message, history):
-
-    txt = message.text
-    ext_buffer = f"user\n{txt} assistant"
-
-    if message.files:
-        if len(message.files) == 1:
-            image = [message.files[0].path]
-        # interleaved images or video
-        elif len(message.files) > 1:
-            image = [msg.path for msg in message.files]
-    else:
-        # if there's no image uploaded for this turn, look for images in the past turns
-        # kept inside tuples, take the last one
-        for hist in history:
-            if type(hist[0])==tuple:
-                image = hist[0][0]
-
-    if message.files is None:
-        gr.Error("You need to upload an image or video for LLaVA to work.")
-
-    video_extensions = ("avi", "mp4", "mov", "mkv", "flv", "wmv", "mjpeg")
-    image_extensions = Image.registered_extensions()
-    image_extensions = tuple([ex for ex, f in image_extensions.items()])
-    if len(image) == 1:
-        if image[0].endswith(video_extensions):
-
-            video = sample_frames(image[0], 32)
+    txt = message.text
+    ext_buffer = f"user\n{txt} assistant"
+
+    if message.files:
+        if len(message.files) == 1:
+            image = [message.files[0].path]
+        elif len(message.files) > 1:
+            image = [msg.path for msg in message.files]
+        else:
             image = None
-            prompt = f"<|im_start|>user <video>\n{message.text}<|im_end|><|im_start|>assistant"
-        elif image[0].endswith(image_extensions):
-            image = Image.open(image[0]).convert("RGB")
-            video = None
-            prompt = f"<|im_start|>user <image>\n{message.text}<|im_end|><|im_start|>assistant"
-
-    elif len(image) > 1:
-        image_list = []
-        user_prompt = message.text
-
-        for img in image:
-            if img.endswith(image_extensions):
-                img = Image.open(img).convert("RGB")
-                image_list.append(img)
 
-            elif img.endswith(video_extensions):
-                frames = sample_frames(img, 6)
-                for frame in frames:
-                    image_list.append(frame)
-
-        toks = "<image>" * len(image_list)
-        prompt = "<|im_start|>user"+ toks + f"\n{user_prompt}<|im_end|><|im_start|>assistant"
-
-        image = image_list
-        video = None
-
-
-    inputs = processor(text=prompt, images=image, videos=video, return_tensors="pt").to("cuda", torch.float16)
-    streamer = TextIteratorStreamer(processor, **{"max_new_tokens": 200, "skip_special_tokens": True})
-    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=200)
-    generated_text = ""
-
-    thread = Thread(target=model.generate, kwargs=generation_kwargs)
-    thread.start()
+    # Check if we should use the camera
+    if txt.lower().startswith("camera"):
+        # Capture frames from the camera
+        image = capture_camera_frames(5)  # Capture 5 frames
+
+    if message.files is None and not image:
+        gr.Error("You need to upload an image or video, or access the camera for LLaVA to work.")
+        return
+
+    video_extensions = ("avi", "mp4", "mov", "mkv", "flv", "wmv", "mjpeg")
+    image_extensions = Image.registered_extensions()
+    image_extensions = tuple([ex for ex, f in image_extensions.items()])
+
+    if len(image) == 1:
+        if image[0].endswith(video_extensions):
+            video = sample_frames(image[0], 32)
+            image = None
+            prompt = f"<|im_start|>user <video>\n{message.text}<|im_end|><|im_start|>assistant"
+        elif image[0].endswith(image_extensions):
+            image = Image.open(image[0]).convert("RGB")
+            video = None
+            prompt = f"<|im_start|>user <image>\n{message.text}<|im_end|><|im_start|>assistant"
+    elif len(image) > 1:
+        image_list = []
+        user_prompt = message.text
+
+        for img in image:
+            if img.endswith(image_extensions):
+                img = Image.open(img).convert("RGB")
+                image_list.append(img)
+            elif img.endswith(video_extensions):
+                frames = sample_frames(img, 6)
+                for frame in frames:
+                    image_list.append(frame)
+
+        toks = "<image>" * len(image_list)
+        prompt = "<|im_start|>user" + toks + f"\n{user_prompt}<|im_end|><|im_start|>assistant"
 
-
+        image = image_list
+        video = None
 
-    buffer = ""
-    for new_text in streamer:
-
-        buffer += new_text
-
-        generated_text_without_prompt = buffer[len(ext_buffer):]
-        time.sleep(0.01)
-        yield generated_text_without_prompt
-
-
-demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA Onevision", examples=[
-    {"text": "Do the cats in these two videos have same breed? What breed is each cat?", "files":["./cats_1.mp4", "./cats_2.mp4"]},
-    {"text": "These are the tech specs of two laptops I am choosing from. Which one should I choose for office work?", "files":["./dell-tech-specs.jpeg", "./asus-tech-specs.png"]},
-    {"text": "Here are several images from a cooking book, showing how to prepare a meal step by step. Can you write a recipe for the meal, describing each step in details?", "files":["./step0.png", "./step1.png", "./step2.png", "./step3.png", "./step4.png", "./step5.png"]},
-
-    {"text": "What is on the flower?", "files":["./bee.jpg"]},
-    {"text": "What is this video about? Describe all the steps taken in the video so I can follow them, be very detailed", "files":["./tutorial.mp4"]}],
-    textbox=gr.MultimodalTextbox(file_count="multiple"),
-    description="Try [LLaVA Onevision](https://huggingface.co/docs/transformers/main/en/model_doc/llava_onevision) in this demo (more specifically, the [Qwen-2-0.5B-Instruct variant](https://huggingface.co/llava-hf/llava-onevision-qwen2-0.5b-ov-hf)). Upload an image or a video, and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error. ",
-    stop_btn="Stop Generation", multimodal=True)
-demo.launch(debug=True)
+    inputs = processor(text=prompt, images=image, videos=video, return_tensors="pt").to("cuda", torch.float16)
+    streamer = TextIteratorStreamer(processor, **{"max_new_tokens": 200, "skip_special_tokens": True})
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=200)
+    generated_text = ""
+
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+
+    buffer = ""
+    for new_text in streamer:
+        buffer += new_text
+        generated_text_without_prompt = buffer[len(ext_buffer):]
+        time.sleep(0.01)
+        yield generated_text_without_prompt
+
+
+# Integrate camera access into Gradio demo
+demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA Onevision with Camera", examples=[
+    {"text": "Take a picture with the camera and describe what is in it.", "files":[]},
+    {"text": "Do the cats in these two videos have the same breed? What breed is each cat?", "files":["./cats_1.mp4", "./cats_2.mp4"]},
+    {"text": "Here are several images from a cooking book, showing how to prepare a meal step by step. Can you write a recipe for the meal?", "files":["./step0.png", "./step1.png", "./step2.png", "./step3.png"]},
+    ],
+    textbox=gr.MultimodalTextbox(file_count="multiple"),
+    description="Upload an image or video, or try capturing frames with the camera and chat about it.",
+    stop_btn="Stop Generation", multimodal=True)
+
+demo.launch(debug=True)
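
Note: this commit deletes sample_frames() but the new bot_streaming() still calls it for uploaded videos (sample_frames(image[0], 32) and sample_frames(img, 6)), so any video upload will now raise a NameError. A minimal sketch of the helper as it could be kept alongside capture_camera_frames(), with the read check moved before the color conversion (the removed version converted the frame before testing ret) and a guard against clips shorter than num_frames:

def sample_frames(video_file, num_frames):
    # Sample roughly num_frames evenly spaced frames from a video file.
    video = cv2.VideoCapture(video_file)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    interval = max(total_frames // num_frames, 1)  # avoid modulo-by-zero on short clips
    frames = []
    for i in range(total_frames):
        ret, frame = video.read()
        if not ret:  # skip failed reads before touching the frame
            continue
        if i % interval == 0:
            frames.append(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
    video.release()
    return frames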
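
Note: cv2.VideoCapture(0) assumes a camera device is visible to the Python process; on a hosted Space there is usually none, so camera.read() fails on the first iteration and capture_camera_frames() silently returns an empty list. A sketch of a guard; isOpened() is standard OpenCV API, and the error text is illustrative:

def capture_camera_frames(num_frames):
    camera = cv2.VideoCapture(0)  # default camera; may not exist on a hosted Space
    if not camera.isOpened():
        camera.release()
        raise gr.Error("No camera device is available on this machine.")
    frames = []
    for _ in range(num_frames):
        ret, frame = camera.read()
        if not ret:
            break
        frames.append(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
    camera.release()
    return frames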
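
Note: in the new control flow, image = None is only assigned on the else of the inner length chain, so a turn with no files and no leading "camera" appears to leave image unbound and the later not image test would raise NameError; message.files is a list for a MultimodalTextbox, so the is None comparison is unlikely to ever be true, and gr.Error only shows its modal when raised. The camera branch also puts PIL images into image, which the .endswith() extension checks cannot handle. A sketch of flatter validation for the top of bot_streaming(), under those assumptions:

    image = None
    if message.files:
        image = [f.path for f in message.files]  # file paths (str)
    elif txt.lower().startswith("camera"):
        image = capture_camera_frames(5)  # PIL images, not paths; needs its own prompt branch
    if not image:
        raise gr.Error("Upload an image or video, or start your message with 'camera'.")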
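
Note: TextIteratorStreamer has no max_new_tokens parameter; extra keywords are just forwarded to the tokenizer's decode call, and the length cap is already set in generation_kwargs. Trimming the prompt by slicing len(ext_buffer) characters off the buffer also only works while ext_buffer happens to match the decoded prompt text; the streamer's skip_prompt flag is the documented way to stream only the completion. A sketch, assuming processor.tokenizer exposes the underlying tokenizer:

    streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=200)
    Thread(target=model.generate, kwargs=generation_kwargs).start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        yield buffer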