utkarshpatel7355 committed on
Commit bb4ae12 · verified · 1 Parent(s): 2f3c0d6

Update app.py

Files changed (1)
  1. app.py +131 -50
app.py CHANGED
@@ -1,64 +1,145 @@
 
 import gradio as gr
-from huggingface_hub import InferenceClient

 """
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]

-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})

-    messages.append({"role": "user", "content": message})

-    response = ""

-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content

-        response += token
-        yield response


-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)


 if __name__ == "__main__":
-    demo.launch()
 
+import spaces
 import gradio as gr

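+# Install flash-attn when the Space boots; FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE skips
+# compiling the CUDA extension during the pip install (a common recipe on Spaces hardware).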
+import subprocess  # 🥲
+subprocess.run(
+    "pip install flash-attn --no-build-isolation",
+    env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
+    shell=True,
+)
+# subprocess.run(
+#     "pip install git+https://github.com/LLaVA-VL/LLaVA-NeXT.git",
+#     shell=True,
+# )
+
+import torch
+from llava.model.builder import load_pretrained_model
+from llava.mm_utils import get_model_name_from_path, process_images, tokenizer_image_token
+from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IGNORE_INDEX
+from llava.conversation import conv_templates, SeparatorStyle
+import copy
+import warnings
+from decord import VideoReader, cpu
+import numpy as np
+import tempfile
+import os
+import shutil
+# warnings.filterwarnings("ignore")
+title = "# 🙋🏻‍♂️Welcome to 🌟Tonic's 🌋📹LLaVA-Video!"
+description1 = """The **🌋📹LLaVA-Video-7B-Qwen2** is a 7B-parameter model trained on the 🌋📹LLaVA-Video-178K dataset and the LLaVA-OneVision dataset. It is [based on the **Qwen2 language model**](https://huggingface.co/collections/Qwen/qwen2-6659360b33528ced941e557f) and supports a context window of up to 32K tokens. The model can process and interact with images, multi-image inputs, and videos, with specific optimizations for video analysis.
+This model leverages the **SO400M vision backbone** for visual input and Qwen2 for language processing, making it highly efficient in multi-modal reasoning, including visual and video-based tasks.
+🌋📹LLaVA-Video also comes in larger [32B](https://huggingface.co/lmms-lab/LLaVA-NeXT-Video-32B-Qwen) and [72B](https://huggingface.co/lmms-lab/LLaVA-Video-72B-Qwen2) variants, as well as a [variant](https://huggingface.co/lmms-lab/LLaVA-Video-7B-Qwen2-Video-Only) trained only on the new synthetic data.
+For further details, please visit the [Project Page](https://github.com/LLaVA-VL/LLaVA-NeXT) or check out the corresponding [research paper](https://arxiv.org/abs/2410.02713).
+- **Architecture**: `LlavaQwenForCausalLM`
+- **Attention Heads**: 28
+- **Hidden Layers**: 28
+- **Hidden Size**: 3584
 """
+description2 = """
+- **Intermediate Size**: 18944
+- **Max Frames Supported**: 64
+- **Languages Supported**: English, Chinese
+- **Image Aspect Ratio**: `anyres_max_9`
+- **Image Resolution**: Various grid resolutions
+- **Max Position Embeddings**: 32,768
+- **Vocab Size**: 152,064
+- **Model Precision**: bfloat16
+- **Hardware Used for Training**: 256 * Nvidia Tesla A100 GPUs
 """

+join_us = """
+## Join us:
+🌟TeamTonic🌟 is always making cool demos! Join our active builders' 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/qdfnvSPcqP) On 🤗Hugging Face: [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐GitHub: [Tonic-AI](https://github.com/tonic-ai) & contribute to 🌟[Build Tonic](https://git.tonic-ai.com/contribute) 🤗Big thanks to Yuvi Sharma and all the folks at Hugging Face for the community grant 🤗
+"""

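+# Decode the video with decord and sample at most `max_frames_num` frames (uniformly spaced
+# when the clip is long or force_sample is set); returns the frames, their timestamps as a
+# string, and the total duration in seconds.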
+def load_video(video_path, max_frames_num, fps=1, force_sample=False):
+    if max_frames_num == 0:
+        return np.zeros((1, 336, 336, 3))
+    vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
+    total_frame_num = len(vr)
+    video_time = total_frame_num / vr.get_avg_fps()
+    fps = round(vr.get_avg_fps() / fps)
+    frame_idx = [i for i in range(0, len(vr), fps)]
+    frame_time = [i / fps for i in frame_idx]
+    if len(frame_idx) > max_frames_num or force_sample:
+        sample_fps = max_frames_num
+        uniform_sampled_frames = np.linspace(0, total_frame_num - 1, sample_fps, dtype=int)
+        frame_idx = uniform_sampled_frames.tolist()
+        frame_time = [i / vr.get_avg_fps() for i in frame_idx]
+    frame_time = ",".join([f"{i:.2f}s" for i in frame_time])
+    spare_frames = vr.get_batch(frame_idx).asnumpy()
+    return spare_frames, frame_time, video_time
+
+# Load the model
+pretrained = "lmms-lab/LLaVA-Video-7B-Qwen2"
+model_name = "llava_qwen"
+device = "cuda" if torch.cuda.is_available() else "cpu"
+device_map = "auto"
+
+print("Loading model...")
+tokenizer, model, image_processor, max_length = load_pretrained_model(pretrained, None, model_name, torch_dtype="bfloat16", device_map=device_map)
+model.eval()
+print("Model loaded successfully!")
+
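+# @spaces.GPU requests a GPU only for the duration of each call (ZeroGPU Spaces).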
+@spaces.GPU
+def process_video(video_path, question):
+    max_frames_num = 64
+    video, frame_time, video_time = load_video(video_path, max_frames_num, 1, force_sample=True)
+    video = image_processor.preprocess(video, return_tensors="pt")["pixel_values"].to(device).bfloat16()
+    video = [video]
+
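+    # Build the chat prompt: the image token, a hint about the video duration and the
+    # sampled frame timestamps, then the user question, rendered with the Qwen-1.5 template.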
+    conv_template = "qwen_1_5"
+    time_instruction = f"The video lasts for {video_time:.2f} seconds, and {len(video[0])} frames are uniformly sampled from it. These frames are located at {frame_time}. Please answer the following questions related to this video."
+
+    full_question = DEFAULT_IMAGE_TOKEN + f"{time_instruction}\n{question}"
+
+    conv = copy.deepcopy(conv_templates[conv_template])
+    conv.append_message(conv.roles[0], full_question)
+    conv.append_message(conv.roles[1], None)
+    prompt_question = conv.get_prompt()
+
+    input_ids = tokenizer_image_token(prompt_question, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(device)
+
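+    # Greedy decoding (do_sample=False); the preprocessed frames are passed via `images` with modalities=["video"].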
+    with torch.no_grad():
+        output = model.generate(
+            input_ids,
+            images=video,
+            modalities=["video"],
+            do_sample=False,
+            temperature=0,
+            max_new_tokens=4096,
+        )
+
+    response = tokenizer.batch_decode(output, skip_special_tokens=True)[0].strip()
+    return response
+
+def gradio_interface(video_file, question):
+    if video_file is None:
+        return "Please upload a video file."
+    response = process_video(video_file, question)
+    return response
+
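+# Gradio UI: model description panels, a video upload box, and a question box wired to gradio_interface.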
+with gr.Blocks() as demo:
+    gr.Markdown(title)
+    with gr.Row():
+        with gr.Group():
+            gr.Markdown(description1)
+        with gr.Group():
+            gr.Markdown(description2)
+    with gr.Accordion("Join Us", open=False):
+        gr.Markdown(join_us)
+    with gr.Row():
+        with gr.Column():
+            video_input = gr.Video()
+            question_input = gr.Textbox(label="🙋🏻‍♂️User Question", placeholder="Ask a question about the video...")
+            submit_button = gr.Button("Ask🌋📹LLaVA-Video")
+        output = gr.Textbox(label="🌋📹LLaVA-Video")
+
+    submit_button.click(
+        fn=gradio_interface,
+        inputs=[video_input, question_input],
+        outputs=output
+    )
+
 if __name__ == "__main__":
+    demo.launch(show_error=True, ssr_mode=False)