Update chatbot.py

chatbot.py CHANGED (+3 -3)
@@ -27,8 +27,8 @@ import io # Add this import for working with image bytes
 
 # You can also use models that are commented below
 # model_id = "llava-hf/llava-interleave-qwen-0.5b-hf"
-
-model_id = "llava-hf/llava-interleave-qwen-7b-dpo-hf"
+model_id = "llava-hf/llava-interleave-qwen-7b-hf"
+# model_id = "llava-hf/llava-interleave-qwen-7b-dpo-hf"
 processor = LlavaProcessor.from_pretrained(model_id)
 model = LlavaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True)
 model.to("cuda")
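For reference, the model-loading block after this hunk looks like the sketch below. It is a minimal sketch assuming the imports used elsewhere in chatbot.py; note that recent transformers releases spell the flash-attention flag as attn_implementation="flash_attention_2" instead of the use_flash_attention_2=True kept in the diff.

import torch
from transformers import LlavaProcessor, LlavaForConditionalGeneration

# Checkpoint selected by this commit; the DPO variant stays available as a comment.
model_id = "llava-hf/llava-interleave-qwen-7b-hf"
# model_id = "llava-hf/llava-interleave-qwen-7b-dpo-hf"

processor = LlavaProcessor.from_pretrained(model_id)
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,   # half precision so the 7B model fits on a single GPU
    use_flash_attention_2=True,  # as in the diff; requires the flash-attn package
    low_cpu_mem_usage=True,      # stream weights in rather than materializing a full CPU copy
)
model.to("cuda")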
@@ -197,7 +197,7 @@ generate_kwargs = dict( max_new_tokens=4000, do_sample=True, stream=True, detail
 
 system_llava = "<|im_start|>system\nYou are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. Your task is to fulfill users query in best possible way. You are provided with image, videos and 3d structures as input with question your task is to give best possible result and explaination to user.<|im_end|>"
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=60, queue=False)
 def model_inference(
     user_prompt,
     chat_history,
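The second hunk completes the decorator that was left truncated. On ZeroGPU Spaces, spaces.GPU attaches a GPU only while the decorated function runs, and duration=60 requests up to roughly 60 seconds per call. Below is a minimal sketch of how the entry point is wired up; the function body is elided in the diff, and queue=False is copied verbatim from the commit rather than documented here.

import spaces

# A GPU is allocated only for the duration of each call to this function.
@spaces.GPU(duration=60, queue=False)
def model_inference(user_prompt, chat_history):
    # the processor / model.generate logic from the rest of chatbot.py runs here
    ...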
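The system_llava string uses the ChatML-style <|im_start|>/<|im_end|> tags that the Qwen-based LLaVA-interleave checkpoints expect. Here is a hedged sketch of how such a system prompt is typically combined with a user turn before being handed to the processor; the actual assembly in chatbot.py is not shown in this diff, so names like user_text and image are illustrative.

from PIL import Image

image = Image.open("example.jpg")   # placeholder input image
user_text = "Describe this image."

# ChatML-style prompt; <image> marks where the processor splices in image tokens.
prompt = (
    system_llava
    + "<|im_start|>user <image>\n" + user_text + "<|im_end|>"
    + "<|im_start|>assistant"
)
inputs = processor(text=prompt, images=[image], return_tensors="pt").to("cuda")
output_ids = model.generate(**inputs, max_new_tokens=4000, do_sample=True)
print(processor.decode(output_ids[0], skip_special_tokens=True))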