Update app.py
app.py CHANGED
@@ -18,7 +18,7 @@ from threading import Thread
 tk = os.environ.get("HF_TOKEN")
 #login(tk)
 ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
-model = MllamaForConditionalGeneration.from_pretrained(ckpt,torch_dtype=torch.bfloat16).to("cuda")
+model = MllamaForConditionalGeneration.from_pretrained(ckpt,torch_dtype=torch.bfloat16).to("cpu")
 processor = AutoProcessor.from_pretrained(ckpt)
 r = sr.Recognizer()
 
@@ -143,7 +143,7 @@ def response(state:AppState = AppState()):
     if images == []:
         inputs = processor(text=texts, return_tensors="pt").to("cuda")
     else:
-        inputs = processor(text=texts, images=images, return_tensors="pt").to("cuda")
+        inputs = processor(text=texts, images=images, return_tensors="pt").to("cpu")
     streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens)
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
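After this commit the model and the image branch are placed on "cpu", while the text-only branch at line 144 still calls .to("cuda"); on a CPU-only Space that branch raises a CUDA error, and on a GPU Space it would hand CUDA tensors to a CPU model. Below is a minimal sketch of keeping the model and both input branches on one device; the device variable and build_inputs helper are illustrative assumptions, not part of the committed app.py.

# Sketch only: resolve the device once and reuse it everywhere, so the two
# branches of response() cannot disagree with the model's placement.
import torch
from transformers import AutoProcessor, MllamaForConditionalGeneration

ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
device = "cuda" if torch.cuda.is_available() else "cpu"   # assumption, not in app.py

model = MllamaForConditionalGeneration.from_pretrained(
    ckpt, torch_dtype=torch.bfloat16
).to(device)
processor = AutoProcessor.from_pretrained(ckpt)

def build_inputs(texts, images):
    # Hypothetical helper mirroring the if/else at lines 143-146: both branches
    # move their tensors to the same device as the model's weights.
    if not images:
        return processor(text=texts, return_tensors="pt").to(device)
    return processor(text=texts, images=images, return_tensors="pt").to(device)

Resolving the device once at import time also means a later cuda/cpu switch is a one-line change instead of three separate edits.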
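Lines 147-149 set up streaming generation: model.generate runs on a background Thread and pushes decoded text into the TextIteratorStreamer. The loop that drains the streamer is outside the hunk shown, so the consumer side below is an assumption; it reuses model, processor, and build_inputs from the sketch above, and max_new_tokens is an illustrative value rather than the app's setting.

# Sketch of the streaming pattern behind lines 147-149: generation runs in a
# background thread while the caller iterates the streamer for partial text.
from threading import Thread
from transformers import TextIteratorStreamer

max_new_tokens = 250                       # illustrative; the app's value is not shown
inputs = build_inputs(texts="What is the capital of France?", images=[])

# The streamer only needs a decode() method, which the processor forwards to
# its tokenizer, so passing the processor (as the diff does) works.
streamer = TextIteratorStreamer(processor, skip_special_tokens=True, skip_prompt=True)
generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=max_new_tokens)

thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

reply = ""
for chunk in streamer:                     # blocks until the next chunk is decoded
    reply += chunk
    # a Gradio generator would yield `reply` here to stream the answer to the UI
thread.join()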