Spaces: Running on Zero
Update app.py #4
by MaziyarPanahi - opened

app.py CHANGED
@@ -4,10 +4,14 @@ from transformers import AutoModelForCausalLM, AutoProcessor
 import torch
 from PIL import Image
 import subprocess
-subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+# subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
+# models = {
+#     "Qwen/Qwen2-VL-2B-Instruct": AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
+
+# }
 models = {
-    "Qwen/Qwen2-VL-2B-Instruct": AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
+    "Qwen/Qwen2-VL-2B-Instruct": AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True, torch_dtype="auto").cuda().eval()
 
 }
 
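The net effect of the change is to stop installing flash-attn at startup and to load the model with the default attention implementation, keeping the old flash-attention variant around as comments. A minimal sketch of the same idea (not part of this PR; load_qwen2_vl is a hypothetical helper): request flash_attention_2 only when the flash_attn package is actually importable, so the same app.py runs both where flash-attn builds and on hardware like ZeroGPU where it was disabled here.

import torch
from transformers import AutoModelForCausalLM

def load_qwen2_vl(model_id: str = "Qwen/Qwen2-VL-2B-Instruct"):
    # Hypothetical helper, not in the PR: start from the kwargs the PR keeps.
    kwargs = {"trust_remote_code": True, "torch_dtype": "auto"}
    try:
        import flash_attn  # noqa: F401  (only checking availability)
        kwargs["_attn_implementation"] = "flash_attention_2"
    except ImportError:
        pass  # fall back to transformers' default attention implementation
    model = AutoModelForCausalLM.from_pretrained(model_id, **kwargs)
    # Move to GPU only when one is present, then switch to inference mode.
    return model.cuda().eval() if torch.cuda.is_available() else model.eval()

models = {"Qwen/Qwen2-VL-2B-Instruct": load_qwen2_vl()}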