Spaces:
Sleeping
Sleeping
dragonjump
committed on
Commit
·
35f3879
1
Parent(s):
1347859
update
Browse files
main.py
CHANGED
@@ -13,7 +13,7 @@ logging.basicConfig(level=logging.INFO)
|
|
13 |
app = FastAPI()
|
14 |
|
15 |
# Qwen2.5-VL Model Setup
|
16 |
-
qwen_checkpoint = "Qwen/Qwen2.5-VL-
|
17 |
min_pixels = 256 * 28 * 28
|
18 |
max_pixels = 1280 * 28 * 28
|
19 |
|
@@ -30,11 +30,12 @@ qwen_model = AutoModelForCausalLM.from_pretrained(
|
|
30 |
)
|
31 |
|
32 |
# LLaMA Model Setup
|
33 |
-
llama_model_name = "
|
34 |
llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_name)
|
35 |
llama_model = AutoModelForCausalLM.from_pretrained(
|
36 |
llama_model_name, torch_dtype=torch.float16, device_map="auto"
|
37 |
)
|
|
|
38 |
|
39 |
@app.get("/")
|
40 |
def read_root():
|
|
|
13 |
app = FastAPI()
|
14 |
|
15 |
# Qwen2.5-VL Model Setup
|
16 |
+
qwen_checkpoint = "Qwen/Qwen2.5-VL-7B-Instruct"
|
17 |
min_pixels = 256 * 28 * 28
|
18 |
max_pixels = 1280 * 28 * 28
|
19 |
|
|
|
30 |
)
|
31 |
|
32 |
# LLaMA Model Setup
|
33 |
+
llama_model_name = "Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2"
|
34 |
llama_tokenizer = AutoTokenizer.from_pretrained(llama_model_name)
|
35 |
llama_model = AutoModelForCausalLM.from_pretrained(
|
36 |
llama_model_name, torch_dtype=torch.float16, device_map="auto"
|
37 |
)
|
38 |
+
|
39 |
|
40 |
@app.get("/")
|
41 |
def read_root():
|