Improve model card
#2
by nielsr (HF staff) - opened

README.md CHANGED
@@ -1,7 +1,8 @@
 ---
 license: mit
 library_name: transformers
-pipeline_tag: image-to-text
+pipeline_tag: image-text-to-text
+base_model: llava-hf/llava-onevision-qwen2-0.5b-ov-hf
 ---
 
 <h2>[Installation Free!] Quicker Start with Hugging Face AutoModel</h2>
@@ -13,13 +14,13 @@ Do the image quality interpreting chat with q-sit.
 import requests
 from PIL import Image
 import torch
-from transformers import AutoProcessor,
+from transformers import AutoProcessor, AutoModelForImageTextToText
 
 model_id = "zhangzicheng/q-sit-mini"
 # if you want to use primary version, switch to q-sit
 # model_id = "zhangzicheng/q-sit"
 
-model =
+model = AutoModelForImageTextToText.from_pretrained(
     model_id,
     torch_dtype=torch.float16,
     low_cpu_mem_usage=True,
@@ -53,7 +54,7 @@ Do the image quality scoring with q-sit.
 import torch
 import requests
 from PIL import Image
-from transformers import AutoProcessor,
+from transformers import AutoProcessor, AutoModelForImageTextToText, AutoTokenizer
 import numpy as np
 
 def wa5(logits):
@@ -62,7 +63,7 @@ def wa5(logits):
     return np.inner(probs, np.array([1, 0.75, 0.5, 0.25, 0]))
 
 model_id = "zhangzicheng/q-sit-mini"
-model =
+model = AutoModelForImageTextToText.from_pretrained(
     model_id,
     torch_dtype=torch.float16,
     low_cpu_mem_usage=True,