Spaces:
Running
on
A10G
Running
on
A10G
pharmapsychotic
committed on
Commit
•
3ff6909
1
Parent(s):
a3cd3cd
Update to CLIP Interrogator 0.5.0
Browse files- app.py +1 -22
- requirements.txt +1 -1
app.py
CHANGED
@@ -1,33 +1,12 @@
|
|
1 |
#!/usr/bin/env python3
|
2 |
import gradio as gr
|
3 |
-
import os
|
4 |
from clip_interrogator import Config, Interrogator
|
5 |
-
from huggingface_hub import hf_hub_download
|
6 |
from share_btn import community_icon_html, loading_icon_html, share_js
|
7 |
|
8 |
MODELS = ['ViT-L (best for Stable Diffusion 1.*)', 'ViT-H (best for Stable Diffusion 2.*)']
|
9 |
|
10 |
-
# download preprocessed files
|
11 |
-
PREPROCESS_FILES = [
|
12 |
-
'ViT-H-14_laion2b_s32b_b79k_artists.pkl',
|
13 |
-
'ViT-H-14_laion2b_s32b_b79k_flavors.pkl',
|
14 |
-
'ViT-H-14_laion2b_s32b_b79k_mediums.pkl',
|
15 |
-
'ViT-H-14_laion2b_s32b_b79k_movements.pkl',
|
16 |
-
'ViT-H-14_laion2b_s32b_b79k_trendings.pkl',
|
17 |
-
'ViT-L-14_openai_artists.pkl',
|
18 |
-
'ViT-L-14_openai_flavors.pkl',
|
19 |
-
'ViT-L-14_openai_mediums.pkl',
|
20 |
-
'ViT-L-14_openai_movements.pkl',
|
21 |
-
'ViT-L-14_openai_trendings.pkl',
|
22 |
-
]
|
23 |
-
print("Download preprocessed cache files...")
|
24 |
-
for file in PREPROCESS_FILES:
|
25 |
-
path = hf_hub_download(repo_id="pharma/ci-preprocess", filename=file, cache_dir="cache")
|
26 |
-
cache_path = os.path.dirname(path)
|
27 |
-
|
28 |
-
|
29 |
# load BLIP and ViT-L https://huggingface.co/openai/clip-vit-large-patch14
|
30 |
-
config = Config(
|
31 |
ci_vitl = Interrogator(config)
|
32 |
ci_vitl.clip_model = ci_vitl.clip_model.to("cpu")
|
33 |
|
|
|
1 |
#!/usr/bin/env python3
|
2 |
import gradio as gr
|
|
|
3 |
from clip_interrogator import Config, Interrogator
|
|
|
4 |
from share_btn import community_icon_html, loading_icon_html, share_js
|
5 |
|
6 |
MODELS = ['ViT-L (best for Stable Diffusion 1.*)', 'ViT-H (best for Stable Diffusion 2.*)']
|
7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
# load BLIP and ViT-L https://huggingface.co/openai/clip-vit-large-patch14
|
9 |
+
config = Config(clip_model_name="ViT-L-14/openai")
|
10 |
ci_vitl = Interrogator(config)
|
11 |
ci_vitl.clip_model = ci_vitl.clip_model.to("cpu")
|
12 |
|
requirements.txt
CHANGED
@@ -10,4 +10,4 @@ timm
|
|
10 |
transformers==4.15.0
|
11 |
open_clip_torch
|
12 |
requests
|
13 |
-
clip-interrogator==0.
|
|
|
10 |
transformers==4.15.0
|
11 |
open_clip_torch
|
12 |
requests
|
13 |
+
clip-interrogator==0.5.0
|