File size: 1,298 Bytes
c184c0e
d2ff88f
c184c0e
29cef8e
d2ff88f
29cef8e
 
 
 
 
 
 
 
 
 
d2ff88f
 
 
 
 
 
35301df
d2ff88f
 
 
 
35301df
d2ff88f
 
 
b957ec1
d2ff88f
 
 
 
 
 
 
29cef8e
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
# Project-local imports: model factory, mask visualization helper, and the
# Pascal VOC dataset definition (used below only for its CLASSES list).
from models.builder import build_model
from visualization import mask2rgb
from segmentation.datasets import PascalVOCDataset

import os
from hydra import compose, initialize
from PIL import Image
import matplotlib.pyplot as plt
from torchvision import transforms as T
import torch.nn.functional as F
import numpy as np
from operator import itemgetter 
import torch
import warnings

# Silence all warnings for a cleaner demo log (deliberate best-effort choice).
warnings.filterwarnings("ignore")
# Register the hydra config search path; must run before compose() below.
# NOTE(review): initialize() can only be called once per process — re-running
# this script in the same interpreter will raise; confirm if that matters here.
initialize(config_path="configs", version_base=None)

from huggingface_hub import Repository

# Clone the model repo from the Hugging Face Hub into ./clip-dinoiser so the
# checkpoint file referenced below exists locally.
# NOTE(review): `Repository` and its `use_auth_token` argument are deprecated
# in recent huggingface_hub releases in favor of `snapshot_download`/`token` —
# confirm the pinned huggingface_hub version before upgrading.
# NOTE(review): the auth token is read from the env var literally named
# "token" — verify that is the variable actually set in the deploy environment.
repo = Repository(
	local_dir="clip-dinoiser",
	clone_from="ariG23498/clip-dinoiser",
	use_auth_token=os.environ.get("token")
)

# Path of the checkpoint inside the repo cloned above.
check_path = 'clip-dinoiser/checkpoints/last.pt'
# Prefer GPU when available; everything below is placed on this device.
device = "cuda" if torch.cuda.is_available() else "cpu"

# NOTE(review): torch.load unpickles arbitrary objects; since the checkpoint
# comes from a remote repo, consider weights_only=True once the pinned torch
# version supports it — confirm compatibility.
check = torch.load(check_path, map_location=device)
dinoclip_cfg = "clip_dinoiser.yaml"
# Resolve the model config from the "configs" path registered by initialize().
cfg = compose(config_name=dinoclip_cfg)

# Build the model with Pascal VOC class names as the open-vocabulary prompts.
model = build_model(cfg.model, class_names=PascalVOCDataset.CLASSES).to(device)
model.clip_backbone.decode_head.use_templates=False # switching off the imagenet templates for fast inference
# strict=False: checkpoint keys that don't match the built model are silently
# skipped — NOTE(review): mismatched keys would go unnoticed; verify coverage.
model.load_state_dict(check['model_state_dict'], strict=False)
model = model.eval()

import gradio as gr

def greet(name):
    """Return a friendly greeting for *name* (placeholder demo handler)."""
    return f"Hello {name}!!"

# Minimal text-in/text-out Gradio UI wired to the placeholder greet() handler.
# NOTE(review): the loaded segmentation model is never used by this interface —
# presumably the real inference handler is still to be wired in; confirm.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# Starts the web server (blocking I/O).
iface.launch()