import numpy as np
import torch
import torchvision
import gradio as gr

import TractionModel as plup


def snap(image):
    # Flip the incoming frame vertically (not wired into the Gradio interface below).
    return np.flipud(image)


def init_model(path):
    # Build the model, load trained weights, and switch to eval mode for inference.
    model = plup.create_model()
    model = plup.load_weights(model, path)
    model.eval()
    return model


def inference(image):
    # Preprocess the PIL image, add a batch dimension, and run a forward pass.
    image = vanilla_transform(image).to(device).unsqueeze(0)
    with torch.no_grad():
        pred = model(image)
    # Convert the pull-up logit into a probability for the Gradio label output.
    res = float(torch.sigmoid(pred[1].to("cpu")).numpy()[0])
    return {'pull-up': res, 'no pull-up': 1 - res}


# ImageNet normalization statistics matching the training pipeline.
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]
vanilla_transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize(224),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(norm_mean, norm_std)])

model = init_model("output/model/model-score0.96-f1_10.9-f1_20.99.pt")

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = model.to(device)

iface = gr.Interface(inference,
                     live=True,
                     inputs=gr.inputs.Image(source="upload", tool=None, type='pil'),
                     outputs=gr.outputs.Label())
iface.test_launch()

if __name__ == "__main__":
    iface.launch()