import gradio as gr
import torch
from PIL import Image
from transformers import CLIPProcessor, CLIPModel

# Load the CLIP model and its matching processor from the same checkpoint.
# (Loading the processor from an unrelated "tokenizer" path is what caused
# the runtime error; model and processor must come from the same repo.)
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
def generate_answer(image):
    # Natural-language prompts describing each lighting condition for CLIP.
    classes = [
        "A picture of a room filled with abundant natural light, with many or few windows, regardless of whether it is night, and without objects blocking the light.",
        "A picture of a room in the dark.",
        "A picture of a room lit by artificial lights such as lamps or overhead lights.",
    ]
    labels = ["natural_light", "no_light", "artificial_light"]
    inputs = processor(text=classes, images=image, return_tensors="pt", padding=True)
    with torch.no_grad():  # inference only, no gradients needed
        outputs = model(**inputs)
    logits_per_image = outputs.logits_per_image  # image-text similarity scores
    probs = logits_per_image.softmax(dim=1)      # normalize scores to probabilities
    probabilities_list = probs.squeeze().tolist()
    return {label: probability for label, probability in zip(labels, probabilities_list)}
image_input = gr.Image(type="pil", label="Upload Image")

iface = gr.Interface(
    fn=generate_answer,
    inputs=[image_input],
    outputs=gr.Label(label="Lighting Probabilities"),  # renders the {label: probability} dict
    title="Room Lighting Score",
    description="Upload a room image",
)

iface.launch()
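
# Hypothetical local smoke test (assumes a sample file "room.jpg" exists in
# the working directory); uncomment and run instead of iface.launch() to
# check the classifier without starting the web UI:
#
#   print(generate_answer(Image.open("room.jpg")))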