|
import gradio as gr |
|
import numpy as np |
|
from PIL import Image |
|
from transformers import pipeline |
|
|
|
|
|
# Module-level zero-shot classifier backed by OpenAI's CLIP ViT-B/32 checkpoint;
# loaded once at import time so every request reuses the same model.
pipe = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)
|
|
|
|
|
def zero_shot_classification(image, labels_text):
    """Classify an image against a user-supplied set of candidate labels.

    Args:
        image: Input image as a numpy array (Gradio's default "image" input).
        labels_text: Comma-separated candidate labels, e.g. "corn,wheat,rice".

    Returns:
        dict mapping each candidate label to its predicted score, suitable
        for Gradio's "label" output component.
    """
    pil_image = Image.fromarray(np.uint8(image)).convert("RGB")

    # Fix: strip whitespace around each label and drop empty entries so
    # inputs like "corn, wheat," don't send blank/padded labels to the model.
    labels = [label.strip() for label in labels_text.split(",") if label.strip()]

    res = pipe(
        images=pil_image,
        candidate_labels=labels,
        hypothesis_template="This is a photo of a {}",
    )

    return {dic["label"]: dic["score"] for dic in res}
|
|
|
|
|
# Wire the classifier into a simple web UI: an image upload plus a free-text
# label list in, a ranked label widget out.
iface = gr.Interface(
    fn=zero_shot_classification,
    inputs=["image", "text"],
    outputs="label",
    examples=[
        ["corn.jpg", "corn,wheat,rice"],
    ],
    title="Zero-shot Image Classification",
    description="Please add a picture and a list of labels separated by commas to see the zero-shot classification capabilities",
)

iface.launch()
|
|