import gradio as gr
from transformers import DetrImageProcessor, DetrForObjectDetection
import torch

# Load the image processor and the fine-tuned DETR model once at startup,
# so they are not reloaded on every request.
processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("Guy2/AirportSec-100epoch")

def analyze(image):
    # Gradio's "image" input passes a numpy array of shape (height, width, channels).
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Rescale the predicted boxes to the original image size (height, width).
    target_sizes = torch.tensor([image.shape[:2]])
    results = processor.post_process_object_detection(
        outputs, target_sizes=target_sizes, threshold=0.9
    )[0]

    # Report every detection above the threshold instead of returning after the first one.
    detections = []
    for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        box = [round(i, 2) for i in box.tolist()]
        detections.append(
            f"Detected {model.config.id2label[label.item()]} with confidence "
            f"{round(score.item(), 3)} at location {box}"
        )
    return "\n".join(detections) if detections else "No objects detected above the 0.9 threshold."

app = gr.Interface(fn=analyze, inputs="image", outputs="text")
app.launch()
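
# Optional local smoke test (a minimal sketch, not part of the original Space):
# uncomment to call analyze() directly with a numpy array, mirroring what Gradio
# passes in. The file name "sample.jpg" is an assumed placeholder for any local
# test image.
#
# from PIL import Image
# import numpy as np
#
# test_image = np.array(Image.open("sample.jpg").convert("RGB"))
# print(analyze(test_image))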