import gradio as gr
import torch
from PIL import Image
import json
from ultralytics import YOLO
# Images
torch.hub.download_url_to_file('https://i.imgur.com/4GmZXID.jpg', '1.jpg')
torch.hub.download_url_to_file('https://i.imgur.com/ktIGRvs.jpg', '2.jpg')
torch.hub.download_url_to_file('https://i.imgur.com/fSEsXoE.jpg', '3.jpg')
torch.hub.download_url_to_file('https://i.imgur.com/lsVJRzd.jpg', '4.jpg')
torch.hub.download_url_to_file('https://i.imgur.com/1OFmJd1.jpg', '5.jpg')
torch.hub.download_url_to_file('https://i.imgur.com/GhfAWMJ.jpg', '6.jpg')
# Model
# model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # force_reload=True to update
model = torch.hub.load('./yolov5', 'custom', path='plate.pt', source="local")
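# Note: `YOLO` from ultralytics is imported above but not used by this script.
# A minimal sketch of loading the same weights through the Ultralytics API instead
# (assuming plate.pt is compatible with that loader) would be:
# model = YOLO('plate.pt')
# The results object returned by that API differs from the torch.hub one, so yolo()
# below would need to be adapted accordingly.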
def yolo(im):
    model.conf = 0.6  # NMS confidence threshold
    # g = (size / max(im.size))  # gain
    # im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS)  # resize
    results = model(im, size=1280)  # inference
    results.render()  # updates results.ims with boxes and labels
    # keep only the 'name' column; remove [['name']] to return all detection columns
    df = results.pandas().xyxy[0].sort_values('xmin')[['name']].to_json(orient="records")
    res = json.loads(df)
    return [Image.fromarray(results.ims[0]), res]
    # return [Image.fromarray(results.ims[0])]
inputs = gr.inputs.Image(type='pil', label="Original Image")
outputs = [gr.outputs.Image(type="pil", label="Output Image"),
           gr.outputs.JSON(label="Output JSON")]
# outputs = gr.outputs.Image(type="pil", label="Output Image")
title = "TW_plate_number"
description = "Detect Taiwan license plate numbers with a custom YOLOv5 model."
examples = [['1.jpg'], ['2.jpg'], ['3.jpg'], ['4.jpg'], ['5.jpg'], ['6.jpg']]
gr.Interface(yolo, inputs, outputs, title=title, description=description, examples=examples, theme="huggingface").launch(enable_queue=True)