import glob

import gradio as gr
from PIL import Image

from inference import run_inference
def gradio_app(image_path):
    """Send the image file through the inference pipeline and return the
    rendered predictions for the Gradio interface."""
    predictions = run_inference(image_path)

    # render() draws the predicted boxes onto the image and returns a list of
    # numpy arrays (one per input image); wrap the first one as a PIL image.
    out_img = Image.fromarray(predictions.render()[0])

    return out_img
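
# `run_inference` is provided by the local inference module. As a rough,
# hypothetical sketch of what it is assumed to do (not this repo's actual
# implementation), a minimal YOLOv5 version could look like:
#
#     import torch
#     model = torch.hub.load("ultralytics/yolov5", "custom", path="weights/best.pt")
#
#     def run_inference(image_path):
#         # model(...) returns a Detections object that exposes .render()
#         return model(image_path)
#
# The checkpoint path above is a placeholder, not a file from this repo.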


title = "Seamore"
description = (
    "----eyes in the sea----\n\n"
    "Seamore is trained on 691 classes using 33,667 localized images from "
    "MBARI’s Video Annotation and Reference System (VARS). "
    "We used the PyTorch framework and the YOLOv5 ‘YOLOv5x’ pretrained "
    "checkpoint to train for 28 epochs with a batch size of 18 and an "
    "image size of ."
)
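
# For reference, the training run described above roughly corresponds to a
# YOLOv5 command along these lines (the dataset yaml name is an assumption,
# not taken from this repo):
#
#     python train.py --weights yolov5x.pt --data vars.yaml --epochs 28 --batch-size 18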

examples = glob.glob("images/*.png")

gr.Interface(gradio_app,
             inputs=[gr.inputs.Image(type="filepath")],
             outputs=gr.outputs.Image(type="pil"),
             enable_queue=True,
             title=title,
             description=description,
             examples=examples).launch()