"""Gradio Space that compares three monocular depth-estimation models side by side.

The local models (DPT-BeiT and Depth Anything) run via transformers pipelines on
GPU (allocated per-call through ``@spaces.GPU``); DPT-Large is queried remotely
through another Hugging Face Space.
"""

import gradio as gr
from transformers import pipeline
from PIL import Image
from gradio_client import Client
import spaces

# Both pipelines are loaded once at startup and shared across requests.
dpt_beit = pipeline(task="depth-estimation", model="Intel/dpt-beit-base-384")
depth_anything = pipeline(task="depth-estimation", model="nielsr/depth-anything-small")


@spaces.GPU
def depth_anything_inference(image_path):
    """Return the Depth Anything depth map (PIL image) for the given input image."""
    return depth_anything(image_path)["depth"]


@spaces.GPU
def dpt_beit_inference(image):
    """Return the DPT-BeiT depth map (PIL image) for the given input image."""
    return dpt_beit(image)["depth"]


def dpt_large(image_path):
    """Query the remote DPT-Large Space and return its depth map.

    Returns None (an empty gr.Image output) and shows a warning if the remote
    Space is unavailable — best-effort by design, so the other two models still
    render their results.
    """
    try:
        client = Client("https://nielsr-dpt-depth-estimation.hf.space/")
        # The remote Space returns a file path; open it as a PIL image.
        return Image.open(client.predict(image_path))
    except Exception:
        gr.Warning("The DPT-Large Space is currently unavailable. Please try again later.")
        # None is the valid "no image" value for a gr.Image output
        # (the original returned "", which gr.Image cannot render).
        return None


def infer(image):
    """Run all three depth estimators on one image; returns a 3-tuple of depth maps."""
    return dpt_large(image), dpt_beit_inference(image), depth_anything_inference(image)


iface = gr.Interface(
    fn=infer,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="pil", label="DPT-Large"),
        gr.Image(type="pil", label="DPT with BeiT Backbone"),
        gr.Image(type="pil", label="Depth Anything"),
    ],
    title="Compare Depth Estimation Models",
    description="In this Space you can compare various depth estimation models",
    examples=[["bee.JPG"]],
)
iface.launch(debug=True)