# File size: 1,528 Bytes
# 81d10ae
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
import gradio as gr
from transformers import pipeline
import torch
import numpy as np
from PIL import Image
import gradio as gr
from gradio_client import Client
import os
import json
import spaces

# Local inference pipelines for the two models served directly from this Space.
# The first positional argument of `pipeline` is the task name.
dpt_beit = pipeline("depth-estimation", model="Intel/dpt-beit-base-384")
depth_anything = pipeline("depth-estimation", model="nielsr/depth-anything-small")
@spaces.GPU
def depth_anything_inference(image_path):
  """Run the Depth Anything pipeline and return its predicted depth image."""
  prediction = depth_anything(image_path)
  return prediction["depth"]

@spaces.GPU
def dpt_beit_inference(image):
  """Run the DPT (BeiT backbone) pipeline and return its predicted depth image."""
  prediction = dpt_beit(image)
  return prediction["depth"]

def dpt_large(image_path):
    """Query the remote DPT-Large Space for a depth map.

    Returns a PIL.Image on success, or None when the remote Space is
    unreachable (None renders as an empty gr.Image output).
    """
    try:
        client = Client("https://nielsr-dpt-depth-estimation.hf.space/")
        # client.predict returns a local file path to the predicted depth map.
        return Image.open(client.predict(image_path))
    except Exception:
        gr.Warning("The DPT-Large Space is currently unavailable. Please try again later.")
        # None is the canonical "no image" value for a gr.Image output;
        # the previous "" made Gradio try to open an empty file path and error.
        return None

def infer(image):
  """Run all three depth estimators on the same input image."""
  remote_large = dpt_large(image)
  local_beit = dpt_beit_inference(image)
  local_anything = depth_anything_inference(image)
  return remote_large, local_beit, local_anything


# One output slot per model, in the same order `infer` returns them.
output_images = [
    gr.Image(type="pil", label="DPT-Large"),
    gr.Image(type="pil", label="DPT with BeiT Backbone"),
    gr.Image(type="pil", label="Depth Anything"),
]

iface = gr.Interface(
    fn=infer,
    inputs=gr.Image(type="pil"),
    outputs=output_images,
    title="Compare Depth Estimation Models",
    description="In this Space you can compare various depth estimation models",
    examples=[["bee.JPG"]],
)
iface.launch(debug=True)