import gradio as gr
from transformers import pipeline
import torch
import numpy as np
from PIL import Image
from gradio_client import Client
import os
import spaces
import json
from gradio_depth_pred import create_demo as create_depth_pred_demo
from gradio_im_to_3d import create_demo as create_im_to_3d_demo

# ZoeDepth (metric depth) is loaded from torch.hub; the three models below
# are relative-depth estimators run through the transformers pipeline.
model = torch.hub.load("isl-org/ZoeDepth", "ZoeD_N", pretrained=True).to("cuda").eval()

# dpt_beit = pipeline(task="depth-estimation", model="Intel/dpt-beit-base-384", device=0)
dpt_beit = pipeline(task="depth-estimation", model="Intel/dpt-beit-large-512", device=0)
# depth_anything = pipeline(task="depth-estimation", model="nielsr/depth-anything-small", device=0)
depth_anything = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-large-hf", device=0)
dpt_large = pipeline(task="depth-estimation", model="Intel/dpt-large", device=0)


def depth_anything_inference(img):
    return depth_anything(img)["depth"]


def dpt_beit_inference(img):
    return dpt_beit(img)["depth"]


def dpt_large_inference(img):
    return dpt_large(img)["depth"]


@spaces.GPU
def infer(img):
    # Run all three relative-depth models on the same image so their
    # outputs can be compared side by side in the UI.
    if img is None:
        return None, None, None
    return dpt_large_inference(img), dpt_beit_inference(img), depth_anything_inference(img)


css = """
#mkd {
  height: 500px;
  overflow: auto;
  border: 1px solid #ccc;
}
#img-display-container {
  max-height: 50vh;
}
#img-display-input {
  max-height: 40vh;
}
#img-display-output {
  max-height: 40vh;
}
"""

css_zoe = """
#img-display-container {
  max-height: 50vh;
}
#img-display-input {
  max-height: 40vh;
}
#img-display-output {
  max-height: 40vh;
}
"""

with gr.Blocks(css=css) as demo:
    gr.HTML("