from transformers import pipeline
from PIL import Image
import gradio as gr
import numpy as np

# Load the Hugging Face depth estimation pipelines
pipe_base = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf")
pipe_small = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-small-hf")
pipe_intel = pipeline(task="depth-estimation", model="Intel/dpt-swinv2-tiny-256")
pipe_beit = pipeline(task="depth-estimation", model="Intel/dpt-beit-base-384")
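# Note: by default the pipelines run on the CPU. If a GPU is available, passing
# device=0 to pipeline(...) is a common way to speed up inference (illustrative,
# not part of the original app):
# pipe_base = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-base-hf", device=0)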
def estimate_depths(image):
    # Perform depth estimation with each pipeline
    depth_base = pipe_base(image)["depth"]
    depth_small = pipe_small(image)["depth"]
    depth_intel = pipe_intel(image)["depth"]
    depth_beit = pipe_beit(image)["depth"]

    # Normalize depths for visualization
    depth_base = normalize_depth(depth_base)
    depth_small = normalize_depth(depth_small)
    depth_intel = normalize_depth(depth_intel)
    depth_beit = normalize_depth(depth_beit)

    return depth_base, depth_small, depth_intel, depth_beit
def normalize_depth(depth_map):
    # The pipeline returns the "depth" entry as a PIL image; convert it to a float
    # array first, since PIL images have no min()/max()/astype() methods.
    depth_map = np.array(depth_map, dtype=np.float32)
    # Normalize depth map values to the range [0, 255] for visualization
    normalized_depth = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min()) * 255
    return normalized_depth.astype(np.uint8)
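# Illustrative note: the normalization above is purely relative, e.g. a raw map with
# values in [0.4, 7.9] maps 0.4 -> 0 and 7.9 -> 255, so the outputs show relative
# depth per image rather than metric distances.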
# Create a Gradio interface
iface = gr.Interface(
    fn=estimate_depths,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Image(type="numpy", label="LiheYoung/depth-anything-base-hf"),
        gr.Image(type="numpy", label="LiheYoung/depth-anything-small-hf"),
        gr.Image(type="numpy", label="Intel/dpt-swinv2-tiny-256"),
        gr.Image(type="numpy", label="Intel/dpt-beit-base-384")
    ],
    title="Multi-Model Depth Estimation",
    description="Upload an image to get depth estimation maps from multiple models."
)
# Launch the Gradio app
iface.launch()
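# Optional (not in the original app): iface.launch(share=True) would also create a
# temporary public link, which can be handy for quick sharing outside of Spaces.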
""" | |
from transformers import pipeline | |
from PIL import Image | |
import requests | |
# load pipe | |
pipe = pipeline(task="depth-estimation", model="LiheYoung/depth-anything-small-hf") | |
# load image | |
url = 'http://images.cocodataset.org/val2017/000000039769.jpg' | |
image = Image.open(requests.get(url, stream=True).raw) | |
# inference | |
depth = pipe(image)["depth"] | |
""" |