update description
Browse files
app.py
CHANGED
@@ -58,7 +58,7 @@ iface = gr.Interface(
     inputs=gr.Image(type="pil"),
     outputs=gr.Image(type="pil"),
     title="Monocular Depth Estimation: Omnidata DPT-Hybrid",
-    description="Upload an image to estimate monocular depth.",
+    description="Upload an image to estimate monocular depth. To use these models locally, you can use `torch.hub.load`. Code and examples in our [Github](https://github.com/alexsax/omnidata_models) repository. More information and the paper in the project page [Omnidata: A Scalable Pipeline for Making Multi-Task Mid-Level Vision Datasets from 3D Scans](https://omnidata.epfl.ch/).",
     examples=[
         "https://github.com/EPFL-VILAB/omnidata/blob/main/omnidata_tools/torch/assets/test1_rgb.png?raw=true",
         "https://github.com/EPFL-VILAB/omnidata/blob/main/omnidata_tools/torch/assets/demo/test2.png?raw=true",