import gradio as gr
import os
os.environ["PYOPENGL_PLATFORM"] = "osmesa" #opengl seems to only work with TPU
print(os.environ['PYOPENGL_PLATFORM'])
from OpenGL.osmesa import OSMesaCreateContextAttribs  # fails fast if the OSMesa bindings cannot be imported
import numpy as np
import trimesh
from pyrender import (PerspectiveCamera,
                      DirectionalLight, SpotLight, PointLight,
                      MetallicRoughnessMaterial,
                      Primitive, Mesh, Node, Scene,
                      OffscreenRenderer)
from PIL import Image
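
# Minimal offscreen-rendering smoke test: build a scene containing an axis
# mesh, render it headlessly through OSMesa, and wrap the result in a PIL
# image to confirm that the software rendering stack works in this Space.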
scene = Scene()
axis = trimesh.creation.axis()
axis = Mesh.from_trimesh(axis, smooth=False)
scene.add(axis)
r = OffscreenRenderer(
    viewport_width=720,
    viewport_height=720,
)
color, _ = r.render(scene)  # returns a uint8 (H, W, 3) color image and a depth map
img = Image.fromarray(color)
r.delete()
print(img)  # sanity check: should report a 720x720 RGB PIL image
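
# --- SceneDiffuser Gradio demo (kept below, commented out, for reference) ---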
# import os
# os.environ['PYOPENGL_PLATFORM'] = 'egl'
# import sys
# root_dir = os.path.dirname(os.path.abspath(__file__))
# sys.path.insert(1, os.path.join(root_dir, 'scenediffuser'))
# print(sys.path)
# import gradio as gr
# import interface as IF
# with gr.Blocks(css='style.css') as demo:
#     with gr.Column(elem_id="col-container"):
#         gr.Markdown("<p align='center' style='font-size: 1.5em;'>Diffusion-based Generation, Optimization, and Planning in 3D Scenes</p>")
#         gr.HTML(value="<img src='file/figures/teaser.png' alt='Teaser' width='710px' height='284px' style='display: block; margin: auto;'>")
#         gr.HTML(value="<p align='center' style='font-size: 1.2em; color: #485fc7;'><a href='https://arxiv.org/abs/2301.06015' target='_blank'>arXiv</a> | <a href='https://scenediffuser.github.io/' target='_blank'>Project Page</a> | <a href='https://github.com/scenediffuser/Scene-Diffuser' target='_blank'>Code</a></p>")
#         gr.Markdown("<p align='center'><i>\"SceneDiffuser provides a unified model for solving scene-conditioned generation, optimization, and planning.\"</i></p>")
#
#         ## five tasks
#         ## pose generation
#         with gr.Tab("Pose Generation"):
#             with gr.Row():
#                 with gr.Column(scale=2):
#                     selector1 = gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes', value='MPH16', interactive=True)
#                     with gr.Row():
#                         sample1 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
#                         seed1 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
#                     opt1 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
#                     scale1 = gr.Slider(minimum=0.1, maximum=9.9, step=0.1, label='Scale', interactive=True, value=1.1)
#                     button1 = gr.Button("Run")
#                 with gr.Column(scale=3):
#                     image1 = gr.Gallery(label="Image [Result]").style(grid=[1], height="50")
#                     # model1 = gr.Model3D(clear_color=[255, 255, 255, 255], label="3D Model [Result]")
#             input1 = [selector1, sample1, seed1, opt1, scale1]
#             button1.click(IF.pose_generation, inputs=input1, outputs=[image1])
#         ## motion generation
#         # with gr.Tab("Motion Generation"):
#         #     with gr.Row():
#         #         with gr.Column(scale=2):
#         #             selector2 = gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes', value='MPH16', interactive=True)
#         #             with gr.Row():
#         #                 sample2 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
#         #                 seed2 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
#         #             with gr.Row():
#         #                 withstart = gr.Checkbox(label='With Start', interactive=True, value=False)
#         #                 opt2 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
#         #             scale_opt2 = gr.Slider(minimum=0.1, maximum=9.9, step=0.1, label='Scale', interactive=True, value=1.1)
#         #             button2 = gr.Button("Run")
#         #         with gr.Column(scale=3):
#         #             image2 = gr.Image(label="Result")
#         #     input2 = [selector2, sample2, seed2, withstart, opt2, scale_opt2]
#         #     button2.click(IF.motion_generation, inputs=input2, outputs=image2)
#         with gr.Tab("Motion Generation"):
#             with gr.Row():
#                 with gr.Column(scale=2):
#                     input2 = [
#                         gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes')
#                     ]
#                     button2 = gr.Button("Generate")
#                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Note: the outputs are pre-sampled results. We will deploy a real-time model for this task soon.</p>")
#                 with gr.Column(scale=3):
#                     output2 = gr.Image(label="Result")
#             button2.click(IF.motion_generation, inputs=input2, outputs=output2)
#         ## grasp generation
#         with gr.Tab("Grasp Generation"):
#             with gr.Row():
#                 with gr.Column(scale=2):
#                     input3 = [
#                         gr.Dropdown(choices=['contactdb+apple', 'contactdb+camera', 'contactdb+cylinder_medium', 'contactdb+door_knob', 'contactdb+rubber_duck', 'contactdb+water_bottle', 'ycb+baseball', 'ycb+pear', 'ycb+potted_meat_can', 'ycb+tomato_soup_can'], label='Objects')
#                     ]
#                     button3 = gr.Button("Run")
#                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Note: the outputs are pre-sampled results. We will deploy a real-time model for this task soon.</p>")
#                 with gr.Column(scale=3):
#                     output3 = [
#                         gr.Model3D(clear_color=[255, 255, 255, 255], label="Result")
#                     ]
#             button3.click(IF.grasp_generation, inputs=input3, outputs=output3)
#         ## path planning
#         with gr.Tab("Path Planning"):
#             with gr.Row():
#                 with gr.Column(scale=2):
#                     selector4 = gr.Dropdown(choices=['scene0603_00', 'scene0621_00', 'scene0626_00', 'scene0634_00', 'scene0637_00', 'scene0640_00', 'scene0641_00', 'scene0645_00', 'scene0653_00', 'scene0667_00', 'scene0672_00', 'scene0673_00', 'scene0678_00', 'scene0694_00', 'scene0698_00'], label='Scenes', value='scene0621_00', interactive=True)
#                     mode4 = gr.Radio(choices=['Sampling', 'Planning'], value='Sampling', label='Mode', interactive=True)
#                     with gr.Row():
#                         sample4 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
#                         seed4 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
#                     with gr.Box():
#                         opt4 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
#                         scale_opt4 = gr.Slider(minimum=0.02, maximum=4.98, step=0.02, label='Scale', interactive=True, value=1.0)
#                     with gr.Box():
#                         pla4 = gr.Checkbox(label='Planner Guidance', interactive=True, value=True)
#                         scale_pla4 = gr.Slider(minimum=0.02, maximum=0.98, step=0.02, label='Scale', interactive=True, value=0.2)
#                     button4 = gr.Button("Run")
#                 with gr.Column(scale=3):
#                     image4 = gr.Gallery(label="Image [Result]").style(grid=[1], height="50")
#                     number4 = gr.Number(label="Steps", precision=0)
#                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: 1. Planning in <b>Planning</b> mode may take a long time. 2. The <span style='color: #cc0000;'>red</span> balls represent the planning result, starting with the lightest red ball and ending with the darkest red ball. The <span style='color: #00cc00;'>green</span> ball indicates the target position.</p>")
#             input4 = [selector4, mode4, sample4, seed4, opt4, scale_opt4, pla4, scale_pla4]
#             button4.click(IF.path_planning, inputs=input4, outputs=[image4, number4])
#         ## arm motion planning
#         with gr.Tab("Arm Motion Planning"):
#             gr.Markdown('Coming soon!')
#
# demo.launch()