SceneDiffuser committed
Commit 7e7fb89
Parent: fcb4ec9
Files changed (3):
  1. app.py +130 -98
  2. pre-requirements.txt +0 -3
  3. requirements.txt +3 -26
app.py CHANGED
@@ -1,107 +1,139 @@
-import os
-os.environ['PYOPENGL_PLATFORM'] = 'egl'
-import sys
-root_dir = os.path.dirname(os.path.abspath(__file__))
-sys.path.insert(1, os.path.join(root_dir, 'scenediffuser'))
-print(sys.path)
 import gradio as gr
 
-import interface as IF
-
-with gr.Blocks(css='style.css') as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown("<p align='center' style='font-size: 1.5em;'>Diffusion-based Generation, Optimization, and Planning in 3D Scenes</p>")
-        gr.HTML(value="<img src='file/figures/teaser.png' alt='Teaser' width='710px' height='284px' style='display: block; margin: auto;'>")
-        gr.HTML(value="<p align='center' style='font-size: 1.2em; color: #485fc7;'><a href='https://arxiv.org/abs/2301.06015' target='_blank'>arXiv</a> | <a href='https://scenediffuser.github.io/' target='_blank'>Project Page</a> | <a href='https://github.com/scenediffuser/Scene-Diffuser' target='_blank'>Code</a></p>")
-        gr.Markdown("<p align='center'><i>\"SceneDiffuser provides a unified model for solving scene-conditioned generation, optimization, and planning.\"</i></p>")
-
-        ## five task
-        ## pose generation
-        with gr.Tab("Pose Generation"):
-            with gr.Row():
-                with gr.Column(scale=2):
-                    selector1 = gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes', value='MPH16', interactive=True)
-                    with gr.Row():
-                        sample1 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
-                        seed1 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
-                    opt1 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
-                    scale1 = gr.Slider(minimum=0.1, maximum=9.9, step=0.1, label='Scale', interactive=True, value=1.1)
-                    button1 = gr.Button("Run")
-                with gr.Column(scale=3):
-                    image1 = gr.Gallery(label="Image [Result]").style(grid=[1], height="50")
-                    # model1 = gr.Model3D(clear_color=[255, 255, 255, 255], label="3D Model [Result]")
-            input1 = [selector1, sample1, seed1, opt1, scale1]
-            button1.click(IF.pose_generation, inputs=input1, outputs=[image1])
-
-        ## motion generation
-        # with gr.Tab("Motion Generation"):
-        #     with gr.Row():
-        #         with gr.Column(scale=2):
-        #             selector2 = gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes', value='MPH16', interactive=True)
-        #             with gr.Row():
-        #                 sample2 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
-        #                 seed2 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
-        #             with gr.Row():
-        #                 withstart = gr.Checkbox(label='With Start', interactive=True, value=False)
-        #                 opt2 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
-        #                 scale_opt2 = gr.Slider(minimum=0.1, maximum=9.9, step=0.1, label='Scale', interactive=True, value=1.1)
-        #             button2 = gr.Button("Run")
-        #         with gr.Column(scale=3):
-        #             image2 = gr.Image(label="Result")
-        #     input2 = [selector2, sample2, seed2, withstart, opt2, scale_opt2]
-        #     button2.click(IF.motion_generation, inputs=input2, outputs=image2)
-        with gr.Tab("Motion Generation"):
-            with gr.Row():
-                with gr.Column(scale=2):
-                    input2 = [
-                        gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes')
-                    ]
-                    button2 = gr.Button("Generate")
-                    gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: the output results are pre-sampled results. We will deploy a real-time model for this task soon.</p>")
-                with gr.Column(scale=3):
-                    output2 = gr.Image(label="Result")
-            button2.click(IF.motion_generation, inputs=input2, outputs=output2)
-
-        ## grasp generation
-        with gr.Tab("Grasp Generation"):
-            with gr.Row():
-                with gr.Column(scale=2):
-                    input3 = [
-                        gr.Dropdown(choices=['contactdb+apple', 'contactdb+camera', 'contactdb+cylinder_medium', 'contactdb+door_knob', 'contactdb+rubber_duck', 'contactdb+water_bottle', 'ycb+baseball', 'ycb+pear', 'ycb+potted_meat_can', 'ycb+tomato_soup_can'], label='Objects')
-                    ]
-                    button3 = gr.Button("Run")
-                    gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: the output results are pre-sampled results. We will deploy a real-time model for this task soon.</p>")
-                with gr.Column(scale=3):
-                    output3 = [
-                        gr.Model3D(clear_color=[255, 255, 255, 255], label="Result")
-                    ]
-            button3.click(IF.grasp_generation, inputs=input3, outputs=output3)
-
-        ## path planning
-        with gr.Tab("Path Planing"):
-            with gr.Row():
-                with gr.Column(scale=2):
-                    selector4 = gr.Dropdown(choices=['scene0603_00', 'scene0621_00', 'scene0626_00', 'scene0634_00', 'scene0637_00', 'scene0640_00', 'scene0641_00', 'scene0645_00', 'scene0653_00', 'scene0667_00', 'scene0672_00', 'scene0673_00', 'scene0678_00', 'scene0694_00', 'scene0698_00'], label='Scenes', value='scene0621_00', interactive=True)
-                    mode4 = gr.Radio(choices=['Sampling', 'Planning'], value='Sampling', label='Mode', interactive=True)
-                    with gr.Row():
-                        sample4 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
-                        seed4 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
-                    with gr.Box():
-                        opt4 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
-                        scale_opt4 = gr.Slider(minimum=0.02, maximum=4.98, step=0.02, label='Scale', interactive=True, value=1.0)
-                    with gr.Box():
-                        pla4 = gr.Checkbox(label='Planner Guidance', interactive=True, value=True)
-                        scale_pla4 = gr.Slider(minimum=0.02, maximum=0.98, step=0.02, label='Scale', interactive=True, value=0.2)
-                    button4 = gr.Button("Run")
-                with gr.Column(scale=3):
-                    image4 = gr.Gallery(label="Image [Result]").style(grid=[1], height="50")
-                    number4 = gr.Number(label="Steps", precision=0)
-                    gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: 1. It may take a long time to do planning in <b>Planning</b> mode. 2. The <span style='color: #cc0000;'>red</span> balls represent the planning result, starting with the lightest red ball and ending with the darkest red ball. The <span style='color: #00cc00;'>green</span> ball indicates the target position.</p>")
-            input4 = [selector4, mode4, sample4, seed4, opt4, scale_opt4, pla4, scale_pla4]
-            button4.click(IF.path_planning, inputs=input4, outputs=[image4, number4])
-
-        ## arm motion planning
-        with gr.Tab("Arm Motion Planning"):
-            gr.Markdown('Coming soon!')
-
-demo.launch()
+import os
+os.environ["PYOPENGL_PLATFORM"] = "egl" #opengl seems to only work with TPU
+from OpenGL import EGL
+print(os.environ['PYOPENGL_PLATFORM'])
+
+import numpy as np
+import os
+import trimesh
+from pyrender import PerspectiveCamera,\
+                     DirectionalLight, SpotLight, PointLight,\
+                     MetallicRoughnessMaterial,\
+                     Primitive, Mesh, Node, Scene,\
+                     OffscreenRenderer
+from PIL import Image
+
+scene = Scene()
+axis = trimesh.creation.axis()
+axis = Mesh.from_trimesh(axis, smooth=False)
+scene.add(axis)
+
+r = OffscreenRenderer(
+    viewport_width=720,
+    viewport_height=720,
+)
+color, _ = r.render(scene)
+color = color.astype(np.float32) / 255.0
+img = Image.fromarray((color * 255).astype(np.uint8))
+r.delete()
+print(img)
+
+# import os
+# os.environ['PYOPENGL_PLATFORM'] = 'egl'
+# import sys
+# root_dir = os.path.dirname(os.path.abspath(__file__))
+# sys.path.insert(1, os.path.join(root_dir, 'scenediffuser'))
+# print(sys.path)
+# import gradio as gr
+
+# import interface as IF
+
+# with gr.Blocks(css='style.css') as demo:
+#     with gr.Column(elem_id="col-container"):
+#         gr.Markdown("<p align='center' style='font-size: 1.5em;'>Diffusion-based Generation, Optimization, and Planning in 3D Scenes</p>")
+#         gr.HTML(value="<img src='file/figures/teaser.png' alt='Teaser' width='710px' height='284px' style='display: block; margin: auto;'>")
+#         gr.HTML(value="<p align='center' style='font-size: 1.2em; color: #485fc7;'><a href='https://arxiv.org/abs/2301.06015' target='_blank'>arXiv</a> | <a href='https://scenediffuser.github.io/' target='_blank'>Project Page</a> | <a href='https://github.com/scenediffuser/Scene-Diffuser' target='_blank'>Code</a></p>")
+#         gr.Markdown("<p align='center'><i>\"SceneDiffuser provides a unified model for solving scene-conditioned generation, optimization, and planning.\"</i></p>")
+
+#         ## five task
+#         ## pose generation
+#         with gr.Tab("Pose Generation"):
+#             with gr.Row():
+#                 with gr.Column(scale=2):
+#                     selector1 = gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes', value='MPH16', interactive=True)
+#                     with gr.Row():
+#                         sample1 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
+#                         seed1 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
+#                     opt1 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
+#                     scale1 = gr.Slider(minimum=0.1, maximum=9.9, step=0.1, label='Scale', interactive=True, value=1.1)
+#                     button1 = gr.Button("Run")
+#                 with gr.Column(scale=3):
+#                     image1 = gr.Gallery(label="Image [Result]").style(grid=[1], height="50")
+#                     # model1 = gr.Model3D(clear_color=[255, 255, 255, 255], label="3D Model [Result]")
+#             input1 = [selector1, sample1, seed1, opt1, scale1]
+#             button1.click(IF.pose_generation, inputs=input1, outputs=[image1])
+
+#         ## motion generation
+#         # with gr.Tab("Motion Generation"):
+#         #     with gr.Row():
+#         #         with gr.Column(scale=2):
+#         #             selector2 = gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes', value='MPH16', interactive=True)
+#         #             with gr.Row():
+#         #                 sample2 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
+#         #                 seed2 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
+#         #             with gr.Row():
+#         #                 withstart = gr.Checkbox(label='With Start', interactive=True, value=False)
+#         #                 opt2 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
+#         #                 scale_opt2 = gr.Slider(minimum=0.1, maximum=9.9, step=0.1, label='Scale', interactive=True, value=1.1)
+#         #             button2 = gr.Button("Run")
+#         #         with gr.Column(scale=3):
+#         #             image2 = gr.Image(label="Result")
+#         #     input2 = [selector2, sample2, seed2, withstart, opt2, scale_opt2]
+#         #     button2.click(IF.motion_generation, inputs=input2, outputs=image2)
+#         with gr.Tab("Motion Generation"):
+#             with gr.Row():
+#                 with gr.Column(scale=2):
+#                     input2 = [
+#                         gr.Dropdown(choices=['MPH16', 'MPH1Library', 'N0SittingBooth', 'N3OpenArea'], label='Scenes')
+#                     ]
+#                     button2 = gr.Button("Generate")
+#                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: the output results are pre-sampled results. We will deploy a real-time model for this task soon.</p>")
+#                 with gr.Column(scale=3):
+#                     output2 = gr.Image(label="Result")
+#             button2.click(IF.motion_generation, inputs=input2, outputs=output2)
+
+#         ## grasp generation
+#         with gr.Tab("Grasp Generation"):
+#             with gr.Row():
+#                 with gr.Column(scale=2):
+#                     input3 = [
+#                         gr.Dropdown(choices=['contactdb+apple', 'contactdb+camera', 'contactdb+cylinder_medium', 'contactdb+door_knob', 'contactdb+rubber_duck', 'contactdb+water_bottle', 'ycb+baseball', 'ycb+pear', 'ycb+potted_meat_can', 'ycb+tomato_soup_can'], label='Objects')
+#                     ]
+#                     button3 = gr.Button("Run")
+#                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: the output results are pre-sampled results. We will deploy a real-time model for this task soon.</p>")
+#                 with gr.Column(scale=3):
+#                     output3 = [
+#                         gr.Model3D(clear_color=[255, 255, 255, 255], label="Result")
+#                     ]
+#             button3.click(IF.grasp_generation, inputs=input3, outputs=output3)
+
+#         ## path planning
+#         with gr.Tab("Path Planing"):
+#             with gr.Row():
+#                 with gr.Column(scale=2):
+#                     selector4 = gr.Dropdown(choices=['scene0603_00', 'scene0621_00', 'scene0626_00', 'scene0634_00', 'scene0637_00', 'scene0640_00', 'scene0641_00', 'scene0645_00', 'scene0653_00', 'scene0667_00', 'scene0672_00', 'scene0673_00', 'scene0678_00', 'scene0694_00', 'scene0698_00'], label='Scenes', value='scene0621_00', interactive=True)
+#                     mode4 = gr.Radio(choices=['Sampling', 'Planning'], value='Sampling', label='Mode', interactive=True)
+#                     with gr.Row():
+#                         sample4 = gr.Slider(minimum=1, maximum=8, step=1, label='Count', interactive=True, value=1)
+#                         seed4 = gr.Slider(minimum=0, maximum=2 ** 16, step=1, label='Seed', interactive=True, value=2023)
+#                     with gr.Box():
+#                         opt4 = gr.Checkbox(label='Optimizer Guidance', interactive=True, value=True)
+#                         scale_opt4 = gr.Slider(minimum=0.02, maximum=4.98, step=0.02, label='Scale', interactive=True, value=1.0)
+#                     with gr.Box():
+#                         pla4 = gr.Checkbox(label='Planner Guidance', interactive=True, value=True)
+#                         scale_pla4 = gr.Slider(minimum=0.02, maximum=0.98, step=0.02, label='Scale', interactive=True, value=0.2)
+#                     button4 = gr.Button("Run")
+#                 with gr.Column(scale=3):
+#                     image4 = gr.Gallery(label="Image [Result]").style(grid=[1], height="50")
+#                     number4 = gr.Number(label="Steps", precision=0)
+#                     gr.HTML("<p style='font-size: 0.9em; color: #555555;'>Notes: 1. It may take a long time to do planning in <b>Planning</b> mode. 2. The <span style='color: #cc0000;'>red</span> balls represent the planning result, starting with the lightest red ball and ending with the darkest red ball. The <span style='color: #00cc00;'>green</span> ball indicates the target position.</p>")
+#             input4 = [selector4, mode4, sample4, seed4, opt4, scale_opt4, pla4, scale_pla4]
+#             button4.click(IF.path_planning, inputs=input4, outputs=[image4, number4])
+
+#         ## arm motion planning
+#         with gr.Tab("Arm Motion Planning"):
+#             gr.Markdown('Coming soon!')
+
+# demo.launch()
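
Note: the new app.py is essentially a headless-rendering smoke test for EGL. Below is a minimal sketch of the same test, assuming an EGL-capable GPU driver. It is not the committed code verbatim: it adds a camera (pyrender raises "Cannot render scene without a camera" otherwise) and ambient light, and drops the committed version's no-op float32 round-trip and duplicate "import os". The camera pose and output filename are illustrative assumptions.

# Headless pyrender smoke test (sketch; assumptions noted inline).
# PYOPENGL_PLATFORM must be set before pyrender/PyOpenGL is imported.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"

import numpy as np
import trimesh
from PIL import Image
from pyrender import Mesh, OffscreenRenderer, PerspectiveCamera, Scene

# Ambient light keeps the otherwise unlit axis mesh from rendering black
# (an addition for this sketch; the committed snippet has no lights).
scene = Scene(ambient_light=np.ones(3))
scene.add(Mesh.from_trimesh(trimesh.creation.axis(), smooth=False))

# A camera is required for rendering; pulling it back 2 units along +z is
# an arbitrary choice for this sketch, not part of the commit.
camera_pose = np.eye(4)
camera_pose[2, 3] = 2.0
scene.add(PerspectiveCamera(yfov=np.pi / 3.0), pose=camera_pose)

r = OffscreenRenderer(viewport_width=720, viewport_height=720)
color, _ = r.render(scene)  # uint8 RGB array of shape (720, 720, 3)
r.delete()

Image.fromarray(color).save("smoke_test.png")  # hypothetical output path
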
pre-requirements.txt CHANGED
@@ -1,3 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/cu113
-torch==1.11.0+cu113
-torchvision==0.12.0+cu113
requirements.txt CHANGED
@@ -1,26 +1,3 @@
--e git+https://github.com/Silverster98/pytorch_kinematics#egg=pytorch_kinematics
-git+https://github.com/Silverster98/pytorch3d@T4_CUDA
-git+https://github.com/otaheri/chamfer_distance.git@d2b524309db114d0f7ce18be6c01b3802cde9791
-git+https://github.com/Silverster98/pointops@T4
-git+https://github.com/nghorbani/human_body_prior
-urdf-parser-py==0.0.4
-einops==0.4.1
-hydra-core==1.2.0
-loguru==0.6.0
-matplotlib==3.5.1
-natsort==8.2.0
-networkx==2.8.6
-omegaconf==2.2.2
-opencv-python==4.6.0.66
-Pillow==9.0.1
-plotly==5.11.0
-protobuf==3.19.4
-pyquaternion==0.9.9
-pyrender==0.1.45
-smplx==0.1.28
-tabulate==0.8.10
-tensorboard==2.8.0
-tqdm==4.62.3
-transforms3d==0.4.1
-transformations==2022.9.26
-trimesh==3.12.7
+pyrender
+trimesh
+pillow
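
Note: together with the deleted pre-requirements.txt above (which carried the pinned CUDA 11.3 torch/torchvision wheels), this trims the environment from the full SceneDiffuser stack down to the three unpinned packages the smoke test imports. A quick sanity-check sketch for the trimmed environment follows; the only package whose import name differs is pillow, which installs as PIL.

# Verify the three remaining requirements import cleanly on a headless machine.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"  # as in app.py; must precede the pyrender import

import importlib

for module in ("trimesh", "PIL", "pyrender"):
    importlib.import_module(module)
    print(f"{module}: OK")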