
Dylan Ebert committed
Commit 92f2e1f
1 Parent(s): dec9ec5

update gradio version


debug
Update app.py
validate exists
match requirements
add xformers
flip output
rotate output
rot mat fix

Files changed (5):
  1. README.md +2 -2
  2. app.py +44 -126
  3. core/gs.py +3 -0
  4. core/models.py +11 -0
  5. requirements.txt +5 -5
README.md CHANGED
@@ -1,10 +1,10 @@
 ---
-title: LGM
+title: LGM-Mini
 emoji: 🦀
 colorFrom: red
 colorTo: indigo
 sdk: gradio
-sdk_version: 4.17.0
+sdk_version: 4.19.0
 app_file: app.py
 pinned: false
 license: mit
app.py CHANGED
@@ -1,10 +1,6 @@
 import os
-import tyro
-import imageio
 import numpy as np
-import tqdm
 import torch
-import torch.nn as nn
 import torch.nn.functional as F
 import torchvision.transforms.functional as TF
 from safetensors.torch import load_file
@@ -15,23 +11,23 @@ import gradio as gr
 from huggingface_hub import hf_hub_download
 ckpt_path = hf_hub_download(repo_id="ashawkey/LGM", filename="model_fp16.safetensors")
 
-# NOTE: no -e... else it's not working!
-os.system("pip install ./diff-gaussian-rasterization")
+try:
+    import diff_gaussian_rasterization
+except ImportError:
+    os.system("pip install ./diff-gaussian-rasterization")
 
 import kiui
 from kiui.op import recenter
-from kiui.cam import orbit_camera
 
-from core.options import AllConfigs, Options
+from core.options import Options
 from core.models import LGM
 from mvdream.pipeline_mvdream import MVDreamPipeline
 
-import spaces
-
 IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
 IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
-GRADIO_VIDEO_PATH = 'gradio_output.mp4'
-GRADIO_PLY_PATH = 'gradio_output.ply'
+
+TMP_DIR = '/tmp'
+os.makedirs(TMP_DIR, exist_ok=True)
 
 # opt = tyro.cli(AllConfigs)
 opt = Options(
@@ -67,9 +63,9 @@ model.eval()
 
 tan_half_fov = np.tan(0.5 * np.deg2rad(opt.fovy))
 proj_matrix = torch.zeros(4, 4, dtype=torch.float32, device=device)
-proj_matrix[0, 0] = 1 / tan_half_fov
-proj_matrix[1, 1] = 1 / tan_half_fov
-proj_matrix[2, 2] = (opt.zfar + opt.znear) / (opt.zfar - opt.znear)
+proj_matrix[0, 0] = -1 / tan_half_fov
+proj_matrix[1, 1] = -1 / tan_half_fov
+proj_matrix[2, 2] = - (opt.zfar + opt.znear) / (opt.zfar - opt.znear)
 proj_matrix[3, 2] = - (opt.zfar * opt.znear) / (opt.zfar - opt.znear)
 proj_matrix[2, 3] = 1
 
@@ -94,44 +90,22 @@ pipe_image = pipe_image.to(device)
 bg_remover = rembg.new_session()
 
 # process function
-@spaces.GPU
-def process(input_image, prompt, prompt_neg='', input_elevation=0, input_num_steps=30, input_seed=42):
+def run(input_image):
+    prompt_neg = "ugly, blurry, pixelated obscure, unnatural colors, poor lighting, dull, unclear, cropped, lowres, low quality, artifacts, duplicate"
 
     # seed
-    kiui.seed_everything(input_seed)
+    kiui.seed_everything(42)
 
-    os.makedirs(opt.workspace, exist_ok=True)
-    output_video_path = os.path.join(opt.workspace, GRADIO_VIDEO_PATH)
-    output_ply_path = os.path.join(opt.workspace, GRADIO_PLY_PATH)
+    output_ply_path = os.path.join(TMP_DIR, 'output.ply')
 
-    # text-conditioned
-    if input_image is None:
-        mv_image_uint8 = pipe_text(prompt, negative_prompt=prompt_neg, num_inference_steps=input_num_steps, guidance_scale=7.5, elevation=input_elevation)
-        mv_image_uint8 = (mv_image_uint8 * 255).astype(np.uint8)
-        # bg removal
-        mv_image = []
-        for i in range(4):
-            image = rembg.remove(mv_image_uint8[i], session=bg_remover) # [H, W, 4]
-            # to white bg
-            image = image.astype(np.float32) / 255
-            image = recenter(image, image[..., 0] > 0, border_ratio=0.2)
-            image = image[..., :3] * image[..., -1:] + (1 - image[..., -1:])
-            mv_image.append(image)
-    # image-conditioned (may also input text, but no text usually works too)
-    else:
-        input_image = np.array(input_image) # uint8
-        # bg removal
-        carved_image = rembg.remove(input_image, session=bg_remover) # [H, W, 4]
-        mask = carved_image[..., -1] > 0
-        image = recenter(carved_image, mask, border_ratio=0.2)
-        image = image.astype(np.float32) / 255.0
-        image = image[..., :3] * image[..., 3:4] + (1 - image[..., 3:4])
-        mv_image = pipe_image(prompt, image, negative_prompt=prompt_neg, num_inference_steps=input_num_steps, guidance_scale=5.0, elevation=input_elevation)
-
-    mv_image_grid = np.concatenate([
-        np.concatenate([mv_image[1], mv_image[2]], axis=1),
-        np.concatenate([mv_image[3], mv_image[0]], axis=1),
-    ], axis=0)
+    input_image = np.array(input_image) # uint8
+    # bg removal
+    carved_image = rembg.remove(input_image, session=bg_remover) # [H, W, 4]
+    mask = carved_image[..., -1] > 0
+    image = recenter(carved_image, mask, border_ratio=0.2)
+    image = image.astype(np.float32) / 255.0
+    image = image[..., :3] * image[..., 3:4] + (1 - image[..., 3:4])
+    mv_image = pipe_image("", image, negative_prompt=prompt_neg, num_inference_steps=30, guidance_scale=5.0, elevation=0)
 
     # generate gaussians
     input_image = np.stack([mv_image[1], mv_image[2], mv_image[3], mv_image[0]], axis=0) # [4, 256, 256, 3], float32
@@ -139,7 +113,7 @@ def process(input_image, prompt, prompt_neg='', input_elevation=0, input_num_ste
     input_image = F.interpolate(input_image, size=(opt.input_size, opt.input_size), mode='bilinear', align_corners=False)
     input_image = TF.normalize(input_image, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
 
-    rays_embeddings = model.prepare_default_rays(device, elevation=input_elevation)
+    rays_embeddings = model.prepare_default_rays(device, elevation=0)
     input_image = torch.cat([input_image, rays_embeddings], dim=1).unsqueeze(0) # [1, 4, 9, H, W]
 
     with torch.no_grad():
@@ -149,47 +123,8 @@ def process(input_image, prompt, prompt_neg='', input_elevation=0, input_num_ste
 
     # save gaussians
     model.gs.save_ply(gaussians, output_ply_path)
-
-    # render 360 video
-    images = []
-    elevation = 0
-    if opt.fancy_video:
-        azimuth = np.arange(0, 720, 4, dtype=np.int32)
-        for azi in tqdm.tqdm(azimuth):
-
-            cam_poses = torch.from_numpy(orbit_camera(elevation, azi, radius=opt.cam_radius, opengl=True)).unsqueeze(0).to(device)
-
-            cam_poses[:, :3, 1:3] *= -1 # invert up & forward direction
-
-            # cameras needed by gaussian rasterizer
-            cam_view = torch.inverse(cam_poses).transpose(1, 2) # [V, 4, 4]
-            cam_view_proj = cam_view @ proj_matrix # [V, 4, 4]
-            cam_pos = - cam_poses[:, :3, 3] # [V, 3]
-
-            scale = min(azi / 360, 1)
-
-            image = model.gs.render(gaussians, cam_view.unsqueeze(0), cam_view_proj.unsqueeze(0), cam_pos.unsqueeze(0), scale_modifier=scale)['image']
-            images.append((image.squeeze(1).permute(0,2,3,1).contiguous().float().cpu().numpy() * 255).astype(np.uint8))
-    else:
-        azimuth = np.arange(0, 360, 2, dtype=np.int32)
-        for azi in tqdm.tqdm(azimuth):
-
-            cam_poses = torch.from_numpy(orbit_camera(elevation, azi, radius=opt.cam_radius, opengl=True)).unsqueeze(0).to(device)
-
-            cam_poses[:, :3, 1:3] *= -1 # invert up & forward direction
-
-            # cameras needed by gaussian rasterizer
-            cam_view = torch.inverse(cam_poses).transpose(1, 2) # [V, 4, 4]
-            cam_view_proj = cam_view @ proj_matrix # [V, 4, 4]
-            cam_pos = - cam_poses[:, :3, 3] # [V, 3]
-
-            image = model.gs.render(gaussians, cam_view.unsqueeze(0), cam_view_proj.unsqueeze(0), cam_pos.unsqueeze(0), scale_modifier=1)['image']
-            images.append((image.squeeze(1).permute(0,2,3,1).contiguous().float().cpu().numpy() * 255).astype(np.uint8))
-
-    images = np.concatenate(images, axis=0)
-    imageio.mimwrite(output_video_path, images, fps=30)
-
-    return output_ply_path, output_ply_path
+
+    return output_ply_path
 
 # gradio UI
 
@@ -197,12 +132,23 @@ _TITLE = '''LGM Mini'''
 
 _DESCRIPTION = '''
 <div>
-A lightweight version of <a href="https://huggingface.co/spaces/ashawkey/LGM">LGM: Large Multi-View Gaussian Model for High-Resolution 3D Content Creation</a>
+A lightweight version of <a href="https://huggingface.co/spaces/ashawkey/LGM">LGM: Large Multi-View Gaussian Model for High-Resolution 3D Content Creation</a>.
 </div>
 '''
 
-block = gr.Blocks(title=_TITLE).queue()
+css = '''
+#duplicate-button {
+    margin: auto;
+    color: white;
+    background: #1565c0;
+    border-radius: 100vh;
+}
+'''
+
+block = gr.Blocks(title=_TITLE, css=css)
 with block:
+    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
+
     with gr.Row():
         with gr.Column(scale=1):
             gr.Markdown('# ' + _TITLE)
@@ -211,26 +157,15 @@ with block:
     with gr.Row(variant='panel'):
         with gr.Column(scale=1):
             # input image
-            input_image = gr.Image(label="image", type='pil')
-            # input prompt
-            input_text = gr.Textbox(label="prompt")
-            # negative prompt
-            input_neg_text = gr.Textbox(label="negative prompt", value='ugly, blurry, pixelated obscure, unnatural colors, poor lighting, dull, unclear, cropped, lowres, low quality, artifacts, duplicate')
-            # elevation
-            input_elevation = gr.Slider(label="elevation", minimum=-90, maximum=90, step=1, value=0)
-            # inference steps
-            input_num_steps = gr.Slider(label="inference steps", minimum=1, maximum=100, step=1, value=30)
-            # random seed
-            input_seed = gr.Slider(label="random seed", minimum=0, maximum=100000, step=1, value=0)
+            input_image = gr.Image(label="image", type='pil', height=300)
             # gen button
            button_gen = gr.Button("Generate")
 
 
         with gr.Column(scale=1):
             output_splat = gr.Model3D(label="3D Gaussians")
-            output_file = gr.File(label="3D Gaussians (ply format)")
 
-    button_gen.click(process, inputs=[input_image, input_text, input_neg_text, input_elevation, input_num_steps, input_seed], outputs=[output_splat, output_file])
+    button_gen.click(fn=run, inputs=[input_image], outputs=[output_splat])
 
     gr.Examples(
         examples=[
@@ -242,27 +177,10 @@ with block:
             "data_test/gso_rabbit.jpg",
         ],
         inputs=[input_image],
-        outputs=[output_splat, output_file],
-        fn=lambda x: process(input_image=x, prompt=''),
+        outputs=[output_splat],
+        fn=lambda x: run(input_image=x),
         cache_examples=True,
         label='Image-to-3D Examples'
     )
-
-    gr.Examples(
-        examples=[
-            "teddy bear",
-            "hamburger",
-            "oldman's head sculpture",
-            "headphone",
-            "motorbike",
-            "mech suit"
-
-        ],
-        inputs=[input_text],
-        outputs=[output_splat, output_file],
-        fn=lambda x: process(input_image=None, prompt=x),
-        cache_examples=True,
-        label='Text-to-3D Examples'
-    )
 
-block.launch()
+block.queue().launch(debug=True, share=True)
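
Note on the install guard: the old app.py shelled out to pip on every startup, while the patched version only builds the CUDA extension when the import fails. A minimal sketch of the same pattern, using subprocess instead of os.system (the ensure_extension helper is ours, not part of the commit):

```python
import importlib
import subprocess
import sys

def ensure_extension(module: str, source_dir: str) -> None:
    """Import a compiled extension, building it from local source only if missing."""
    try:
        importlib.import_module(module)
    except ImportError:
        # sys.executable keeps the install inside the current interpreter/venv,
        # which is safer than a bare os.system("pip install ...").
        subprocess.check_call([sys.executable, "-m", "pip", "install", source_dir])
        importlib.invalidate_caches()
        importlib.import_module(module)

# Module and source names as they appear in the diff:
ensure_extension("diff_gaussian_rasterization", "./diff-gaussian-rasterization")
```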
 
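Note on the projection matrix: the commit negates proj_matrix[0, 0], proj_matrix[1, 1] and proj_matrix[2, 2]; together with the rotation added in core/models.py (the "flip output" / "rotate output" / "rot mat fix" commits), this reorients the rendered splats. A standalone numpy sketch of the patched matrix; the fovy/znear/zfar values below are placeholders standing in for opt.fovy, opt.znear and opt.zfar:

```python
import numpy as np

def make_proj_matrix(fovy_deg: float, znear: float, zfar: float) -> np.ndarray:
    # Same entries as the patched app.py. The render path computes
    # cam_view @ proj_matrix (row-vector convention), so this is the
    # transpose of the usual column-vector perspective matrix.
    tan_half_fov = np.tan(0.5 * np.deg2rad(fovy_deg))
    proj = np.zeros((4, 4), dtype=np.float32)
    proj[0, 0] = -1 / tan_half_fov                 # sign flipped by this commit
    proj[1, 1] = -1 / tan_half_fov                 # sign flipped by this commit
    proj[2, 2] = -(zfar + znear) / (zfar - znear)  # sign flipped by this commit
    proj[3, 2] = -(zfar * znear) / (zfar - znear)
    proj[2, 3] = 1
    return proj

print(make_proj_matrix(fovy_deg=49.1, znear=0.5, zfar=2.5))  # placeholder values
```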
 
 
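The slimmed-down run() keeps only the image-conditioned path: rembg cutout, recenter, composite over white, MVDream multi-view, LGM Gaussians, .ply. The compositing step is the easiest to get wrong, so here it is in isolation (a sketch; the helper name is ours):

```python
import numpy as np

def composite_on_white(rgba_uint8: np.ndarray) -> np.ndarray:
    # rgba_uint8: [H, W, 4] uint8, e.g. the output of rembg.remove().
    # Returns [H, W, 3] float32 in [0, 1], alpha-blended over white, matching
    # `image[..., :3] * image[..., 3:4] + (1 - image[..., 3:4])` in run().
    img = rgba_uint8.astype(np.float32) / 255.0
    rgb, alpha = img[..., :3], img[..., 3:4]
    return rgb * alpha + (1.0 - alpha)

# A fully transparent pixel becomes pure white:
assert np.allclose(composite_on_white(np.zeros((1, 1, 4), np.uint8)), 1.0)
```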
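
With a single gr.Image in and a single gr.Model3D out, the Space can also be driven programmatically. A hedged sketch with gradio_client; the Space id and the "/run" endpoint name are assumptions (gradio derives the default api_name from the handler's function name):

```python
from gradio_client import Client

client = Client("dylanebert/LGM-mini")  # assumed Space id

# Pass a local image path; the click handler is `run`, so the endpoint
# should default to "/run".
ply_path = client.predict("path/to/image.png", api_name="/run")
print(ply_path)  # local path to the downloaded .ply
```
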
core/gs.py CHANGED
@@ -1,3 +1,4 @@
+import os
 import numpy as np
 
 import torch
@@ -105,6 +106,8 @@ class GaussianRenderer:
         assert gaussians.shape[0] == 1, 'only support batch size 1'
 
         from plyfile import PlyData, PlyElement
+
+        os.makedirs(os.path.dirname(path), exist_ok=True)
 
         means3D = gaussians[0, :, 0:3].contiguous().float()
         opacity = gaussians[0, :, 3:4].contiguous().float()
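
Note on the core/gs.py change: os.path.dirname() returns '' for a bare filename, and os.makedirs('') raises FileNotFoundError. The app always passes '/tmp/output.ply', so the commit's line is safe here; a defensive sketch for the general case (the helper name is ours):

```python
import os

def ensure_parent_dir(path: str) -> None:
    # Only create the parent when the path actually has one;
    # os.makedirs("") raises FileNotFoundError.
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)

ensure_parent_dir("/tmp/output.ply")  # creates /tmp if needed
ensure_parent_dir("output.ply")       # no-op instead of an exception
```
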
core/models.py CHANGED
@@ -112,6 +112,17 @@ class LGM(nn.Module):
         rotation = self.rot_act(x[..., 7:11])
         rgbs = self.rgb_act(x[..., 11:])
 
+        rot_matrix = torch.tensor([[1.0, 0.0, 0.0, 0.0],
+                                   [0.0, -1.0, 0.0, 0.0],
+                                   [0.0, 0.0, -1.0, 0.0],
+                                   [0.0, 0.0, 0.0, 1.0]], dtype=torch.float32, device=images.device)
+
+        pos_4d = torch.cat([pos, torch.ones_like(pos[..., :1])], dim=-1)
+        pos = torch.matmul(pos_4d, rot_matrix) # [B, N, 4]
+        pos = pos[..., :3]
+
+        rotation = torch.matmul(rotation, rot_matrix)
+
         gaussians = torch.cat([pos, opacity, scale, rotation, rgbs], dim=-1) # [B, N, 14]
 
         return gaussians
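
Note on the "rot mat fix": in homogeneous coordinates, diag(1, -1, -1, 1) is a 180-degree rotation about the x-axis, mapping (x, y, z) to (x, -y, -z); the commit applies it to the Gaussian positions and, component-wise, to their quaternions. A quick numpy check of the position transform:

```python
import numpy as np

# The commit's rot_matrix as a plain numpy array.
rot_matrix = np.diag([1.0, -1.0, -1.0, 1.0]).astype(np.float32)

pos = np.array([[0.2, 0.5, -0.7]], dtype=np.float32)                  # [N, 3]
pos_4d = np.concatenate([pos, np.ones((1, 1), np.float32)], axis=-1)  # [N, 4]
flipped = (pos_4d @ rot_matrix)[..., :3]

# A 180-degree rotation about x negates y and z:
assert np.allclose(flipped, [[0.2, -0.5, 0.7]])
```
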
requirements.txt CHANGED
@@ -1,7 +1,3 @@
--extra-index-url https://download.pytorch.org/whl/cu118
-torch==2.0.0
-xformers
-
 numpy
 tyro
 diffusers
@@ -28,4 +24,8 @@ trimesh
 kiui >= 0.2.3
 xatlas
 roma
-plyfile
+plyfile
+torch==2.0.0 --index-url https://download.pytorch.org/whl/cu118
+torchvision==0.15.1 --index-url https://download.pytorch.org/whl/cu118
+torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cu118
+xformers