Hancy committed
Commit 1615664 • 1 Parent(s): e67befd

modify on ZeroGPU

Files changed (3):
  1. app.py +12 -5
  2. app_config.py +0 -1
  3. sample_cond.py +4 -12
app.py CHANGED
@@ -1,15 +1,20 @@
 import gradio as gr
 import spaces
-import tempfile
 import os
 import torch
 import numpy as np
 from matplotlib.colors import LinearSegmentedColormap
 
-from app_config import CSS, HEADER, FOOTER, DEVICE
-import sample_cond
+from app_config import CSS, HEADER, FOOTER
+from sample_cond import CKPT_PATH, MODEL_CFG, load_model_from_config, sample
 
-model = sample_cond.load_model()
+DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+
+def load_model():
+    pl_sd = torch.load(CKPT_PATH, map_location="cpu")
+    model = load_model_from_config(MODEL_CFG.model, pl_sd["state_dict"])
+    return model
 
 
 def create_custom_colormap():
@@ -33,7 +38,7 @@ def colorize_depth(depth, log_scale):
 @spaces.GPU
 @torch.no_grad()
 def generate_lidar(model, cond):
-    img, pcd = sample_cond.sample(model, cond)
+    img, pcd = sample(model, cond)
     return img, pcd
 
 
@@ -46,6 +51,8 @@ def load_camera(image):
     return camera_cond
 
 
+model = load_model().to(DEVICE)
+
 with gr.Blocks(css=CSS) as demo:
     gr.Markdown(HEADER)
 
app_config.py CHANGED
@@ -14,7 +14,6 @@ CSS = """
     max-height: 70vh;
 }
 """
-DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
 
 HEADER = '''
 # LiDAR Diffusion
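
Taken together, the app.py and app_config.py changes apply the usual ZeroGPU recipe: a ZeroGPU Space attaches a GPU only while a function decorated with @spaces.GPU is running, so the model is loaded once at import time, placed on the device at module scope, and all CUDA work happens inside the decorated handler. A minimal self-contained sketch of that recipe (ToyModel and predict are illustrative stand-ins, not code from this repo; it assumes the spaces package available on Hugging Face Spaces):

import gradio as gr
import spaces
import torch

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'


class ToyModel(torch.nn.Module):
    """Hypothetical stand-in for the LiDAR diffusion model."""
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 4)

    def forward(self, x):
        return self.layer(x)


# Load once at startup and place the weights at module scope, exactly
# as app.py now does with model = load_model().to(DEVICE).
model = ToyModel().eval().to(DEVICE)


@spaces.GPU          # a GPU is attached only for the duration of this call
@torch.no_grad()
def predict(value):
    x = torch.full((1, 4), float(value), device=DEVICE)
    return model(x).cpu().numpy().tolist()


demo = gr.Interface(fn=predict, inputs=gr.Number(), outputs=gr.JSON())

if __name__ == '__main__':
    demo.launch()

Because DEVICE falls back to 'cpu', the same file still runs on a machine without a GPU.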
sample_cond.py CHANGED
@@ -6,9 +6,8 @@ from omegaconf import OmegaConf
 from PIL import Image
 
 from lidm.models.diffusion.ddim import DDIMSampler
-from lidm.utils.misc_utils import instantiate_from_config, isimage, ismap
+from lidm.utils.misc_utils import instantiate_from_config
 from lidm.utils.lidar_utils import range2pcd
-from app_config import DEVICE
 
 
 CUSTOM_STEPS = 50
@@ -20,7 +19,7 @@ CFG_PATH = os.path.join(MODEL_PATH, 'config.yaml')
 CKPT_PATH = os.path.join(MODEL_PATH, 'model.ckpt')
 
 # settings
-model_config = OmegaConf.load(CFG_PATH)
+MODEL_CFG = OmegaConf.load(CFG_PATH)
 
 
 def custom_to_pcd(x, config, rgb=None):
@@ -64,20 +63,13 @@ def logs2pil(logs, keys=["sample"]):
     return imgs
 
 
-def load_model_from_config(config, sd, device):
+def load_model_from_config(config, sd):
     model = instantiate_from_config(config)
     model.load_state_dict(sd, strict=False)
-    model.to(device)
     model.eval()
     return model
 
 
-def load_model():
-    pl_sd = torch.load(CKPT_PATH, map_location="cpu")
-    model = load_model_from_config(model_config.model, pl_sd["state_dict"], DEVICE)
-    return model
-
-
 @torch.no_grad()
 def convsample_ddim(model, cond, steps, shape, eta=1.0, verbose=False):
     ddim = DDIMSampler(model)
@@ -103,7 +95,7 @@ def make_convolutional_sample(model, batch, batch_size, custom_steps=None, eta=1
 def sample(model, cond):
     batch = {'camera': cond}
     img = make_convolutional_sample(model, batch, batch_size=1, custom_steps=CUSTOM_STEPS, eta=ETA)  # TODO add arguments for batch_size, custom_steps and eta
-    pcd = custom_to_pcd(img, model_config)[0].astype(np.float32)
+    pcd = custom_to_pcd(img, MODEL_CFG)[0].astype(np.float32)
     img = img.squeeze().detach().cpu().numpy()
     return img, pcd
 
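
After this commit, sample_cond.py owns the config and checkpoint handling (CKPT_PATH, MODEL_CFG, load_model_from_config, sample) while device placement is left entirely to the caller, so the same loader works on a CPU-only checkout and on ZeroGPU. A sketch of how the pieces compose (build_model and generate are hypothetical helper names, not repo code):

import torch
from sample_cond import CKPT_PATH, MODEL_CFG, load_model_from_config, sample


def build_model(device):
    # Checkpoint is read onto the CPU first; placement is the caller's choice.
    pl_sd = torch.load(CKPT_PATH, map_location='cpu')
    model = load_model_from_config(MODEL_CFG.model, pl_sd['state_dict'])
    return model.to(device)


@torch.no_grad()
def generate(model, camera_cond):
    # camera_cond is the conditioning image prepared by app.load_camera;
    # sample returns the range image and the reconstructed point cloud.
    img, pcd = sample(model, camera_cond)
    return img, pcd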