customdiffusion360 committed
Commit a24f25c
1 Parent(s): aed849d

fix requirements.txt

Files changed (3)
  1. app.py +4 -5
  2. requirements.txt +2 -0
  3. sampling_for_demo.py +1 -17
app.py CHANGED
@@ -7,7 +7,6 @@ import glob
import numpy as np
from PIL import Image
import time
- import tqdm
import copy
import sys

@@ -187,7 +186,7 @@ BASE_CONFIG = "configs/train_co3d_concept.yaml"
BASE_CKPT = "pretrained-models/sd_xl_base_1.0.safetensors"

start_time = time.time()
- # base_model = load_base_model(BASE_CONFIG, ckpt=BASE_CKPT, verbose=False)
+ base_model = load_base_model(BASE_CONFIG, ckpt=BASE_CKPT, verbose=False)
print(f"Time taken to load base model: {time.time() - start_time:.2f}s")

global curr_camera_dict
@@ -334,6 +333,9 @@ with gr.Blocks(head=head,
<a href='https://customdiffusion360.github.io/index.html' style="padding: 10px;">
<img src='https://img.shields.io/badge/Project%20Page-8A2BE2'>
</a>
+ <a href='https://arxiv.org/abs/2404.12333'>
+ <img src="https://img.shields.io/badge/arXiv-2404.12333-red">
+ </a>
<a class="link" href='https://github.com/customdiffusion360/custom-diffusion360' style="padding: 10px;">
<img src='https://img.shields.io/badge/Github-%23121011.svg'>
</a>
@@ -343,9 +345,6 @@ with gr.Blocks(head=head,
visible=True
)

- gr
-
-
if SPACE_ID == ORIGINAL_SPACE_ID:
gr.Markdown(SHARED_UI_WARNING)
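For context on the app.py change: the commit re-enables eager loading of the SDXL base model at startup (the previously commented-out call), keeping the existing timer around it. A minimal sketch of that startup pattern, using a placeholder for the repo's load_base_model helper, whose real definition lives elsewhere in the codebase and is not part of this diff:

    import time

    BASE_CONFIG = "configs/train_co3d_concept.yaml"
    BASE_CKPT = "pretrained-models/sd_xl_base_1.0.safetensors"

    def load_base_model(config_path, ckpt, verbose=False):
        # Placeholder standing in for the repo's real loader, which builds the
        # SDXL pipeline from the YAML config and the safetensors checkpoint.
        return object()

    # Load once at startup so every later generation request reuses the same model,
    # and report how long the load took (the timed print kept by this commit).
    start_time = time.time()
    base_model = load_base_model(BASE_CONFIG, ckpt=BASE_CKPT, verbose=False)
    print(f"Time taken to load base model: {time.time() - start_time:.2f}s")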
 
requirements.txt CHANGED
@@ -1,3 +1,5 @@
+ gradio
+ plotly
omegaconf
einops
fire
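The two new entries match what the demo imports: gradio for the Blocks UI and, presumably, plotly for the interactive camera view. A minimal sketch of that combination, with an illustrative figure rather than the demo's actual plot:

    import gradio as gr
    import plotly.graph_objects as go

    def camera_preview():
        # Illustrative 3D scatter; the real demo presumably plots camera poses around the object.
        return go.Figure(data=[go.Scatter3d(x=[0, 1, 0], y=[0, 0, 1], z=[1, 1, 1], mode="markers")])

    with gr.Blocks() as demo:
        gr.Markdown("Camera viewpoint preview")
        gr.Plot(value=camera_preview())

    demo.launch()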
sampling_for_demo.py CHANGED
@@ -1,5 +1,3 @@
- import glob
- import os
import sys
import copy
from typing import List
@@ -275,20 +273,7 @@ def process_camera_json(camera_json, example_cam):
eye = torch.tensor([camera_dict["eye"]["x"], camera_dict["eye"]["y"], camera_dict["eye"]["z"]], dtype=torch.float32).unsqueeze(0)
up = torch.tensor([camera_dict["up"]["x"], camera_dict["up"]["y"], camera_dict["up"]["z"]], dtype=torch.float32).unsqueeze(0)
center = torch.tensor([camera_dict["center"]["x"], camera_dict["center"]["y"], camera_dict["center"]["z"]], dtype=torch.float32).unsqueeze(0)
- new_R, new_T = look_at_view_transform(eye=eye, at=center, up=up)
-
- ## temp
- # new_R = torch.tensor([[[ 0.4988, 0.2666, 0.8247],
- # [-0.1917, -0.8940, 0.4049],
- # [ 0.8453, -0.3601, -0.3948]]], dtype=torch.float32)
- # new_T = torch.tensor([[ 0.0739, -0.0013, 0.9973]], dtype=torch.float32)
-
-
- # new_R = torch.tensor([[[ 0.2530, 0.2989, 0.9201],
- # [-0.2652, -0.8932, 0.3631],
- # [ 0.9304, -0.3359, -0.1467],]], dtype=torch.float32)
- # new_T = torch.tensor([[ 0.0081, 0.0337, 1.0452]], dtype=torch.float32)
-
+ new_R, new_T = look_at_view_transform(eye=eye, at=center, up=up)

print("focal length", example_cam.focal_length)
print("principal point", example_cam.principal_point)
@@ -475,4 +460,3 @@ def sample(model, data,

print("generation done")
return image
-
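The retained line converts the eye/up/center vectors from the camera JSON into a rotation and translation via look_at_view_transform, replacing the hard-coded debug tensors that were removed. The corresponding import is not visible in this diff; PyTorch3D ships a function of that name, so the sketch below assumes that is the one in use, and the camera_dict values are illustrative:

    import torch
    from pytorch3d.renderer import look_at_view_transform  # assumed source of the helper

    # Camera described the way the demo's JSON does: eye position, up vector, look-at center.
    camera_dict = {
        "eye":    {"x": 0.0, "y": 0.0, "z": 2.0},
        "up":     {"x": 0.0, "y": 1.0, "z": 0.0},
        "center": {"x": 0.0, "y": 0.0, "z": 0.0},
    }

    eye    = torch.tensor([[camera_dict["eye"][k]    for k in "xyz"]], dtype=torch.float32)
    up     = torch.tensor([[camera_dict["up"][k]     for k in "xyz"]], dtype=torch.float32)
    center = torch.tensor([[camera_dict["center"][k] for k in "xyz"]], dtype=torch.float32)

    # R: (1, 3, 3) world-to-view rotation, T: (1, 3) translation for the requested viewpoint.
    new_R, new_T = look_at_view_transform(eye=eye, at=center, up=up)
    print(new_R.shape, new_T.shape)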