bug fix
- .gitignore +1 -0
- mesh_reconstruction/render.py +1 -3
- scripts/sd_model_zoo.py +2 -2
.gitignore
ADDED
@@ -0,0 +1 @@
+*.pyc
mesh_reconstruction/render.py
CHANGED
@@ -12,8 +12,6 @@ def _warmup(glctx, device=None):
     tri = tensor([[0, 1, 2]], dtype=torch.int32)
     dr.rasterize(glctx, pos, tri, resolution=[256, 256])
 
-glctx = dr.RasterizeGLContext(output_db=False, device="cuda")
-
 class NormalsRenderer:
 
     _glctx:dr.RasterizeGLContext = None
@@ -31,7 +29,7 @@ class NormalsRenderer:
         else:
             self._mvp = mvp
         self._image_size = image_size
-        self._glctx = glctx
+        self._glctx = dr.RasterizeGLContext(output_db=False, device=device)
         _warmup(self._glctx, device)
 
     def render(self,
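Note (not part of the diff): a minimal sketch of the pattern this change applies. The rasterization context is now built inside __init__ on the caller's device instead of once at import time on a hard-coded "cuda" device, so importing the module no longer requires a live GPU context. The class name below is hypothetical and only illustrates the pattern; nvdiffrast is assumed to be installed.

import nvdiffrast.torch as dr

class LazyContextRenderer:  # hypothetical name, for illustration only
    def __init__(self, device: str = "cuda"):
        # Create the GL context when a renderer is constructed, on the requested
        # device, mirroring the fixed NormalsRenderer.__init__ above.
        self._glctx = dr.RasterizeGLContext(output_db=False, device=device)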
scripts/sd_model_zoo.py
CHANGED
@@ -65,7 +65,7 @@ def load_image_encoder():
     )
     return image_encoder
 
-def load_common_sd15_pipe(base_model=DEFAULT_BASE_MODEL, device="auto", controlnet=None, ip_adapter=False, plus_model=True, torch_dtype=torch.float16, model_cpu_offload_seq=None, enable_sequential_cpu_offload=False, vae_slicing=False, pipeline_class=None, **kwargs):
+def load_common_sd15_pipe(base_model=DEFAULT_BASE_MODEL, device="balanced", controlnet=None, ip_adapter=False, plus_model=True, torch_dtype=torch.float16, model_cpu_offload_seq=None, enable_sequential_cpu_offload=False, vae_slicing=False, pipeline_class=None, **kwargs):
     model_kwargs = dict(
         torch_dtype=torch_dtype,
         device_map=device,
@@ -119,7 +119,7 @@ def load_common_sd15_pipe(base_model=DEFAULT_BASE_MODEL, device="auto", controln
     if enable_sequential_cpu_offload:
         pipe.enable_sequential_cpu_offload()
     else:
-        pipe = pipe.to("cuda")
+        # pipe = pipe.to("cuda")
         pass
     # pipe.enable_model_cpu_offload()
     if vae_slicing:
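Note (not part of the diff): a minimal sketch of what the new default amounts to, assuming load_common_sd15_pipe forwards device as device_map to diffusers' from_pretrained, as the model_kwargs above suggest. The checkpoint id below is illustrative only, not the repo's DEFAULT_BASE_MODEL.

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # illustrative SD 1.5 checkpoint (assumption)
    torch_dtype=torch.float16,
    device_map="balanced",             # new default, replacing device="auto"
)
# With device_map handling placement, an explicit pipe.to("cuda") is unnecessary,
# which matches the commented-out call in the else branch above.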