LinKadel committed on
Commit
311a846
1 Parent(s): cc57c3b
Files changed (3) hide show
  1. app.py +3 -2
  2. geometry.py +72 -0
  3. utils.py +86 -0
app.py CHANGED
@@ -17,14 +17,15 @@ from diffusers import (
17
 
18
  print(f"Is CUDA available: {torch.cuda.is_available()}")
19
  print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
 
20
 
21
  import time
22
  from style import css
23
 
24
  BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
25
 
26
- title = "Flan T5 and Vanilla T5"
27
- description = "This demo compares [T5-large](https://huggingface.co/t5-large) and [Flan-T5-XX-large](https://huggingface.co/google/flan-t5-xxl). Note that T5 expects a very specific format of the prompts, so the examples below are not necessarily the best prompts to compare."
28
 
29
  def inference(text):
30
  output_flan = ""
 
17
 
18
  print(f"Is CUDA available: {torch.cuda.is_available()}")
19
  print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
20
+ DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
21
 
22
  import time
23
  from style import css
24
 
25
  BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
26
 
27
+ title = "Ultra Heroes"
28
+ description = "Testing composites and lighting tweaks."
29
 
30
  def inference(text):
31
  output_flan = ""
geometry.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
def get_intrinsics(H, W, fov=55.0):
    """Intrinsics for a pinhole camera model.

    The focal length is derived from the horizontal field of view, and the
    principal point is assumed to be at the image center.

    Args:
        H: image height in pixels.
        W: image width in pixels.
        fov: horizontal field of view in degrees. Defaults to 55, which
            preserves the behavior of the original hard-coded value.

    Returns:
        (3, 3) numpy array: the camera matrix K.
    """
    # f = (W/2) / tan(fov/2) — standard pinhole relation along the width.
    f = 0.5 * W / np.tan(0.5 * np.radians(fov))
    cx = 0.5 * W
    cy = 0.5 * H
    return np.array([[f, 0, cx],
                     [0, f, cy],
                     [0, 0, 1]])
14
+
15
def depth_to_points(depth, R=None, t=None):
    """Back-project a depth map into a map of 3D points.

    Args:
        depth: array indexed as (bs, H, W) — inferred from the
            `depth.shape[1:3]` and `depth[:, :, :, None, None]` usage
            below; TODO confirm against the caller.
        R: optional (3, 3) rotation from the reference to the target
            viewpoint; defaults to the identity.
        t: optional (3,) translation applied after R; defaults to zeros.

    Returns:
        (H, W, 3) array of 3D points for the first batch element, in
        PyTorch3D's coordinate convention.
    """
    # Intrinsics from the spatial size: H = shape[1], W = shape[2].
    K = get_intrinsics(depth.shape[1], depth.shape[2])
    Kinv = np.linalg.inv(K)
    if R is None:
        R = np.eye(3)
    if t is None:
        t = np.zeros(3)

    # M converts from your coordinate to PyTorch3D's coordinate system
    # (flips the x and y axes).
    M = np.eye(3)
    M[0, 0] = -1.0
    M[1, 1] = -1.0

    height, width = depth.shape[1:3]

    # Homogeneous pixel grid of shape (H, W, 3); last component fixed to 1.
    x = np.arange(width)
    y = np.arange(height)
    coord = np.stack(np.meshgrid(x, y), -1)
    coord = np.concatenate((coord, np.ones_like(coord)[:, :, [0]]), -1)  # z=1
    coord = coord.astype(np.float32)
    # coord = torch.as_tensor(coord, dtype=torch.float32, device=device)
    coord = coord[None]  # bs, h, w, 3

    # Unproject: X = depth * K^-1 @ [u, v, 1]^T, broadcast over (bs, H, W).
    D = depth[:, :, :, None, None]
    # print(D.shape, Kinv[None, None, None, ...].shape, coord[:, :, :, :, None].shape )
    pts3D_1 = D * Kinv[None, None, None, ...] @ coord[:, :, :, :, None]
    # pts3D_1 live in your coordinate system. Convert them to Py3D's
    pts3D_1 = M[None, None, None, ...] @ pts3D_1
    # from reference to target viewpoint
    pts3D_2 = R[None, None, None, ...] @ pts3D_1 + t[None, None, None, :, None]
    # pts3D_2 = pts3D_1
    # depth_2 = pts3D_2[:, :, :, 2, :] # b,1,h,w
    return pts3D_2[:, :, :, :3, 0][0]
49
+
50
+
51
def create_triangles(h, w, mask=None):
    """Build triangle indices for a regular (h x w) pixel-grid mesh.

    Every grid cell is split into two triangles. The indices depend only
    on the grid size, so nothing here needs to be differentiable.

    Args:
        h: (int) height of the image grid.
        w: (int) width of the image grid.
        mask: optional boolean array with h*w vertex-validity flags;
            triangles touching any invalid vertex are dropped.

    Returns:
        2D numpy int array of shape (2*(w-1)*(h-1), 3) — fewer rows when
        a mask filters triangles out.
    """
    cols, rows = np.meshgrid(range(w - 1), range(h - 1))
    # Flat vertex index of each cell corner (row-major layout).
    top_left = rows * w + cols
    top_right = top_left + 1
    bottom_left = top_left + w
    bottom_right = bottom_left + 1
    # Two triangles per cell: (tl, bl, tr) and (br, tr, bl).
    corner_stack = np.array(
        [top_left, bottom_left, top_right,
         bottom_right, top_right, bottom_left])
    triangles = np.transpose(corner_stack, (1, 2, 0)).reshape(
        ((w - 1) * (h - 1) * 2, 3))
    if mask is not None:
        flat_mask = mask.reshape(-1)
        # Keep only triangles whose three vertices are all valid.
        triangles = triangles[flat_mask[triangles].all(1)]
    return triangles
utils.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import matplotlib
26
+ import matplotlib.cm
27
+ import numpy as np
28
+ import torch
29
+
30
def colorize(value, vmin=None, vmax=None, cmap='magma_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
    """Converts a depth map to a color image.

    Args:
        value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed
        vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, the 2nd percentile of valid values is used. Defaults to None.
        vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, the 85th percentile of valid values is used. Defaults to None.
        cmap (str, optional): matplotlib colormap to use. Defaults to 'magma_r'.
        invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
        invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
        background_color (tuple[int], optional): 4-tuple RGBA color to give to invalid pixels. Defaults to (128, 128, 128, 255).
        gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
        value_transform (Callable, optional): Transform applied to the normalized values just before coloring. Defaults to None.

    Returns:
        numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
    """
    if isinstance(value, torch.Tensor):
        value = value.detach().cpu().numpy()

    value = value.squeeze()
    if invalid_mask is None:
        invalid_mask = value == invalid_val
    mask = np.logical_not(invalid_mask)

    # Normalize valid pixels to [0, 1]; percentiles keep outliers from
    # dominating the color range.
    vmin = np.percentile(value[mask], 2) if vmin is None else vmin
    vmax = np.percentile(value[mask], 85) if vmax is None else vmax
    if vmin != vmax:
        value = (value - vmin) / (vmax - vmin)  # vmin..vmax
    else:
        # Avoid 0-division
        value = value * 0.

    # NaN marks invalid pixels for the colormap; they get overwritten with
    # background_color after coloring anyway.
    value[invalid_mask] = np.nan
    # matplotlib.cm.get_cmap was deprecated in 3.7 and removed in 3.9;
    # prefer the colormap registry, fall back for older matplotlib.
    try:
        cmapper = matplotlib.colormaps[cmap]
    except AttributeError:
        cmapper = matplotlib.cm.get_cmap(cmap)
    if value_transform:
        value = value_transform(value)
    # value = value / value.max()
    value = cmapper(value, bytes=True)  # (nxmx4) uint8 RGBA

    img = value[...]
    img[invalid_mask] = background_color

    if gamma_corrected:
        # Gamma correction with exponent 2.2 (approximate sRGB encoding).
        img = img / 255
        img = np.power(img, 2.2)
        img = img * 255
        img = img.astype(np.uint8)
    return img
86
+