JeffreyXiang committed on
Commit 15fe7bc
1 Parent(s): 1dd64b5
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. app.py +2 -0
  2. extensions/nvdiffrast/LICENSE.txt +97 -0
  3. extensions/nvdiffrast/README.md +42 -0
  4. extensions/nvdiffrast/nvdiffrast/__init__.py +9 -0
  5. extensions/nvdiffrast/nvdiffrast/common/antialias.cu +558 -0
  6. extensions/nvdiffrast/nvdiffrast/common/antialias.h +50 -0
  7. extensions/nvdiffrast/nvdiffrast/common/common.cpp +60 -0
  8. extensions/nvdiffrast/nvdiffrast/common/common.h +263 -0
  9. extensions/nvdiffrast/nvdiffrast/common/cudaraster/CudaRaster.hpp +63 -0
  10. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/BinRaster.inl +423 -0
  11. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/Buffer.cpp +94 -0
  12. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/Buffer.hpp +55 -0
  13. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/CoarseRaster.inl +730 -0
  14. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/Constants.hpp +73 -0
  15. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/CudaRaster.cpp +79 -0
  16. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/Defs.hpp +90 -0
  17. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/FineRaster.inl +385 -0
  18. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/PrivateDefs.hpp +153 -0
  19. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/RasterImpl.cpp +370 -0
  20. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/RasterImpl.hpp +102 -0
  21. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/RasterImpl_.cu +37 -0
  22. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/TriangleSetup.inl +402 -0
  23. extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/Util.inl +452 -0
  24. extensions/nvdiffrast/nvdiffrast/common/framework.h +49 -0
  25. extensions/nvdiffrast/nvdiffrast/common/glutil.cpp +403 -0
  26. extensions/nvdiffrast/nvdiffrast/common/glutil.h +113 -0
  27. extensions/nvdiffrast/nvdiffrast/common/glutil_extlist.h +48 -0
  28. extensions/nvdiffrast/nvdiffrast/common/interpolate.cu +276 -0
  29. extensions/nvdiffrast/nvdiffrast/common/interpolate.h +49 -0
  30. extensions/nvdiffrast/nvdiffrast/common/rasterize.cu +276 -0
  31. extensions/nvdiffrast/nvdiffrast/common/rasterize.h +60 -0
  32. extensions/nvdiffrast/nvdiffrast/common/rasterize_gl.cpp +644 -0
  33. extensions/nvdiffrast/nvdiffrast/common/rasterize_gl.h +60 -0
  34. extensions/nvdiffrast/nvdiffrast/common/texture.cpp +104 -0
  35. extensions/nvdiffrast/nvdiffrast/common/texture.h +78 -0
  36. extensions/nvdiffrast/nvdiffrast/common/texture_.cu +1156 -0
  37. extensions/nvdiffrast/nvdiffrast/lib/setgpu.lib +0 -0
  38. extensions/nvdiffrast/nvdiffrast/tensorflow/__init__.py +12 -0
  39. extensions/nvdiffrast/nvdiffrast/tensorflow/ops.py +303 -0
  40. extensions/nvdiffrast/nvdiffrast/tensorflow/plugin_loader.py +219 -0
  41. extensions/nvdiffrast/nvdiffrast/tensorflow/tf_all.cu +36 -0
  42. extensions/nvdiffrast/nvdiffrast/tensorflow/tf_antialias.cu +278 -0
  43. extensions/nvdiffrast/nvdiffrast/tensorflow/tf_interpolate.cu +301 -0
  44. extensions/nvdiffrast/nvdiffrast/tensorflow/tf_rasterize.cu +242 -0
  45. extensions/nvdiffrast/nvdiffrast/tensorflow/tf_texture.cu +525 -0
  46. extensions/nvdiffrast/nvdiffrast/torch/__init__.py +10 -0
  47. extensions/nvdiffrast/nvdiffrast/torch/ops.py +734 -0
  48. extensions/nvdiffrast/nvdiffrast/torch/torch_antialias.cpp +243 -0
  49. extensions/nvdiffrast/nvdiffrast/torch/torch_bindings.cpp +73 -0
  50. extensions/nvdiffrast/nvdiffrast/torch/torch_bindings_gl.cpp +30 -0
app.py CHANGED
@@ -83,6 +83,8 @@ def image_to_3d(image: Image.Image) -> Tuple[dict, str]:
  """
  outputs = pipeline(image, formats=["gaussian", "mesh"], preprocess_image=False)
  video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
+ video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
+ video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
  model_id = uuid.uuid4()
  video_path = f"/tmp/Trellis-demo/{model_id}.mp4"
  os.makedirs(os.path.dirname(video_path), exist_ok=True)
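For context on the two added lines: the demo now renders a second video of mesh normals and stitches each normal frame beside the corresponding Gaussian color frame. A minimal sketch of the same side-by-side stitching on hypothetical dummy frames (the real frames come from `render_utils.render_video`):

```python
import numpy as np

# Hypothetical stand-ins for the two rendered frame lists (H x W x 3 uint8 frames).
video = [np.zeros((512, 512, 3), dtype=np.uint8) for _ in range(120)]          # color pass
video_geo = [np.full((512, 512, 3), 255, dtype=np.uint8) for _ in range(120)]  # normal pass

# Concatenate along axis=1 (width): color on the left, geometry normals on the right.
video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]

assert video[0].shape == (512, 1024, 3)
```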
extensions/nvdiffrast/LICENSE.txt ADDED
@@ -0,0 +1,97 @@
1
+ Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
2
+
3
+
4
+ Nvidia Source Code License (1-Way Commercial)
5
+
6
+ =======================================================================
7
+
8
+ 1. Definitions
9
+
10
+ "Licensor" means any person or entity that distributes its Work.
11
+
12
+ "Software" means the original work of authorship made available under
13
+ this License.
14
+
15
+ "Work" means the Software and any additions to or derivative works of
16
+ the Software that are made available under this License.
17
+
18
+ The terms "reproduce," "reproduction," "derivative works," and
19
+ "distribution" have the meaning as provided under U.S. copyright law;
20
+ provided, however, that for the purposes of this License, derivative
21
+ works shall not include works that remain separable from, or merely
22
+ link (or bind by name) to the interfaces of, the Work.
23
+
24
+ Works, including the Software, are "made available" under this License
25
+ by including in or with the Work either (a) a copyright notice
26
+ referencing the applicability of this License to the Work, or (b) a
27
+ copy of this License.
28
+
29
+ 2. License Grants
30
+
31
+ 2.1 Copyright Grant. Subject to the terms and conditions of this
32
+ License, each Licensor grants to you a perpetual, worldwide,
33
+ non-exclusive, royalty-free, copyright license to reproduce,
34
+ prepare derivative works of, publicly display, publicly perform,
35
+ sublicense and distribute its Work and any resulting derivative
36
+ works in any form.
37
+
38
+ 3. Limitations
39
+
40
+ 3.1 Redistribution. You may reproduce or distribute the Work only
41
+ if (a) you do so under this License, (b) you include a complete
42
+ copy of this License with your distribution, and (c) you retain
43
+ without modification any copyright, patent, trademark, or
44
+ attribution notices that are present in the Work.
45
+
46
+ 3.2 Derivative Works. You may specify that additional or different
47
+ terms apply to the use, reproduction, and distribution of your
48
+ derivative works of the Work ("Your Terms") only if (a) Your Terms
49
+ provide that the use limitation in Section 3.3 applies to your
50
+ derivative works, and (b) you identify the specific derivative
51
+ works that are subject to Your Terms. Notwithstanding Your Terms,
52
+ this License (including the redistribution requirements in Section
53
+ 3.1) will continue to apply to the Work itself.
54
+
55
+ 3.3 Use Limitation. The Work and any derivative works thereof only
56
+ may be used or intended for use non-commercially. The Work or
57
+ derivative works thereof may be used or intended for use by Nvidia
58
+ or its affiliates commercially or non-commercially. As used herein,
59
+ "non-commercially" means for research or evaluation purposes only
60
+ and not for any direct or indirect monetary gain.
61
+
62
+ 3.4 Patent Claims. If you bring or threaten to bring a patent claim
63
+ against any Licensor (including any claim, cross-claim or
64
+ counterclaim in a lawsuit) to enforce any patents that you allege
65
+ are infringed by any Work, then your rights under this License from
66
+ such Licensor (including the grant in Section 2.1) will terminate
67
+ immediately.
68
+
69
+ 3.5 Trademarks. This License does not grant any rights to use any
70
+ Licensor's or its affiliates' names, logos, or trademarks, except
71
+ as necessary to reproduce the notices described in this License.
72
+
73
+ 3.6 Termination. If you violate any term of this License, then your
74
+ rights under this License (including the grant in Section 2.1) will
75
+ terminate immediately.
76
+
77
+ 4. Disclaimer of Warranty.
78
+
79
+ THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY
80
+ KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
81
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR
82
+ NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER
83
+ THIS LICENSE.
84
+
85
+ 5. Limitation of Liability.
86
+
87
+ EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL
88
+ THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE
89
+ SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
90
+ INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF
91
+ OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK
92
+ (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION,
93
+ LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER
94
+ COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF
95
+ THE POSSIBILITY OF SUCH DAMAGES.
96
+
97
+ =======================================================================
extensions/nvdiffrast/README.md ADDED
@@ -0,0 +1,42 @@
1
+ ## Nvdiffrast – Modular Primitives for High-Performance Differentiable Rendering
2
+
3
+ ![Teaser image](./docs/img/teaser.png)
4
+
5
+ **Modular Primitives for High-Performance Differentiable Rendering**<br>
6
+ Samuli Laine, Janne Hellsten, Tero Karras, Yeongho Seol, Jaakko Lehtinen, Timo Aila<br>
7
+ [http://arxiv.org/abs/2011.03277](http://arxiv.org/abs/2011.03277)
8
+
9
+ Nvdiffrast is a PyTorch/TensorFlow library that provides high-performance primitive operations for rasterization-based differentiable rendering.
10
+ Please refer to &#x261E;&#x261E; [nvdiffrast documentation](https://nvlabs.github.io/nvdiffrast) &#x261C;&#x261C; for more information.
11
+
12
+ ## Licenses
13
+
14
+ Copyright &copy; 2020&ndash;2024, NVIDIA Corporation. All rights reserved.
15
+
16
+ This work is made available under the [Nvidia Source Code License](https://github.com/NVlabs/nvdiffrast/blob/main/LICENSE.txt).
17
+
18
+ For business inquiries, please visit our website and submit the form: [NVIDIA Research Licensing](https://www.nvidia.com/en-us/research/inquiries/)
19
+
20
+ We do not currently accept outside code contributions in the form of pull requests.
21
+
22
+ Environment map stored as part of `samples/data/envphong.npz` is derived from a Wave Engine
23
+ [sample material](https://github.com/WaveEngine/Samples-2.5/tree/master/Materials/EnvironmentMap/Content/Assets/CubeMap.cubemap)
24
+ originally shared under
25
+ [MIT License](https://github.com/WaveEngine/Samples-2.5/blob/master/LICENSE.md).
26
+ Mesh and texture stored as part of `samples/data/earth.npz` are derived from
27
+ [3D Earth Photorealistic 2K](https://www.turbosquid.com/3d-models/3d-realistic-earth-photorealistic-2k-1279125)
28
+ model originally made available under
29
+ [TurboSquid 3D Model License](https://blog.turbosquid.com/turbosquid-3d-model-license/#3d-model-license).
30
+
31
+ ## Citation
32
+
33
+ ```
34
+ @article{Laine2020diffrast,
35
+ title = {Modular Primitives for High-Performance Differentiable Rendering},
36
+ author = {Samuli Laine and Janne Hellsten and Tero Karras and Yeongho Seol and Jaakko Lehtinen and Timo Aila},
37
+ journal = {ACM Transactions on Graphics},
38
+ year = {2020},
39
+ volume = {39},
40
+ number = {6}
41
+ }
42
+ ```
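Since the README only links out to the documentation, here is a minimal sketch of the torch-side pipeline this vendored copy exposes (the triangle and color tensors are made up for illustration; the rasterize/interpolate/antialias calls follow the documented `nvdiffrast.torch` API and require a CUDA device):

```python
import torch
import nvdiffrast.torch as dr

# One triangle: clip-space positions (N, V, 4), int32 indices (T, 3), per-vertex colors.
pos = torch.tensor([[[-0.8, -0.8, 0.0, 1.0],
                     [ 0.8, -0.8, 0.0, 1.0],
                     [ 0.0,  0.8, 0.0, 1.0]]], device='cuda')
tri = torch.tensor([[0, 1, 2]], dtype=torch.int32, device='cuda')
col = torch.tensor([[[1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0],
                     [0.0, 0.0, 1.0]]], device='cuda')

glctx = dr.RasterizeCudaContext()                               # pure-CUDA rasterizer backend
rast, _ = dr.rasterize(glctx, pos, tri, resolution=[256, 256])  # barycentrics + triangle IDs
color, _ = dr.interpolate(col, rast, tri)                       # attribute interpolation
color = dr.antialias(color, rast, pos, tri)                     # analytic silhouette AA (antialias.cu)
```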
extensions/nvdiffrast/nvdiffrast/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ # and proprietary rights in and to this software, related documentation
5
+ # and any modifications thereto. Any use, reproduction, disclosure or
6
+ # distribution of this software and related documentation without an express
7
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ __version__ = '0.3.3'
extensions/nvdiffrast/nvdiffrast/common/antialias.cu ADDED
@@ -0,0 +1,558 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "antialias.h"
10
+
11
+ //------------------------------------------------------------------------
12
+ // Helpers.
13
+
14
+ #define F32_MAX (3.402823466e+38f)
15
+ static __forceinline__ __device__ bool same_sign(float a, float b) { return (__float_as_int(a) ^ __float_as_int(b)) >= 0; }
16
+ static __forceinline__ __device__ bool rational_gt(float n0, float n1, float d0, float d1) { return (n0*d1 > n1*d0) == same_sign(d0, d1); }
17
+ static __forceinline__ __device__ int max_idx3(float n0, float n1, float n2, float d0, float d1, float d2)
18
+ {
19
+ bool g10 = rational_gt(n1, n0, d1, d0);
20
+ bool g20 = rational_gt(n2, n0, d2, d0);
21
+ bool g21 = rational_gt(n2, n1, d2, d1);
22
+ if (g20 && g21) return 2;
23
+ if (g10) return 1;
24
+ return 0;
25
+ }
26
+
27
+ //------------------------------------------------------------------------
28
+ // Format of antialiasing work items stored in work buffer. Usually accessed directly as int4.
29
+
30
+ struct AAWorkItem
31
+ {
32
+ enum
33
+ {
34
+ EDGE_MASK = 3, // Edge index in lowest bits.
35
+ FLAG_DOWN_BIT = 2, // Down instead of right.
36
+ FLAG_TRI1_BIT = 3, // Edge is from other pixel's triangle.
37
+ };
38
+
39
+ int px, py; // Pixel x, y.
40
+ unsigned int pz_flags; // High 16 bits = pixel z, low 16 bits = edge index and flags.
41
+ float alpha; // Antialiasing alpha value. Zero if no AA.
42
+ };
43
+
44
+ //------------------------------------------------------------------------
45
+ // Hash functions. Adapted from public-domain code at http://www.burtleburtle.net/bob/hash/doobs.html
46
+
47
+ #define JENKINS_MAGIC (0x9e3779b9u)
48
+ static __device__ __forceinline__ void jenkins_mix(unsigned int& a, unsigned int& b, unsigned int& c)
49
+ {
50
+ a -= b; a -= c; a ^= (c>>13);
51
+ b -= c; b -= a; b ^= (a<<8);
52
+ c -= a; c -= b; c ^= (b>>13);
53
+ a -= b; a -= c; a ^= (c>>12);
54
+ b -= c; b -= a; b ^= (a<<16);
55
+ c -= a; c -= b; c ^= (b>>5);
56
+ a -= b; a -= c; a ^= (c>>3);
57
+ b -= c; b -= a; b ^= (a<<10);
58
+ c -= a; c -= b; c ^= (b>>15);
59
+ }
60
+
61
+ // Helper class for hash index iteration. Implements simple odd-skip linear probing with a key-dependent skip.
62
+ class HashIndex
63
+ {
64
+ public:
65
+ __device__ __forceinline__ HashIndex(const AntialiasKernelParams& p, uint64_t key)
66
+ {
67
+ m_mask = (p.allocTriangles << AA_LOG_HASH_ELEMENTS_PER_TRIANGLE(p.allocTriangles)) - 1; // This should work until triangle count exceeds 1073741824.
68
+ m_idx = (uint32_t)(key & 0xffffffffu);
69
+ m_skip = (uint32_t)(key >> 32);
70
+ uint32_t dummy = JENKINS_MAGIC;
71
+ jenkins_mix(m_idx, m_skip, dummy);
72
+ m_idx &= m_mask;
73
+ m_skip &= m_mask;
74
+ m_skip |= 1;
75
+ }
76
+ __device__ __forceinline__ int get(void) const { return m_idx; }
77
+ __device__ __forceinline__ void next(void) { m_idx = (m_idx + m_skip) & m_mask; }
78
+ private:
79
+ uint32_t m_idx, m_skip, m_mask;
80
+ };
81
+
82
+ static __device__ __forceinline__ void hash_insert(const AntialiasKernelParams& p, uint64_t key, int v)
83
+ {
84
+ HashIndex idx(p, key);
85
+ while(1)
86
+ {
87
+ uint64_t prev = atomicCAS((unsigned long long*)&p.evHash[idx.get()], 0, (unsigned long long)key);
88
+ if (prev == 0 || prev == key)
89
+ break;
90
+ idx.next();
91
+ }
92
+ int* q = (int*)&p.evHash[idx.get()];
93
+ int a = atomicCAS(q+2, 0, v);
94
+ if (a != 0 && a != v)
95
+ atomicCAS(q+3, 0, v);
96
+ }
97
+
98
+ static __device__ __forceinline__ int2 hash_find(const AntialiasKernelParams& p, uint64_t key)
99
+ {
100
+ HashIndex idx(p, key);
101
+ while(1)
102
+ {
103
+ uint4 entry = p.evHash[idx.get()];
104
+ uint64_t k = ((uint64_t)entry.x) | (((uint64_t)entry.y) << 32);
105
+ if (k == key || k == 0)
106
+ return make_int2((int)entry.z, (int)entry.w);
107
+ idx.next();
108
+ }
109
+ }
110
+
111
+ static __device__ __forceinline__ void evhash_insert_vertex(const AntialiasKernelParams& p, int va, int vb, int vn)
112
+ {
113
+ if (va == vb)
114
+ return;
115
+
116
+ uint64_t v0 = (uint32_t)min(va, vb) + 1; // canonical vertex order
117
+ uint64_t v1 = (uint32_t)max(va, vb) + 1;
118
+ uint64_t vk = v0 | (v1 << 32); // hash key
119
+ hash_insert(p, vk, vn + 1);
120
+ }
121
+
122
+ static __forceinline__ __device__ int evhash_find_vertex(const AntialiasKernelParams& p, int va, int vb, int vr)
123
+ {
124
+ if (va == vb)
125
+ return -1;
126
+
127
+ uint64_t v0 = (uint32_t)min(va, vb) + 1; // canonical vertex order
128
+ uint64_t v1 = (uint32_t)max(va, vb) + 1;
129
+ uint64_t vk = v0 | (v1 << 32); // hash key
130
+ int2 vn = hash_find(p, vk) - 1;
131
+ if (vn.x == vr) return vn.y;
132
+ if (vn.y == vr) return vn.x;
133
+ return -1;
134
+ }
135
+
136
+ //------------------------------------------------------------------------
137
+ // Mesh analysis kernel.
138
+
139
+ __global__ void AntialiasFwdMeshKernel(const AntialiasKernelParams p)
140
+ {
141
+ int idx = threadIdx.x + blockIdx.x * blockDim.x;
142
+ if (idx >= p.numTriangles)
143
+ return;
144
+
145
+ int v0 = p.tri[idx * 3 + 0];
146
+ int v1 = p.tri[idx * 3 + 1];
147
+ int v2 = p.tri[idx * 3 + 2];
148
+
149
+ if (v0 < 0 || v0 >= p.numVertices ||
150
+ v1 < 0 || v1 >= p.numVertices ||
151
+ v2 < 0 || v2 >= p.numVertices)
152
+ return;
153
+
154
+ if (v0 == v1 || v1 == v2 || v2 == v0)
155
+ return;
156
+
157
+ evhash_insert_vertex(p, v1, v2, v0);
158
+ evhash_insert_vertex(p, v2, v0, v1);
159
+ evhash_insert_vertex(p, v0, v1, v2);
160
+ }
161
+
162
+ //------------------------------------------------------------------------
163
+ // Discontinuity finder kernel.
164
+
165
+ __global__ void AntialiasFwdDiscontinuityKernel(const AntialiasKernelParams p)
166
+ {
167
+ // Calculate pixel position.
168
+ int px = blockIdx.x * AA_DISCONTINUITY_KERNEL_BLOCK_WIDTH + threadIdx.x;
169
+ int py = blockIdx.y * AA_DISCONTINUITY_KERNEL_BLOCK_HEIGHT + threadIdx.y;
170
+ int pz = blockIdx.z;
171
+ if (px >= p.width || py >= p.height || pz >= p.n)
172
+ return;
173
+
174
+ // Pointer to our TriIdx and fetch.
175
+ int pidx0 = ((px + p.width * (py + p.height * pz)) << 2) + 3;
176
+ float tri0 = p.rasterOut[pidx0]; // These can stay as float, as we only compare them against each other.
177
+
178
+ // Look right, clamp at edge.
179
+ int pidx1 = pidx0;
180
+ if (px < p.width - 1)
181
+ pidx1 += 4;
182
+ float tri1 = p.rasterOut[pidx1];
183
+
184
+ // Look down, clamp at edge.
185
+ int pidx2 = pidx0;
186
+ if (py < p.height - 1)
187
+ pidx2 += p.width << 2;
188
+ float tri2 = p.rasterOut[pidx2];
189
+
190
+ // Determine amount of work.
191
+ int count = 0;
192
+ if (tri1 != tri0) count = 1;
193
+ if (tri2 != tri0) count += 1;
194
+ if (!count)
195
+ return; // Exit warp.
196
+
197
+ // Coalesce work counter update to once per CTA.
198
+ __shared__ int s_temp;
199
+ s_temp = 0;
200
+ __syncthreads();
201
+ int idx = atomicAdd(&s_temp, count);
202
+ __syncthreads();
203
+ if (idx == 0)
204
+ {
205
+ int base = atomicAdd(&p.workBuffer[0].x, s_temp);
206
+ s_temp = base + 1; // don't clobber the counters in first slot.
207
+ }
208
+ __syncthreads();
209
+ idx += s_temp;
210
+
211
+ // Write to memory.
212
+ if (tri1 != tri0) p.workBuffer[idx++] = make_int4(px, py, (pz << 16), 0);
213
+ if (tri2 != tri0) p.workBuffer[idx] = make_int4(px, py, (pz << 16) + (1 << AAWorkItem::FLAG_DOWN_BIT), 0);
214
+ }
215
+
216
+ //------------------------------------------------------------------------
217
+ // Forward analysis kernel.
218
+
219
+ __global__ void AntialiasFwdAnalysisKernel(const AntialiasKernelParams p)
220
+ {
221
+ __shared__ int s_base;
222
+ int workCount = p.workBuffer[0].x;
223
+ for(;;)
224
+ {
225
+ // Persistent threads work fetcher.
226
+ __syncthreads();
227
+ if (threadIdx.x == 0)
228
+ s_base = atomicAdd(&p.workBuffer[0].y, AA_ANALYSIS_KERNEL_THREADS_PER_BLOCK);
229
+ __syncthreads();
230
+ int thread_idx = s_base + threadIdx.x;
231
+ if (thread_idx >= workCount)
232
+ return;
233
+
234
+ int4* pItem = p.workBuffer + thread_idx + 1;
235
+ int4 item = *pItem;
236
+ int px = item.x;
237
+ int py = item.y;
238
+ int pz = (int)(((unsigned int)item.z) >> 16);
239
+ int d = (item.z >> AAWorkItem::FLAG_DOWN_BIT) & 1;
240
+
241
+ int pixel0 = px + p.width * (py + p.height * pz);
242
+ int pixel1 = pixel0 + (d ? p.width : 1);
243
+ float2 zt0 = ((float2*)p.rasterOut)[(pixel0 << 1) + 1];
244
+ float2 zt1 = ((float2*)p.rasterOut)[(pixel1 << 1) + 1];
245
+ int tri0 = float_to_triidx(zt0.y) - 1;
246
+ int tri1 = float_to_triidx(zt1.y) - 1;
247
+
248
+ // Select triangle based on background / depth.
249
+ int tri = (tri0 >= 0) ? tri0 : tri1;
250
+ if (tri0 >= 0 && tri1 >= 0)
251
+ tri = (zt0.x < zt1.x) ? tri0 : tri1;
252
+ if (tri == tri1)
253
+ {
254
+ // Calculate with respect to neighbor pixel if chose that triangle.
255
+ px += 1 - d;
256
+ py += d;
257
+ }
258
+
259
+ // Bail out if triangle index is corrupt.
260
+ if (tri < 0 || tri >= p.numTriangles)
261
+ continue;
262
+
263
+ // Fetch vertex indices.
264
+ int vi0 = p.tri[tri * 3 + 0];
265
+ int vi1 = p.tri[tri * 3 + 1];
266
+ int vi2 = p.tri[tri * 3 + 2];
267
+
268
+ // Bail out if vertex indices are corrupt.
269
+ if (vi0 < 0 || vi0 >= p.numVertices ||
270
+ vi1 < 0 || vi1 >= p.numVertices ||
271
+ vi2 < 0 || vi2 >= p.numVertices)
272
+ continue;
273
+
274
+ // Fetch opposite vertex indices. Use vertex itself (always silhouette) if no opposite vertex exists.
275
+ int op0 = evhash_find_vertex(p, vi2, vi1, vi0);
276
+ int op1 = evhash_find_vertex(p, vi0, vi2, vi1);
277
+ int op2 = evhash_find_vertex(p, vi1, vi0, vi2);
278
+
279
+ // Instance mode: Adjust vertex indices based on minibatch index.
280
+ if (p.instance_mode)
281
+ {
282
+ int vbase = pz * p.numVertices;
283
+ vi0 += vbase;
284
+ vi1 += vbase;
285
+ vi2 += vbase;
286
+ if (op0 >= 0) op0 += vbase;
287
+ if (op1 >= 0) op1 += vbase;
288
+ if (op2 >= 0) op2 += vbase;
289
+ }
290
+
291
+ // Fetch vertex positions.
292
+ float4 p0 = ((float4*)p.pos)[vi0];
293
+ float4 p1 = ((float4*)p.pos)[vi1];
294
+ float4 p2 = ((float4*)p.pos)[vi2];
295
+ float4 o0 = (op0 < 0) ? p0 : ((float4*)p.pos)[op0];
296
+ float4 o1 = (op1 < 0) ? p1 : ((float4*)p.pos)[op1];
297
+ float4 o2 = (op2 < 0) ? p2 : ((float4*)p.pos)[op2];
298
+
299
+ // Project vertices to pixel space.
300
+ float w0 = 1.f / p0.w;
301
+ float w1 = 1.f / p1.w;
302
+ float w2 = 1.f / p2.w;
303
+ float ow0 = 1.f / o0.w;
304
+ float ow1 = 1.f / o1.w;
305
+ float ow2 = 1.f / o2.w;
306
+ float fx = (float)px + .5f - p.xh;
307
+ float fy = (float)py + .5f - p.yh;
308
+ float x0 = p0.x * w0 * p.xh - fx;
309
+ float y0 = p0.y * w0 * p.yh - fy;
310
+ float x1 = p1.x * w1 * p.xh - fx;
311
+ float y1 = p1.y * w1 * p.yh - fy;
312
+ float x2 = p2.x * w2 * p.xh - fx;
313
+ float y2 = p2.y * w2 * p.yh - fy;
314
+ float ox0 = o0.x * ow0 * p.xh - fx;
315
+ float oy0 = o0.y * ow0 * p.yh - fy;
316
+ float ox1 = o1.x * ow1 * p.xh - fx;
317
+ float oy1 = o1.y * ow1 * p.yh - fy;
318
+ float ox2 = o2.x * ow2 * p.xh - fx;
319
+ float oy2 = o2.y * ow2 * p.yh - fy;
320
+
321
+ // Signs to kill non-silhouette edges.
322
+ float bb = (x1-x0)*(y2-y0) - (x2-x0)*(y1-y0); // Triangle itself.
323
+ float a0 = (x1-ox0)*(y2-oy0) - (x2-ox0)*(y1-oy0); // Wings.
324
+ float a1 = (x2-ox1)*(y0-oy1) - (x0-ox1)*(y2-oy1);
325
+ float a2 = (x0-ox2)*(y1-oy2) - (x1-ox2)*(y0-oy2);
326
+
327
+ // If no matching signs anywhere, skip the rest.
328
+ if (same_sign(a0, bb) || same_sign(a1, bb) || same_sign(a2, bb))
329
+ {
330
+ // XY flip for horizontal edges.
331
+ if (d)
332
+ {
333
+ swap(x0, y0);
334
+ swap(x1, y1);
335
+ swap(x2, y2);
336
+ }
337
+
338
+ float dx0 = x2 - x1;
339
+ float dx1 = x0 - x2;
340
+ float dx2 = x1 - x0;
341
+ float dy0 = y2 - y1;
342
+ float dy1 = y0 - y2;
343
+ float dy2 = y1 - y0;
344
+
345
+ // Check if an edge crosses between us and the neighbor pixel.
346
+ float dc = -F32_MAX;
347
+ float ds = (tri == tri0) ? 1.f : -1.f;
348
+ float d0 = ds * (x1*dy0 - y1*dx0);
349
+ float d1 = ds * (x2*dy1 - y2*dx1);
350
+ float d2 = ds * (x0*dy2 - y0*dx2);
351
+
352
+ if (same_sign(y1, y2)) d0 = -F32_MAX, dy0 = 1.f;
353
+ if (same_sign(y2, y0)) d1 = -F32_MAX, dy1 = 1.f;
354
+ if (same_sign(y0, y1)) d2 = -F32_MAX, dy2 = 1.f;
355
+
356
+ int di = max_idx3(d0, d1, d2, dy0, dy1, dy2);
357
+ if (di == 0 && same_sign(a0, bb) && fabsf(dy0) >= fabsf(dx0)) dc = d0 / dy0;
358
+ if (di == 1 && same_sign(a1, bb) && fabsf(dy1) >= fabsf(dx1)) dc = d1 / dy1;
359
+ if (di == 2 && same_sign(a2, bb) && fabsf(dy2) >= fabsf(dx2)) dc = d2 / dy2;
360
+ float eps = .0625f; // Expect no more than 1/16 pixel inaccuracy.
361
+
362
+ // Adjust output image if a suitable edge was found.
363
+ if (dc > -eps && dc < 1.f + eps)
364
+ {
365
+ dc = fminf(fmaxf(dc, 0.f), 1.f);
366
+ float alpha = ds * (.5f - dc);
367
+ const float* pColor0 = p.color + pixel0 * p.channels;
368
+ const float* pColor1 = p.color + pixel1 * p.channels;
369
+ float* pOutput = p.output + (alpha > 0.f ? pixel0 : pixel1) * p.channels;
370
+ for (int i=0; i < p.channels; i++)
371
+ atomicAdd(&pOutput[i], alpha * (pColor1[i] - pColor0[i]));
372
+
373
+ // Rewrite the work item's flags and alpha. Keep original px, py.
374
+ unsigned int flags = pz << 16;
375
+ flags |= di;
376
+ flags |= d << AAWorkItem::FLAG_DOWN_BIT;
377
+ flags |= (__float_as_uint(ds) >> 31) << AAWorkItem::FLAG_TRI1_BIT;
378
+ ((int2*)pItem)[1] = make_int2(flags, __float_as_int(alpha));
379
+ }
380
+ }
381
+ }
382
+ }
383
+
384
+ //------------------------------------------------------------------------
385
+ // Gradient kernel.
386
+
387
+ __global__ void AntialiasGradKernel(const AntialiasKernelParams p)
388
+ {
389
+ // Temporary space for coalesced atomics.
390
+ CA_DECLARE_TEMP(AA_GRAD_KERNEL_THREADS_PER_BLOCK);
391
+ __shared__ int s_base; // Work counter communication across entire CTA.
392
+
393
+ int workCount = p.workBuffer[0].x;
394
+
395
+ for(;;)
396
+ {
397
+ // Persistent threads work fetcher.
398
+ __syncthreads();
399
+ if (threadIdx.x == 0)
400
+ s_base = atomicAdd(&p.workBuffer[0].y, AA_GRAD_KERNEL_THREADS_PER_BLOCK);
401
+ __syncthreads();
402
+ int thread_idx = s_base + threadIdx.x;
403
+ if (thread_idx >= workCount)
404
+ return;
405
+
406
+ // Read work item filled out by forward kernel.
407
+ int4 item = p.workBuffer[thread_idx + 1];
408
+ unsigned int amask = __ballot_sync(0xffffffffu, item.w);
409
+ if (item.w == 0)
410
+ continue; // No effect.
411
+
412
+ // Unpack work item and replicate setup from forward analysis kernel.
413
+ int px = item.x;
414
+ int py = item.y;
415
+ int pz = (int)(((unsigned int)item.z) >> 16);
416
+ int d = (item.z >> AAWorkItem::FLAG_DOWN_BIT) & 1;
417
+ float alpha = __int_as_float(item.w);
418
+ int tri1 = (item.z >> AAWorkItem::FLAG_TRI1_BIT) & 1;
419
+ int di = item.z & AAWorkItem::EDGE_MASK;
420
+ float ds = __int_as_float(__float_as_int(1.0) | (tri1 << 31));
421
+ int pixel0 = px + p.width * (py + p.height * pz);
422
+ int pixel1 = pixel0 + (d ? p.width : 1);
423
+ int tri = float_to_triidx(p.rasterOut[((tri1 ? pixel1 : pixel0) << 2) + 3]) - 1;
424
+ if (tri1)
425
+ {
426
+ px += 1 - d;
427
+ py += d;
428
+ }
429
+
430
+ // Bail out if triangle index is corrupt.
431
+ bool triFail = (tri < 0 || tri >= p.numTriangles);
432
+ amask = __ballot_sync(amask, !triFail);
433
+ if (triFail)
434
+ continue;
435
+
436
+ // Outgoing color gradients.
437
+ float* pGrad0 = p.gradColor + pixel0 * p.channels;
438
+ float* pGrad1 = p.gradColor + pixel1 * p.channels;
439
+
440
+ // Incoming color gradients.
441
+ const float* pDy = p.dy + (alpha > 0.f ? pixel0 : pixel1) * p.channels;
442
+
443
+ // Position gradient weight based on colors and incoming gradients.
444
+ float dd = 0.f;
445
+ const float* pColor0 = p.color + pixel0 * p.channels;
446
+ const float* pColor1 = p.color + pixel1 * p.channels;
447
+
448
+ // Loop over channels and accumulate.
449
+ for (int i=0; i < p.channels; i++)
450
+ {
451
+ float dy = pDy[i];
452
+ if (dy != 0.f)
453
+ {
454
+ // Update position gradient weight.
455
+ dd += dy * (pColor1[i] - pColor0[i]);
456
+
457
+ // Update color gradients. No coalescing because all have different targets.
458
+ float v = alpha * dy;
459
+ atomicAdd(&pGrad0[i], -v);
460
+ atomicAdd(&pGrad1[i], v);
461
+ }
462
+ }
463
+
464
+ // If position weight is zero, skip the rest.
465
+ bool noGrad = (dd == 0.f);
466
+ amask = __ballot_sync(amask, !noGrad);
467
+ if (noGrad)
468
+ continue;
469
+
470
+ // Fetch vertex indices of the active edge and their positions.
471
+ int i1 = (di < 2) ? (di + 1) : 0;
472
+ int i2 = (i1 < 2) ? (i1 + 1) : 0;
473
+ int vi1 = p.tri[3 * tri + i1];
474
+ int vi2 = p.tri[3 * tri + i2];
475
+
476
+ // Bail out if vertex indices are corrupt.
477
+ bool vtxFail = (vi1 < 0 || vi1 >= p.numVertices || vi2 < 0 || vi2 >= p.numVertices);
478
+ amask = __ballot_sync(amask, !vtxFail);
479
+ if (vtxFail)
480
+ continue;
481
+
482
+ // Instance mode: Adjust vertex indices based on minibatch index.
483
+ if (p.instance_mode)
484
+ {
485
+ vi1 += pz * p.numVertices;
486
+ vi2 += pz * p.numVertices;
487
+ }
488
+
489
+ // Fetch vertex positions.
490
+ float4 p1 = ((float4*)p.pos)[vi1];
491
+ float4 p2 = ((float4*)p.pos)[vi2];
492
+
493
+ // Project vertices to pixel space.
494
+ float pxh = p.xh;
495
+ float pyh = p.yh;
496
+ float fx = (float)px + .5f - pxh;
497
+ float fy = (float)py + .5f - pyh;
498
+
499
+ // XY flip for horizontal edges.
500
+ if (d)
501
+ {
502
+ swap(p1.x, p1.y);
503
+ swap(p2.x, p2.y);
504
+ swap(pxh, pyh);
505
+ swap(fx, fy);
506
+ }
507
+
508
+ // Gradient calculation setup.
509
+ float w1 = 1.f / p1.w;
510
+ float w2 = 1.f / p2.w;
511
+ float x1 = p1.x * w1 * pxh - fx;
512
+ float y1 = p1.y * w1 * pyh - fy;
513
+ float x2 = p2.x * w2 * pxh - fx;
514
+ float y2 = p2.y * w2 * pyh - fy;
515
+ float dx = x2 - x1;
516
+ float dy = y2 - y1;
517
+ float db = x1*dy - y1*dx;
518
+
519
+ // Compute inverse delta-y with epsilon.
520
+ float ep = copysignf(1e-3f, dy); // ~1/1000 pixel.
521
+ float iy = 1.f / (dy + ep);
522
+
523
+ // Compute position gradients.
524
+ float dby = db * iy;
525
+ float iw1 = -w1 * iy * dd;
526
+ float iw2 = w2 * iy * dd;
527
+ float gp1x = iw1 * pxh * y2;
528
+ float gp2x = iw2 * pxh * y1;
529
+ float gp1y = iw1 * pyh * (dby - x2);
530
+ float gp2y = iw2 * pyh * (dby - x1);
531
+ float gp1w = -(p1.x * gp1x + p1.y * gp1y) * w1;
532
+ float gp2w = -(p2.x * gp2x + p2.y * gp2y) * w2;
533
+
534
+ // XY flip the gradients.
535
+ if (d)
536
+ {
537
+ swap(gp1x, gp1y);
538
+ swap(gp2x, gp2y);
539
+ }
540
+
541
+ // Kill position gradients if alpha was saturated.
542
+ if (fabsf(alpha) >= 0.5f)
543
+ {
544
+ gp1x = gp1y = gp1w = 0.f;
545
+ gp2x = gp2y = gp2w = 0.f;
546
+ }
547
+
548
+ // Initialize coalesced atomics. Match both triangle ID and edge index.
549
+ // Also note that some threads may be inactive.
550
+ CA_SET_GROUP_MASK(tri ^ (di << 30), amask);
551
+
552
+ // Accumulate gradients.
553
+ caAtomicAdd3_xyw(p.gradPos + 4 * vi1, gp1x, gp1y, gp1w);
554
+ caAtomicAdd3_xyw(p.gradPos + 4 * vi2, gp2x, gp2y, gp2w);
555
+ }
556
+ }
557
+
558
+ //------------------------------------------------------------------------
extensions/nvdiffrast/nvdiffrast/common/antialias.h ADDED
@@ -0,0 +1,50 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+ #include "common.h"
11
+
12
+ //------------------------------------------------------------------------
13
+ // Constants and helpers.
14
+
15
+ #define AA_DISCONTINUITY_KERNEL_BLOCK_WIDTH 32
16
+ #define AA_DISCONTINUITY_KERNEL_BLOCK_HEIGHT 8
17
+ #define AA_ANALYSIS_KERNEL_THREADS_PER_BLOCK 256
18
+ #define AA_MESH_KERNEL_THREADS_PER_BLOCK 256
19
+ #define AA_HASH_ELEMENTS_PER_TRIANGLE(alloc) ((alloc) >= (2 << 25) ? 4 : 8) // With more than 16777216 triangles (alloc >= 33554432) use smallest possible value of 4 to conserve memory, otherwise use 8 for fewer collisions.
20
+ #define AA_LOG_HASH_ELEMENTS_PER_TRIANGLE(alloc) ((alloc) >= (2 << 25) ? 2 : 3)
21
+ #define AA_GRAD_KERNEL_THREADS_PER_BLOCK 256
22
+
23
+ //------------------------------------------------------------------------
24
+ // CUDA kernel params.
25
+
26
+ struct AntialiasKernelParams
27
+ {
28
+ const float* color; // Incoming color buffer.
29
+ const float* rasterOut; // Incoming rasterizer output buffer.
30
+ const int* tri; // Incoming triangle buffer.
31
+ const float* pos; // Incoming position buffer.
32
+ float* output; // Output buffer of forward kernel.
33
+ const float* dy; // Incoming gradients.
34
+ float* gradColor; // Output buffer, color gradient.
35
+ float* gradPos; // Output buffer, position gradient.
36
+ int4* workBuffer; // Buffer for storing intermediate work items. First item reserved for counters.
37
+ uint4* evHash; // Edge-vertex hash.
38
+ int allocTriangles; // Number of triangles accommodated by evHash. Always power of two.
39
+ int numTriangles; // Number of triangles.
40
+ int numVertices; // Number of vertices.
41
+ int width; // Input width.
42
+ int height; // Input height.
43
+ int n; // Minibatch size.
44
+ int channels; // Channel count in color input.
45
+ float xh, yh; // Transfer to pixel space.
46
+ int instance_mode; // 0=normal, 1=instance mode.
47
+ int tri_const; // 1 if triangle array is known to be constant.
48
+ };
49
+
50
+ //------------------------------------------------------------------------
extensions/nvdiffrast/nvdiffrast/common/common.cpp ADDED
@@ -0,0 +1,60 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include <cuda_runtime.h>
10
+
11
+ //------------------------------------------------------------------------
12
+ // Block and grid size calculators for kernel launches.
13
+
14
+ dim3 getLaunchBlockSize(int maxWidth, int maxHeight, int width, int height)
15
+ {
16
+ int maxThreads = maxWidth * maxHeight;
17
+ if (maxThreads <= 1 || (width * height) <= 1)
18
+ return dim3(1, 1, 1); // Degenerate.
19
+
20
+ // Start from max size.
21
+ int bw = maxWidth;
22
+ int bh = maxHeight;
23
+
24
+ // Optimizations for weirdly sized buffers.
25
+ if (width < bw)
26
+ {
27
+ // Decrease block width to smallest power of two that covers the buffer width.
28
+ while ((bw >> 1) >= width)
29
+ bw >>= 1;
30
+
31
+ // Maximize height.
32
+ bh = maxThreads / bw;
33
+ if (bh > height)
34
+ bh = height;
35
+ }
36
+ else if (height < bh)
37
+ {
38
+ // Halve height and double width until fits completely inside buffer vertically.
39
+ while (bh > height)
40
+ {
41
+ bh >>= 1;
42
+ if (bw < width)
43
+ bw <<= 1;
44
+ }
45
+ }
46
+
47
+ // Done.
48
+ return dim3(bw, bh, 1);
49
+ }
50
+
51
+ dim3 getLaunchGridSize(dim3 blockSize, int width, int height, int depth)
52
+ {
53
+ dim3 gridSize;
54
+ gridSize.x = (width - 1) / blockSize.x + 1;
55
+ gridSize.y = (height - 1) / blockSize.y + 1;
56
+ gridSize.z = (depth - 1) / blockSize.z + 1;
57
+ return gridSize;
58
+ }
59
+
60
+ //------------------------------------------------------------------------
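To make the launch-size helpers above concrete, here is a Python sketch of the same arithmetic (illustrative only; the real functions return `dim3` values consumed by the CUDA kernel launches):

```python
def get_launch_block_size(max_width, max_height, width, height):
    max_threads = max_width * max_height
    if max_threads <= 1 or width * height <= 1:
        return (1, 1, 1)                              # degenerate buffer
    bw, bh = max_width, max_height
    if width < bw:
        # Shrink block width to the smallest power of two covering the buffer,
        # then spend the freed threads on extra height.
        while (bw >> 1) >= width:
            bw >>= 1
        bh = min(max_threads // bw, height)
    elif height < bh:
        # Halve block height (widening when possible) until it fits vertically.
        while bh > height:
            bh >>= 1
            if bw < width:
                bw <<= 1
    return (bw, bh, 1)

def get_launch_grid_size(block, width, height, depth):
    bx, by, bz = block
    return ((width - 1) // bx + 1, (height - 1) // by + 1, (depth - 1) // bz + 1)

block = get_launch_block_size(32, 8, 300, 40)   # -> (32, 8, 1)
grid = get_launch_grid_size(block, 300, 40, 1)  # -> (10, 5, 1)
```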
extensions/nvdiffrast/nvdiffrast/common/common.h ADDED
@@ -0,0 +1,263 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+ #include <cuda.h>
11
+ #include <stdint.h>
12
+
13
+ //------------------------------------------------------------------------
14
+ // C++ helper function prototypes.
15
+
16
+ dim3 getLaunchBlockSize(int maxWidth, int maxHeight, int width, int height);
17
+ dim3 getLaunchGridSize(dim3 blockSize, int width, int height, int depth);
18
+
19
+ //------------------------------------------------------------------------
20
+ // The rest is CUDA device code specific stuff.
21
+
22
+ #ifdef __CUDACC__
23
+
24
+ //------------------------------------------------------------------------
25
+ // Helpers for CUDA vector types.
26
+
27
+ static __device__ __forceinline__ float2& operator*= (float2& a, const float2& b) { a.x *= b.x; a.y *= b.y; return a; }
28
+ static __device__ __forceinline__ float2& operator+= (float2& a, const float2& b) { a.x += b.x; a.y += b.y; return a; }
29
+ static __device__ __forceinline__ float2& operator-= (float2& a, const float2& b) { a.x -= b.x; a.y -= b.y; return a; }
30
+ static __device__ __forceinline__ float2& operator*= (float2& a, float b) { a.x *= b; a.y *= b; return a; }
31
+ static __device__ __forceinline__ float2& operator+= (float2& a, float b) { a.x += b; a.y += b; return a; }
32
+ static __device__ __forceinline__ float2& operator-= (float2& a, float b) { a.x -= b; a.y -= b; return a; }
33
+ static __device__ __forceinline__ float2 operator* (const float2& a, const float2& b) { return make_float2(a.x * b.x, a.y * b.y); }
34
+ static __device__ __forceinline__ float2 operator+ (const float2& a, const float2& b) { return make_float2(a.x + b.x, a.y + b.y); }
35
+ static __device__ __forceinline__ float2 operator- (const float2& a, const float2& b) { return make_float2(a.x - b.x, a.y - b.y); }
36
+ static __device__ __forceinline__ float2 operator* (const float2& a, float b) { return make_float2(a.x * b, a.y * b); }
37
+ static __device__ __forceinline__ float2 operator+ (const float2& a, float b) { return make_float2(a.x + b, a.y + b); }
38
+ static __device__ __forceinline__ float2 operator- (const float2& a, float b) { return make_float2(a.x - b, a.y - b); }
39
+ static __device__ __forceinline__ float2 operator* (float a, const float2& b) { return make_float2(a * b.x, a * b.y); }
40
+ static __device__ __forceinline__ float2 operator+ (float a, const float2& b) { return make_float2(a + b.x, a + b.y); }
41
+ static __device__ __forceinline__ float2 operator- (float a, const float2& b) { return make_float2(a - b.x, a - b.y); }
42
+ static __device__ __forceinline__ float2 operator- (const float2& a) { return make_float2(-a.x, -a.y); }
43
+ static __device__ __forceinline__ float3& operator*= (float3& a, const float3& b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; return a; }
44
+ static __device__ __forceinline__ float3& operator+= (float3& a, const float3& b) { a.x += b.x; a.y += b.y; a.z += b.z; return a; }
45
+ static __device__ __forceinline__ float3& operator-= (float3& a, const float3& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; return a; }
46
+ static __device__ __forceinline__ float3& operator*= (float3& a, float b) { a.x *= b; a.y *= b; a.z *= b; return a; }
47
+ static __device__ __forceinline__ float3& operator+= (float3& a, float b) { a.x += b; a.y += b; a.z += b; return a; }
48
+ static __device__ __forceinline__ float3& operator-= (float3& a, float b) { a.x -= b; a.y -= b; a.z -= b; return a; }
49
+ static __device__ __forceinline__ float3 operator* (const float3& a, const float3& b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); }
50
+ static __device__ __forceinline__ float3 operator+ (const float3& a, const float3& b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); }
51
+ static __device__ __forceinline__ float3 operator- (const float3& a, const float3& b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }
52
+ static __device__ __forceinline__ float3 operator* (const float3& a, float b) { return make_float3(a.x * b, a.y * b, a.z * b); }
53
+ static __device__ __forceinline__ float3 operator+ (const float3& a, float b) { return make_float3(a.x + b, a.y + b, a.z + b); }
54
+ static __device__ __forceinline__ float3 operator- (const float3& a, float b) { return make_float3(a.x - b, a.y - b, a.z - b); }
55
+ static __device__ __forceinline__ float3 operator* (float a, const float3& b) { return make_float3(a * b.x, a * b.y, a * b.z); }
56
+ static __device__ __forceinline__ float3 operator+ (float a, const float3& b) { return make_float3(a + b.x, a + b.y, a + b.z); }
57
+ static __device__ __forceinline__ float3 operator- (float a, const float3& b) { return make_float3(a - b.x, a - b.y, a - b.z); }
58
+ static __device__ __forceinline__ float3 operator- (const float3& a) { return make_float3(-a.x, -a.y, -a.z); }
59
+ static __device__ __forceinline__ float4& operator*= (float4& a, const float4& b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; a.w *= b.w; return a; }
60
+ static __device__ __forceinline__ float4& operator+= (float4& a, const float4& b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; return a; }
61
+ static __device__ __forceinline__ float4& operator-= (float4& a, const float4& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; return a; }
62
+ static __device__ __forceinline__ float4& operator*= (float4& a, float b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; return a; }
63
+ static __device__ __forceinline__ float4& operator+= (float4& a, float b) { a.x += b; a.y += b; a.z += b; a.w += b; return a; }
64
+ static __device__ __forceinline__ float4& operator-= (float4& a, float b) { a.x -= b; a.y -= b; a.z -= b; a.w -= b; return a; }
65
+ static __device__ __forceinline__ float4 operator* (const float4& a, const float4& b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }
66
+ static __device__ __forceinline__ float4 operator+ (const float4& a, const float4& b) { return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); }
67
+ static __device__ __forceinline__ float4 operator- (const float4& a, const float4& b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }
68
+ static __device__ __forceinline__ float4 operator* (const float4& a, float b) { return make_float4(a.x * b, a.y * b, a.z * b, a.w * b); }
69
+ static __device__ __forceinline__ float4 operator+ (const float4& a, float b) { return make_float4(a.x + b, a.y + b, a.z + b, a.w + b); }
70
+ static __device__ __forceinline__ float4 operator- (const float4& a, float b) { return make_float4(a.x - b, a.y - b, a.z - b, a.w - b); }
71
+ static __device__ __forceinline__ float4 operator* (float a, const float4& b) { return make_float4(a * b.x, a * b.y, a * b.z, a * b.w); }
72
+ static __device__ __forceinline__ float4 operator+ (float a, const float4& b) { return make_float4(a + b.x, a + b.y, a + b.z, a + b.w); }
73
+ static __device__ __forceinline__ float4 operator- (float a, const float4& b) { return make_float4(a - b.x, a - b.y, a - b.z, a - b.w); }
74
+ static __device__ __forceinline__ float4 operator- (const float4& a) { return make_float4(-a.x, -a.y, -a.z, -a.w); }
75
+ static __device__ __forceinline__ int2& operator*= (int2& a, const int2& b) { a.x *= b.x; a.y *= b.y; return a; }
76
+ static __device__ __forceinline__ int2& operator+= (int2& a, const int2& b) { a.x += b.x; a.y += b.y; return a; }
77
+ static __device__ __forceinline__ int2& operator-= (int2& a, const int2& b) { a.x -= b.x; a.y -= b.y; return a; }
78
+ static __device__ __forceinline__ int2& operator*= (int2& a, int b) { a.x *= b; a.y *= b; return a; }
79
+ static __device__ __forceinline__ int2& operator+= (int2& a, int b) { a.x += b; a.y += b; return a; }
80
+ static __device__ __forceinline__ int2& operator-= (int2& a, int b) { a.x -= b; a.y -= b; return a; }
81
+ static __device__ __forceinline__ int2 operator* (const int2& a, const int2& b) { return make_int2(a.x * b.x, a.y * b.y); }
82
+ static __device__ __forceinline__ int2 operator+ (const int2& a, const int2& b) { return make_int2(a.x + b.x, a.y + b.y); }
83
+ static __device__ __forceinline__ int2 operator- (const int2& a, const int2& b) { return make_int2(a.x - b.x, a.y - b.y); }
84
+ static __device__ __forceinline__ int2 operator* (const int2& a, int b) { return make_int2(a.x * b, a.y * b); }
85
+ static __device__ __forceinline__ int2 operator+ (const int2& a, int b) { return make_int2(a.x + b, a.y + b); }
86
+ static __device__ __forceinline__ int2 operator- (const int2& a, int b) { return make_int2(a.x - b, a.y - b); }
87
+ static __device__ __forceinline__ int2 operator* (int a, const int2& b) { return make_int2(a * b.x, a * b.y); }
88
+ static __device__ __forceinline__ int2 operator+ (int a, const int2& b) { return make_int2(a + b.x, a + b.y); }
89
+ static __device__ __forceinline__ int2 operator- (int a, const int2& b) { return make_int2(a - b.x, a - b.y); }
90
+ static __device__ __forceinline__ int2 operator- (const int2& a) { return make_int2(-a.x, -a.y); }
91
+ static __device__ __forceinline__ int3& operator*= (int3& a, const int3& b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; return a; }
92
+ static __device__ __forceinline__ int3& operator+= (int3& a, const int3& b) { a.x += b.x; a.y += b.y; a.z += b.z; return a; }
93
+ static __device__ __forceinline__ int3& operator-= (int3& a, const int3& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; return a; }
94
+ static __device__ __forceinline__ int3& operator*= (int3& a, int b) { a.x *= b; a.y *= b; a.z *= b; return a; }
95
+ static __device__ __forceinline__ int3& operator+= (int3& a, int b) { a.x += b; a.y += b; a.z += b; return a; }
96
+ static __device__ __forceinline__ int3& operator-= (int3& a, int b) { a.x -= b; a.y -= b; a.z -= b; return a; }
97
+ static __device__ __forceinline__ int3 operator* (const int3& a, const int3& b) { return make_int3(a.x * b.x, a.y * b.y, a.z * b.z); }
98
+ static __device__ __forceinline__ int3 operator+ (const int3& a, const int3& b) { return make_int3(a.x + b.x, a.y + b.y, a.z + b.z); }
99
+ static __device__ __forceinline__ int3 operator- (const int3& a, const int3& b) { return make_int3(a.x - b.x, a.y - b.y, a.z - b.z); }
100
+ static __device__ __forceinline__ int3 operator* (const int3& a, int b) { return make_int3(a.x * b, a.y * b, a.z * b); }
101
+ static __device__ __forceinline__ int3 operator+ (const int3& a, int b) { return make_int3(a.x + b, a.y + b, a.z + b); }
102
+ static __device__ __forceinline__ int3 operator- (const int3& a, int b) { return make_int3(a.x - b, a.y - b, a.z - b); }
103
+ static __device__ __forceinline__ int3 operator* (int a, const int3& b) { return make_int3(a * b.x, a * b.y, a * b.z); }
104
+ static __device__ __forceinline__ int3 operator+ (int a, const int3& b) { return make_int3(a + b.x, a + b.y, a + b.z); }
105
+ static __device__ __forceinline__ int3 operator- (int a, const int3& b) { return make_int3(a - b.x, a - b.y, a - b.z); }
106
+ static __device__ __forceinline__ int3 operator- (const int3& a) { return make_int3(-a.x, -a.y, -a.z); }
107
+ static __device__ __forceinline__ int4& operator*= (int4& a, const int4& b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; a.w *= b.w; return a; }
108
+ static __device__ __forceinline__ int4& operator+= (int4& a, const int4& b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; return a; }
109
+ static __device__ __forceinline__ int4& operator-= (int4& a, const int4& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; return a; }
110
+ static __device__ __forceinline__ int4& operator*= (int4& a, int b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; return a; }
111
+ static __device__ __forceinline__ int4& operator+= (int4& a, int b) { a.x += b; a.y += b; a.z += b; a.w += b; return a; }
112
+ static __device__ __forceinline__ int4& operator-= (int4& a, int b) { a.x -= b; a.y -= b; a.z -= b; a.w -= b; return a; }
113
+ static __device__ __forceinline__ int4 operator* (const int4& a, const int4& b) { return make_int4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }
114
+ static __device__ __forceinline__ int4 operator+ (const int4& a, const int4& b) { return make_int4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); }
115
+ static __device__ __forceinline__ int4 operator- (const int4& a, const int4& b) { return make_int4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }
116
+ static __device__ __forceinline__ int4 operator* (const int4& a, int b) { return make_int4(a.x * b, a.y * b, a.z * b, a.w * b); }
117
+ static __device__ __forceinline__ int4 operator+ (const int4& a, int b) { return make_int4(a.x + b, a.y + b, a.z + b, a.w + b); }
118
+ static __device__ __forceinline__ int4 operator- (const int4& a, int b) { return make_int4(a.x - b, a.y - b, a.z - b, a.w - b); }
119
+ static __device__ __forceinline__ int4 operator* (int a, const int4& b) { return make_int4(a * b.x, a * b.y, a * b.z, a * b.w); }
120
+ static __device__ __forceinline__ int4 operator+ (int a, const int4& b) { return make_int4(a + b.x, a + b.y, a + b.z, a + b.w); }
121
+ static __device__ __forceinline__ int4 operator- (int a, const int4& b) { return make_int4(a - b.x, a - b.y, a - b.z, a - b.w); }
122
+ static __device__ __forceinline__ int4 operator- (const int4& a) { return make_int4(-a.x, -a.y, -a.z, -a.w); }
123
+ static __device__ __forceinline__ uint2& operator*= (uint2& a, const uint2& b) { a.x *= b.x; a.y *= b.y; return a; }
124
+ static __device__ __forceinline__ uint2& operator+= (uint2& a, const uint2& b) { a.x += b.x; a.y += b.y; return a; }
125
+ static __device__ __forceinline__ uint2& operator-= (uint2& a, const uint2& b) { a.x -= b.x; a.y -= b.y; return a; }
126
+ static __device__ __forceinline__ uint2& operator*= (uint2& a, unsigned int b) { a.x *= b; a.y *= b; return a; }
127
+ static __device__ __forceinline__ uint2& operator+= (uint2& a, unsigned int b) { a.x += b; a.y += b; return a; }
128
+ static __device__ __forceinline__ uint2& operator-= (uint2& a, unsigned int b) { a.x -= b; a.y -= b; return a; }
129
+ static __device__ __forceinline__ uint2 operator* (const uint2& a, const uint2& b) { return make_uint2(a.x * b.x, a.y * b.y); }
130
+ static __device__ __forceinline__ uint2 operator+ (const uint2& a, const uint2& b) { return make_uint2(a.x + b.x, a.y + b.y); }
131
+ static __device__ __forceinline__ uint2 operator- (const uint2& a, const uint2& b) { return make_uint2(a.x - b.x, a.y - b.y); }
132
+ static __device__ __forceinline__ uint2 operator* (const uint2& a, unsigned int b) { return make_uint2(a.x * b, a.y * b); }
133
+ static __device__ __forceinline__ uint2 operator+ (const uint2& a, unsigned int b) { return make_uint2(a.x + b, a.y + b); }
134
+ static __device__ __forceinline__ uint2 operator- (const uint2& a, unsigned int b) { return make_uint2(a.x - b, a.y - b); }
135
+ static __device__ __forceinline__ uint2 operator* (unsigned int a, const uint2& b) { return make_uint2(a * b.x, a * b.y); }
136
+ static __device__ __forceinline__ uint2 operator+ (unsigned int a, const uint2& b) { return make_uint2(a + b.x, a + b.y); }
137
+ static __device__ __forceinline__ uint2 operator- (unsigned int a, const uint2& b) { return make_uint2(a - b.x, a - b.y); }
138
+ static __device__ __forceinline__ uint3& operator*= (uint3& a, const uint3& b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; return a; }
139
+ static __device__ __forceinline__ uint3& operator+= (uint3& a, const uint3& b) { a.x += b.x; a.y += b.y; a.z += b.z; return a; }
140
+ static __device__ __forceinline__ uint3& operator-= (uint3& a, const uint3& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; return a; }
141
+ static __device__ __forceinline__ uint3& operator*= (uint3& a, unsigned int b) { a.x *= b; a.y *= b; a.z *= b; return a; }
142
+ static __device__ __forceinline__ uint3& operator+= (uint3& a, unsigned int b) { a.x += b; a.y += b; a.z += b; return a; }
143
+ static __device__ __forceinline__ uint3& operator-= (uint3& a, unsigned int b) { a.x -= b; a.y -= b; a.z -= b; return a; }
144
+ static __device__ __forceinline__ uint3 operator* (const uint3& a, const uint3& b) { return make_uint3(a.x * b.x, a.y * b.y, a.z * b.z); }
145
+ static __device__ __forceinline__ uint3 operator+ (const uint3& a, const uint3& b) { return make_uint3(a.x + b.x, a.y + b.y, a.z + b.z); }
146
+ static __device__ __forceinline__ uint3 operator- (const uint3& a, const uint3& b) { return make_uint3(a.x - b.x, a.y - b.y, a.z - b.z); }
147
+ static __device__ __forceinline__ uint3 operator* (const uint3& a, unsigned int b) { return make_uint3(a.x * b, a.y * b, a.z * b); }
148
+ static __device__ __forceinline__ uint3 operator+ (const uint3& a, unsigned int b) { return make_uint3(a.x + b, a.y + b, a.z + b); }
149
+ static __device__ __forceinline__ uint3 operator- (const uint3& a, unsigned int b) { return make_uint3(a.x - b, a.y - b, a.z - b); }
150
+ static __device__ __forceinline__ uint3 operator* (unsigned int a, const uint3& b) { return make_uint3(a * b.x, a * b.y, a * b.z); }
151
+ static __device__ __forceinline__ uint3 operator+ (unsigned int a, const uint3& b) { return make_uint3(a + b.x, a + b.y, a + b.z); }
152
+ static __device__ __forceinline__ uint3 operator- (unsigned int a, const uint3& b) { return make_uint3(a - b.x, a - b.y, a - b.z); }
153
+ static __device__ __forceinline__ uint4& operator*= (uint4& a, const uint4& b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; a.w *= b.w; return a; }
154
+ static __device__ __forceinline__ uint4& operator+= (uint4& a, const uint4& b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; return a; }
155
+ static __device__ __forceinline__ uint4& operator-= (uint4& a, const uint4& b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; return a; }
156
+ static __device__ __forceinline__ uint4& operator*= (uint4& a, unsigned int b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; return a; }
157
+ static __device__ __forceinline__ uint4& operator+= (uint4& a, unsigned int b) { a.x += b; a.y += b; a.z += b; a.w += b; return a; }
158
+ static __device__ __forceinline__ uint4& operator-= (uint4& a, unsigned int b) { a.x -= b; a.y -= b; a.z -= b; a.w -= b; return a; }
159
+ static __device__ __forceinline__ uint4 operator* (const uint4& a, const uint4& b) { return make_uint4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); }
160
+ static __device__ __forceinline__ uint4 operator+ (const uint4& a, const uint4& b) { return make_uint4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); }
161
+ static __device__ __forceinline__ uint4 operator- (const uint4& a, const uint4& b) { return make_uint4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); }
162
+ static __device__ __forceinline__ uint4 operator* (const uint4& a, unsigned int b) { return make_uint4(a.x * b, a.y * b, a.z * b, a.w * b); }
163
+ static __device__ __forceinline__ uint4 operator+ (const uint4& a, unsigned int b) { return make_uint4(a.x + b, a.y + b, a.z + b, a.w + b); }
164
+ static __device__ __forceinline__ uint4 operator- (const uint4& a, unsigned int b) { return make_uint4(a.x - b, a.y - b, a.z - b, a.w - b); }
165
+ static __device__ __forceinline__ uint4 operator* (unsigned int a, const uint4& b) { return make_uint4(a * b.x, a * b.y, a * b.z, a * b.w); }
166
+ static __device__ __forceinline__ uint4 operator+ (unsigned int a, const uint4& b) { return make_uint4(a + b.x, a + b.y, a + b.z, a + b.w); }
167
+ static __device__ __forceinline__ uint4 operator- (unsigned int a, const uint4& b) { return make_uint4(a - b.x, a - b.y, a - b.z, a - b.w); }
168
+
169
+ template<class T> static __device__ __forceinline__ T zero_value(void);
170
+ template<> __device__ __forceinline__ float zero_value<float> (void) { return 0.f; }
171
+ template<> __device__ __forceinline__ float2 zero_value<float2>(void) { return make_float2(0.f, 0.f); }
172
+ template<> __device__ __forceinline__ float4 zero_value<float4>(void) { return make_float4(0.f, 0.f, 0.f, 0.f); }
173
+ static __device__ __forceinline__ float3 make_float3(const float2& a, float b) { return make_float3(a.x, a.y, b); }
174
+ static __device__ __forceinline__ float4 make_float4(const float3& a, float b) { return make_float4(a.x, a.y, a.z, b); }
175
+ static __device__ __forceinline__ float4 make_float4(const float2& a, const float2& b) { return make_float4(a.x, a.y, b.x, b.y); }
176
+ static __device__ __forceinline__ int3 make_int3(const int2& a, int b) { return make_int3(a.x, a.y, b); }
177
+ static __device__ __forceinline__ int4 make_int4(const int3& a, int b) { return make_int4(a.x, a.y, a.z, b); }
178
+ static __device__ __forceinline__ int4 make_int4(const int2& a, const int2& b) { return make_int4(a.x, a.y, b.x, b.y); }
179
+ static __device__ __forceinline__ uint3 make_uint3(const uint2& a, unsigned int b) { return make_uint3(a.x, a.y, b); }
180
+ static __device__ __forceinline__ uint4 make_uint4(const uint3& a, unsigned int b) { return make_uint4(a.x, a.y, a.z, b); }
181
+ static __device__ __forceinline__ uint4 make_uint4(const uint2& a, const uint2& b) { return make_uint4(a.x, a.y, b.x, b.y); }
182
+
183
+ template<class T> static __device__ __forceinline__ void swap(T& a, T& b) { T temp = a; a = b; b = temp; }
184
+
185
+ //------------------------------------------------------------------------
186
+ // Triangle ID <-> float32 conversion functions to support very large triangle IDs.
187
+ //
188
+ // Values up to and including 16777216 (also, negative values) are converted trivially and retain
189
+ // compatibility with previous versions. Larger values are mapped to unique float32 values that are not equal to
190
+ // the ID. The largest value that converts to float32 and back without generating inf or nan is 889192447.
191
+
192
+ static __device__ __forceinline__ int float_to_triidx(float x) { if (x <= 16777216.f) return (int)x; return __float_as_int(x) - 0x4a800000; }
193
+ static __device__ __forceinline__ float triidx_to_float(int x) { if (x <= 0x01000000) return (float)x; return __int_as_float(0x4a800000 + x); }
194
+
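A quick sanity check of the two regimes of this mapping: IDs up to 2^24 survive a plain float cast, while larger IDs are carried as raw float bit patterns offset by 0x4a800000. The kernel below is an illustrative sketch (not part of the commit); the kernel name and the failure-counter convention are assumptions.

```cuda
// Illustrative round-trip check for the triangle ID <-> float32 mapping above.
// Assumes this lives in a .cu file that includes common.h and that the launch
// covers IDs 0 .. gridDim.x * blockDim.x - 1.
static __global__ void triidxRoundTripCheck(int* failures)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id > 889192447)                              // documented upper limit of the encoding
        return;
    // IDs <= 16777216 go through an exact float cast; larger IDs are decoded
    // from their offset bit pattern. Either way the round trip must be lossless.
    if (float_to_triidx(triidx_to_float(id)) != id)
        atomicAdd(failures, 1);
}
```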
195
+ //------------------------------------------------------------------------
196
+ // Coalesced atomics. These are all done via macros.
197
+
198
+ #if __CUDA_ARCH__ >= 700 // Warp match instruction __match_any_sync() is only available on compute capability 7.x and higher
199
+
200
+ #define CA_TEMP _ca_temp
201
+ #define CA_TEMP_PARAM float* CA_TEMP
202
+ #define CA_DECLARE_TEMP(threads_per_block) \
203
+ __shared__ float CA_TEMP[(threads_per_block)]
204
+
205
+ #define CA_SET_GROUP_MASK(group, thread_mask) \
206
+ bool _ca_leader; \
207
+ float* _ca_ptr; \
208
+ do { \
209
+ int tidx = threadIdx.x + blockDim.x * threadIdx.y; \
210
+ int lane = tidx & 31; \
211
+ int warp = tidx >> 5; \
212
+ int tmask = __match_any_sync((thread_mask), (group)); \
213
+ int leader = __ffs(tmask) - 1; \
214
+ _ca_leader = (leader == lane); \
215
+ _ca_ptr = &_ca_temp[((warp << 5) + leader)]; \
216
+ } while(0)
217
+
218
+ #define CA_SET_GROUP(group) \
219
+ CA_SET_GROUP_MASK((group), 0xffffffffu)
220
+
221
+ #define caAtomicAdd(ptr, value) \
222
+ do { \
223
+ if (_ca_leader) \
224
+ *_ca_ptr = 0.f; \
225
+ atomicAdd(_ca_ptr, (value)); \
226
+ if (_ca_leader) \
227
+ atomicAdd((ptr), *_ca_ptr); \
228
+ } while(0)
229
+
230
+ #define caAtomicAdd3_xyw(ptr, x, y, w) \
231
+ do { \
232
+ caAtomicAdd((ptr), (x)); \
233
+ caAtomicAdd((ptr)+1, (y)); \
234
+ caAtomicAdd((ptr)+3, (w)); \
235
+ } while(0)
236
+
237
+ #define caAtomicAddTexture(ptr, level, idx, value) \
238
+ do { \
239
+ CA_SET_GROUP((idx) ^ ((level) << 27)); \
240
+ caAtomicAdd((ptr)+(idx), (value)); \
241
+ } while(0)
242
+
243
+ //------------------------------------------------------------------------
244
+ // Disable atomic coalescing for compute capability lower than 7.x
245
+
246
+ #else // __CUDA_ARCH__ >= 700
247
+ #define CA_TEMP _ca_temp
248
+ #define CA_TEMP_PARAM float CA_TEMP
249
+ #define CA_DECLARE_TEMP(threads_per_block) CA_TEMP_PARAM
250
+ #define CA_SET_GROUP_MASK(group, thread_mask)
251
+ #define CA_SET_GROUP(group)
252
+ #define caAtomicAdd(ptr, value) atomicAdd((ptr), (value))
253
+ #define caAtomicAdd3_xyw(ptr, x, y, w) \
254
+ do { \
255
+ atomicAdd((ptr), (x)); \
256
+ atomicAdd((ptr)+1, (y)); \
257
+ atomicAdd((ptr)+3, (w)); \
258
+ } while(0)
259
+ #define caAtomicAddTexture(ptr, level, idx, value) atomicAdd((ptr)+(idx), (value))
260
+ #endif // __CUDA_ARCH__ >= 700
261
+
262
+ //------------------------------------------------------------------------
263
+ #endif // __CUDACC__
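The macros above hide a fair amount of warp-level choreography, so a usage sketch helps: a scatter-style kernel declares the shared temporary once per block and then funnels every add through caAtomicAddTexture, which groups lanes hitting the same texel and issues one global atomic per group on sm_70+. The kernel below is a hedged illustration (not part of the commit); its name, block size of 256, and buffer layout are assumptions.

```cuda
// Hypothetical scatter kernel showing how the coalesced-atomic macros are wired in.
// Assumes n > 0 and a 1D launch with 256 threads per block.
__global__ void scatterGradSketch(float* grad, const int* texelIdx, const float* dy, int n)
{
    CA_DECLARE_TEMP(256);                            // shared accumulator, one float per thread

    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // Every lane of the warp must reach caAtomicAddTexture, because the sm_70+ path
    // calls __match_any_sync() with a full mask. Out-of-range threads are therefore
    // clamped to a valid index and contribute zero instead of returning early.
    int   j = (i < n) ? i : (n - 1);
    float v = (i < n) ? dy[j] : 0.f;

    // Lanes targeting the same texel are grouped; the group leader accumulates the
    // partial sums in shared memory and issues a single atomicAdd to global memory.
    caAtomicAddTexture(grad, /*level=*/0, texelIdx[j], v);
}
```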
extensions/nvdiffrast/nvdiffrast/common/cudaraster/CudaRaster.hpp ADDED
@@ -0,0 +1,63 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+
11
+ //------------------------------------------------------------------------
12
+ // This is a slimmed-down and modernized version of the original
13
+ // CudaRaster codebase that accompanied the HPG 2011 paper
14
+ // "High-Performance Software Rasterization on GPUs" by Laine and Karras.
15
+ // Modifications have been made to accommodate post-Volta execution model
16
+ // with warp divergence. Support for shading, blending, quad rendering,
17
+ // and supersampling have been removed as unnecessary for nvdiffrast.
18
+ //------------------------------------------------------------------------
19
+
20
+ namespace CR
21
+ {
22
+
23
+ class RasterImpl;
24
+
25
+ //------------------------------------------------------------------------
26
+ // Interface class to isolate user from implementation details.
27
+ //------------------------------------------------------------------------
28
+
29
+ class CudaRaster
30
+ {
31
+ public:
32
+ enum
33
+ {
34
+ RenderModeFlag_EnableBackfaceCulling = 1 << 0, // Enable backface culling.
35
+ RenderModeFlag_EnableDepthPeeling = 1 << 1, // Enable depth peeling. Must have a peel buffer set.
36
+ };
37
+
38
+ public:
39
+ CudaRaster (void);
40
+ ~CudaRaster (void);
41
+
42
+ void setBufferSize (int width, int height, int numImages); // Width and height are internally rounded up to multiples of tile size (8x8) for buffer sizes.
43
+ void setViewport (int width, int height, int offsetX, int offsetY); // Tiled rendering viewport setup.
44
+ void setRenderModeFlags (unsigned int renderModeFlags); // Affects all subsequent calls to drawTriangles(). Defaults to zero.
45
+ void deferredClear (unsigned int clearColor); // Clears color and depth buffers during next call to drawTriangles().
46
+ void setVertexBuffer (void* vertices, int numVertices); // GPU pointer managed by caller. Vertex positions in clip space as float4 (x, y, z, w).
47
+ void setIndexBuffer (void* indices, int numTriangles); // GPU pointer managed by caller. Triangle index+color quadruplets as uint4 (idx0, idx1, idx2, color).
48
+ bool drawTriangles (const int* ranges, bool peel, cudaStream_t stream); // Ranges (offsets and counts) as #triangles entries, not as bytes. If NULL, draw all triangles. Returns false in case of internal overflow.
49
+ void* getColorBuffer (void); // GPU pointer managed by CudaRaster.
50
+ void* getDepthBuffer (void); // GPU pointer managed by CudaRaster.
51
+ void swapDepthAndPeel (void); // Swap depth and peeling buffers.
52
+
53
+ private:
54
+ CudaRaster (const CudaRaster&); // forbidden
55
+ CudaRaster& operator= (const CudaRaster&); // forbidden
56
+
57
+ private:
58
+ RasterImpl* m_impl; // Opaque pointer to implementation.
59
+ };
60
+
61
+ //------------------------------------------------------------------------
62
+ } // namespace CR
63
+
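The comments on the interface above already spell out ownership and units, so the missing piece is call order. The host-side sketch below is illustrative only (not part of the commit); the buffer names, single-image setup, and the naive retry-on-overflow policy are assumptions.

```cuda
// Hypothetical host-side driver for CR::CudaRaster.
#include <cuda_runtime.h>
#include "CudaRaster.hpp"

void rasterizeOnce(void* d_clipVerts, int numVerts,    // float4 clip-space positions (x, y, z, w)
                   void* d_triangles, int numTris,     // uint4 per triangle: (idx0, idx1, idx2, color)
                   int width, int height, cudaStream_t stream)
{
    CR::CudaRaster cr;

    cr.setBufferSize(width, height, /*numImages=*/1);  // rounded up internally to 8x8 tiles
    cr.setViewport(width, height, /*offsetX=*/0, /*offsetY=*/0);
    cr.setRenderModeFlags(CR::CudaRaster::RenderModeFlag_EnableBackfaceCulling);
    cr.deferredClear(0u);                              // clear color + depth on the next draw

    cr.setVertexBuffer(d_clipVerts, numVerts);         // GPU pointers remain owned by the caller
    cr.setIndexBuffer(d_triangles, numTris);

    // ranges == NULL draws all triangles; a false return signals internal overflow,
    // after which simply drawing again is one possible (assumed) recovery policy.
    if (!cr.drawTriangles(/*ranges=*/NULL, /*peel=*/false, stream))
        cr.drawTriangles(NULL, false, stream);

    void* colorBuf = cr.getColorBuffer();              // GPU pointer owned by CudaRaster
    void* depthBuf = cr.getDepthBuffer();
    (void)colorBuf; (void)depthBuf;
}
```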
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/BinRaster.inl ADDED
@@ -0,0 +1,423 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ //------------------------------------------------------------------------
10
+
11
+ __device__ __inline__ void binRasterImpl(const CRParams p)
12
+ {
13
+ __shared__ volatile U32 s_broadcast [CR_BIN_WARPS + 16];
14
+ __shared__ volatile S32 s_outOfs [CR_MAXBINS_SQR];
15
+ __shared__ volatile S32 s_outTotal [CR_MAXBINS_SQR];
16
+ __shared__ volatile S32 s_overIndex [CR_MAXBINS_SQR];
17
+ __shared__ volatile S32 s_outMask [CR_BIN_WARPS][CR_MAXBINS_SQR + 1]; // +1 to avoid bank collisions
18
+ __shared__ volatile S32 s_outCount [CR_BIN_WARPS][CR_MAXBINS_SQR + 1]; // +1 to avoid bank collisions
19
+ __shared__ volatile S32 s_triBuf [CR_BIN_WARPS*32*4]; // triangle ring buffer
20
+ __shared__ volatile U32 s_batchPos;
21
+ __shared__ volatile U32 s_bufCount;
22
+ __shared__ volatile U32 s_overTotal;
23
+ __shared__ volatile U32 s_allocBase;
24
+
25
+ const CRImageParams& ip = getImageParams(p, blockIdx.z);
26
+ CRAtomics& atomics = p.atomics[blockIdx.z];
27
+ const U8* triSubtris = (const U8*)p.triSubtris + p.maxSubtris * blockIdx.z;
28
+ const CRTriangleHeader* triHeader = (const CRTriangleHeader*)p.triHeader + p.maxSubtris * blockIdx.z;
29
+
30
+ S32* binFirstSeg = (S32*)p.binFirstSeg + CR_MAXBINS_SQR * CR_BIN_STREAMS_SIZE * blockIdx.z;
31
+ S32* binTotal = (S32*)p.binTotal + CR_MAXBINS_SQR * CR_BIN_STREAMS_SIZE * blockIdx.z;
32
+ S32* binSegData = (S32*)p.binSegData + p.maxBinSegs * CR_BIN_SEG_SIZE * blockIdx.z;
33
+ S32* binSegNext = (S32*)p.binSegNext + p.maxBinSegs * blockIdx.z;
34
+ S32* binSegCount = (S32*)p.binSegCount + p.maxBinSegs * blockIdx.z;
35
+
36
+ if (atomics.numSubtris > p.maxSubtris)
37
+ return;
38
+
39
+ // per-thread state
40
+ int thrInBlock = threadIdx.x + threadIdx.y * 32;
41
+ int batchPos = 0;
42
+
43
+ // first 16 elements of s_broadcast are always zero
44
+ if (thrInBlock < 16)
45
+ s_broadcast[thrInBlock] = 0;
46
+
47
+ // initialize output linked lists and offsets
48
+ if (thrInBlock < p.numBins)
49
+ {
50
+ binFirstSeg[(thrInBlock << CR_BIN_STREAMS_LOG2) + blockIdx.x] = -1;
51
+ s_outOfs[thrInBlock] = -CR_BIN_SEG_SIZE;
52
+ s_outTotal[thrInBlock] = 0;
53
+ }
54
+
55
+ // repeat until done
56
+ for(;;)
57
+ {
58
+ // get batch
59
+ if (thrInBlock == 0)
60
+ s_batchPos = atomicAdd(&atomics.binCounter, ip.binBatchSize);
61
+ __syncthreads();
62
+ batchPos = s_batchPos;
63
+
64
+ // all batches done?
65
+ if (batchPos >= ip.triCount)
66
+ break;
67
+
68
+ // per-thread state
69
+ int bufIndex = 0;
70
+ int bufCount = 0;
71
+ int batchEnd = min(batchPos + ip.binBatchSize, ip.triCount);
72
+
73
+ // loop over batch as long as we have triangles in it
74
+ do
75
+ {
76
+ // read more triangles
77
+ while (bufCount < CR_BIN_WARPS*32 && batchPos < batchEnd)
78
+ {
79
+ // get subtriangle count
80
+
81
+ int triIdx = batchPos + thrInBlock;
82
+ int num = 0;
83
+ if (triIdx < batchEnd)
84
+ num = triSubtris[triIdx];
85
+
86
+ // cumulative sum of subtriangles within each warp
87
+ U32 myIdx = __popc(__ballot_sync(~0u, num & 1) & getLaneMaskLt());
88
+ if (__any_sync(~0u, num > 1))
89
+ {
90
+ myIdx += __popc(__ballot_sync(~0u, num & 2) & getLaneMaskLt()) * 2;
91
+ myIdx += __popc(__ballot_sync(~0u, num & 4) & getLaneMaskLt()) * 4;
92
+ }
93
+ if (threadIdx.x == 31) // Do not assume that last thread in warp wins the write.
94
+ s_broadcast[threadIdx.y + 16] = myIdx + num;
95
+ __syncthreads();
96
+
97
+ // cumulative sum of per-warp subtriangle counts
98
+ // Note: cannot have more than 32 warps or this needs to sync between each step.
99
+ bool act = (thrInBlock < CR_BIN_WARPS);
100
+ U32 actMask = __ballot_sync(~0u, act);
101
+ if (threadIdx.y == 0 && act)
102
+ {
103
+ volatile U32* ptr = &s_broadcast[thrInBlock + 16];
104
+ U32 val = *ptr;
105
+ #if (CR_BIN_WARPS > 1)
106
+ val += ptr[-1]; __syncwarp(actMask);
107
+ *ptr = val; __syncwarp(actMask);
108
+ #endif
109
+ #if (CR_BIN_WARPS > 2)
110
+ val += ptr[-2]; __syncwarp(actMask);
111
+ *ptr = val; __syncwarp(actMask);
112
+ #endif
113
+ #if (CR_BIN_WARPS > 4)
114
+ val += ptr[-4]; __syncwarp(actMask);
115
+ *ptr = val; __syncwarp(actMask);
116
+ #endif
117
+ #if (CR_BIN_WARPS > 8)
118
+ val += ptr[-8]; __syncwarp(actMask);
119
+ *ptr = val; __syncwarp(actMask);
120
+ #endif
121
+ #if (CR_BIN_WARPS > 16)
122
+ val += ptr[-16]; __syncwarp(actMask);
123
+ *ptr = val; __syncwarp(actMask);
124
+ #endif
125
+
126
+ // initially assume that we consume everything
127
+ // only last active thread does the writes
128
+ if (threadIdx.x == CR_BIN_WARPS - 1)
129
+ {
130
+ s_batchPos = batchPos + CR_BIN_WARPS * 32;
131
+ s_bufCount = bufCount + val;
132
+ }
133
+ }
134
+ __syncthreads();
135
+
136
+ // skip if no subtriangles
137
+ if (num)
138
+ {
139
+ // calculate write position for first subtriangle
140
+ U32 pos = bufCount + myIdx + s_broadcast[threadIdx.y + 16 - 1];
141
+
142
+ // only write if entire triangle fits
143
+ if (pos + num <= CR_ARRAY_SIZE(s_triBuf))
144
+ {
145
+ pos += bufIndex; // adjust for current start position
146
+ pos &= CR_ARRAY_SIZE(s_triBuf)-1;
147
+ if (num == 1)
148
+ s_triBuf[pos] = triIdx * 8 + 7; // single triangle
149
+ else
150
+ {
151
+ for (int i=0; i < num; i++)
152
+ {
153
+ s_triBuf[pos] = triIdx * 8 + i;
154
+ pos++;
155
+ pos &= CR_ARRAY_SIZE(s_triBuf)-1;
156
+ }
157
+ }
158
+ } else if (pos <= CR_ARRAY_SIZE(s_triBuf))
159
+ {
160
+ // this triangle is the first that failed, overwrite total count and triangle count
161
+ s_batchPos = batchPos + thrInBlock;
162
+ s_bufCount = pos;
163
+ }
164
+ }
165
+
166
+ // update triangle counts
167
+ __syncthreads();
168
+ batchPos = s_batchPos;
169
+ bufCount = s_bufCount;
170
+ }
171
+
172
+ // make every warp clear its output buffers
173
+ for (int i=threadIdx.x; i < p.numBins; i += 32)
174
+ s_outMask[threadIdx.y][i] = 0;
175
+ __syncwarp();
176
+
177
+ // choose our triangle
178
+ uint4 triData = make_uint4(0, 0, 0, 0);
179
+ if (thrInBlock < bufCount)
180
+ {
181
+ U32 triPos = bufIndex + thrInBlock;
182
+ triPos &= CR_ARRAY_SIZE(s_triBuf)-1;
183
+
184
+ // find triangle
185
+ int triIdx = s_triBuf[triPos];
186
+ int dataIdx = triIdx >> 3;
187
+ int subtriIdx = triIdx & 7;
188
+ if (subtriIdx != 7)
189
+ dataIdx = triHeader[dataIdx].misc + subtriIdx;
190
+
191
+ // read triangle
192
+
193
+ triData = *(((const uint4*)triHeader) + dataIdx);
194
+ }
195
+
196
+ // setup bounding box and edge functions, and rasterize
197
+ S32 lox, loy, hix, hiy;
198
+ bool hasTri = (thrInBlock < bufCount);
199
+ U32 hasTriMask = __ballot_sync(~0u, hasTri);
200
+ if (hasTri)
201
+ {
202
+ S32 v0x = add_s16lo_s16lo(triData.x, p.widthPixelsVp * (CR_SUBPIXEL_SIZE >> 1));
203
+ S32 v0y = add_s16hi_s16lo(triData.x, p.heightPixelsVp * (CR_SUBPIXEL_SIZE >> 1));
204
+ S32 d01x = sub_s16lo_s16lo(triData.y, triData.x);
205
+ S32 d01y = sub_s16hi_s16hi(triData.y, triData.x);
206
+ S32 d02x = sub_s16lo_s16lo(triData.z, triData.x);
207
+ S32 d02y = sub_s16hi_s16hi(triData.z, triData.x);
208
+ int binLog = CR_BIN_LOG2 + CR_TILE_LOG2 + CR_SUBPIXEL_LOG2;
209
+ lox = add_clamp_0_x((v0x + min_min(d01x, 0, d02x)) >> binLog, 0, p.widthBins - 1);
210
+ loy = add_clamp_0_x((v0y + min_min(d01y, 0, d02y)) >> binLog, 0, p.heightBins - 1);
211
+ hix = add_clamp_0_x((v0x + max_max(d01x, 0, d02x)) >> binLog, 0, p.widthBins - 1);
212
+ hiy = add_clamp_0_x((v0y + max_max(d01y, 0, d02y)) >> binLog, 0, p.heightBins - 1);
213
+
214
+ U32 bit = 1 << threadIdx.x;
215
+ #if __CUDA_ARCH__ >= 700
216
+ bool multi = (hix != lox || hiy != loy);
217
+ if (!__any_sync(hasTriMask, multi))
218
+ {
219
+ int binIdx = lox + p.widthBins * loy;
220
+ U32 mask = __match_any_sync(hasTriMask, binIdx);
221
+ s_outMask[threadIdx.y][binIdx] = mask;
222
+ __syncwarp(hasTriMask);
223
+ } else
224
+ #endif
225
+ {
226
+ bool complex = (hix > lox+1 || hiy > loy+1);
227
+ if (!__any_sync(hasTriMask, complex))
228
+ {
229
+ int binIdx = lox + p.widthBins * loy;
230
+ atomicOr((U32*)&s_outMask[threadIdx.y][binIdx], bit);
231
+ if (hix > lox) atomicOr((U32*)&s_outMask[threadIdx.y][binIdx + 1], bit);
232
+ if (hiy > loy) atomicOr((U32*)&s_outMask[threadIdx.y][binIdx + p.widthBins], bit);
233
+ if (hix > lox && hiy > loy) atomicOr((U32*)&s_outMask[threadIdx.y][binIdx + p.widthBins + 1], bit);
234
+ } else
235
+ {
236
+ S32 d12x = d02x - d01x, d12y = d02y - d01y;
237
+ v0x -= lox << binLog, v0y -= loy << binLog;
238
+
239
+ S32 t01 = v0x * d01y - v0y * d01x;
240
+ S32 t02 = v0y * d02x - v0x * d02y;
241
+ S32 t12 = d01x * d12y - d01y * d12x - t01 - t02;
242
+ S32 b01 = add_sub(t01 >> binLog, max(d01x, 0), min(d01y, 0));
243
+ S32 b02 = add_sub(t02 >> binLog, max(d02y, 0), min(d02x, 0));
244
+ S32 b12 = add_sub(t12 >> binLog, max(d12x, 0), min(d12y, 0));
245
+
246
+ int width = hix - lox + 1;
247
+ d01x += width * d01y;
248
+ d02x += width * d02y;
249
+ d12x += width * d12y;
250
+
251
+ U8* currPtr = (U8*)&s_outMask[threadIdx.y][lox + loy * p.widthBins];
252
+ U8* skipPtr = (U8*)&s_outMask[threadIdx.y][(hix + 1) + loy * p.widthBins];
253
+ U8* endPtr = (U8*)&s_outMask[threadIdx.y][lox + (hiy + 1) * p.widthBins];
254
+ int stride = p.widthBins * 4;
255
+ int ptrYInc = stride - width * 4;
256
+
257
+ do
258
+ {
259
+ if (b01 >= 0 && b02 >= 0 && b12 >= 0)
260
+ atomicOr((U32*)currPtr, bit);
261
+ currPtr += 4, b01 -= d01y, b02 += d02y, b12 -= d12y;
262
+ if (currPtr == skipPtr)
263
+ currPtr += ptrYInc, b01 += d01x, b02 -= d02x, b12 += d12x, skipPtr += stride;
264
+ }
265
+ while (currPtr != endPtr);
266
+ }
267
+ }
268
+ }
269
+
270
+ // count per-bin contributions
271
+ if (thrInBlock == 0)
272
+ s_overTotal = 0; // overflow counter
273
+
274
+ // ensure that out masks are done
275
+ __syncthreads();
276
+
277
+ int overIndex = -1;
278
+ bool act = (thrInBlock < p.numBins);
279
+ U32 actMask = __ballot_sync(~0u, act);
280
+ if (act)
281
+ {
282
+ U8* srcPtr = (U8*)&s_outMask[0][thrInBlock];
283
+ U8* dstPtr = (U8*)&s_outCount[0][thrInBlock];
284
+ int total = 0;
285
+ for (int i = 0; i < CR_BIN_WARPS; i++)
286
+ {
287
+ total += __popc(*(U32*)srcPtr);
288
+ *(U32*)dstPtr = total;
289
+ srcPtr += (CR_MAXBINS_SQR + 1) * 4;
290
+ dstPtr += (CR_MAXBINS_SQR + 1) * 4;
291
+ }
292
+
293
+ // overflow => request a new segment
294
+ int ofs = s_outOfs[thrInBlock];
295
+ bool ovr = (((ofs - 1) >> CR_BIN_SEG_LOG2) != (((ofs - 1) + total) >> CR_BIN_SEG_LOG2));
296
+ U32 ovrMask = __ballot_sync(actMask, ovr);
297
+ if (ovr)
298
+ {
299
+ overIndex = __popc(ovrMask & getLaneMaskLt());
300
+ if (overIndex == 0)
301
+ s_broadcast[threadIdx.y + 16] = atomicAdd((U32*)&s_overTotal, __popc(ovrMask));
302
+ __syncwarp(ovrMask);
303
+ overIndex += s_broadcast[threadIdx.y + 16];
304
+ s_overIndex[thrInBlock] = overIndex;
305
+ }
306
+ }
307
+
308
+ // sync after overTotal is ready
309
+ __syncthreads();
310
+
311
+ // at least one segment overflowed => allocate segments
312
+ U32 overTotal = s_overTotal;
313
+ U32 allocBase = 0;
314
+ if (overTotal > 0)
315
+ {
316
+ // allocate memory
317
+ if (thrInBlock == 0)
318
+ {
319
+ U32 allocBase = atomicAdd(&atomics.numBinSegs, overTotal);
320
+ s_allocBase = (allocBase + overTotal <= p.maxBinSegs) ? allocBase : 0;
321
+ }
322
+ __syncthreads();
323
+ allocBase = s_allocBase;
324
+
325
+ // did my bin overflow?
326
+ if (overIndex != -1)
327
+ {
328
+ // calculate new segment index
329
+ int segIdx = allocBase + overIndex;
330
+
331
+ // add to linked list
332
+ if (s_outOfs[thrInBlock] < 0)
333
+ binFirstSeg[(thrInBlock << CR_BIN_STREAMS_LOG2) + blockIdx.x] = segIdx;
334
+ else
335
+ binSegNext[(s_outOfs[thrInBlock] - 1) >> CR_BIN_SEG_LOG2] = segIdx;
336
+
337
+ // defaults
338
+ binSegNext [segIdx] = -1;
339
+ binSegCount[segIdx] = CR_BIN_SEG_SIZE;
340
+ }
341
+ }
342
+
343
+ // concurrent emission -- each warp handles its own triangle
344
+ if (thrInBlock < bufCount)
345
+ {
346
+ int triPos = (bufIndex + thrInBlock) & (CR_ARRAY_SIZE(s_triBuf) - 1);
347
+ int currBin = lox + loy * p.widthBins;
348
+ int skipBin = (hix + 1) + loy * p.widthBins;
349
+ int endBin = lox + (hiy + 1) * p.widthBins;
350
+ int binYInc = p.widthBins - (hix - lox + 1);
351
+
352
+ // loop over triangle's bins
353
+ do
354
+ {
355
+ U32 outMask = s_outMask[threadIdx.y][currBin];
356
+ if (outMask & (1<<threadIdx.x))
357
+ {
358
+ int idx = __popc(outMask & getLaneMaskLt());
359
+ if (threadIdx.y > 0)
360
+ idx += s_outCount[threadIdx.y-1][currBin];
361
+
362
+ int base = s_outOfs[currBin];
363
+ int free = (-base) & (CR_BIN_SEG_SIZE - 1);
364
+ if (idx >= free)
365
+ idx += ((allocBase + s_overIndex[currBin]) << CR_BIN_SEG_LOG2) - free;
366
+ else
367
+ idx += base;
368
+
369
+ binSegData[idx] = s_triBuf[triPos];
370
+ }
371
+
372
+ currBin++;
373
+ if (currBin == skipBin)
374
+ currBin += binYInc, skipBin += p.widthBins;
375
+ }
376
+ while (currBin != endBin);
377
+ }
378
+
379
+ // wait for all triangles to finish, then replace overflowed segment offsets
380
+ __syncthreads();
381
+ if (thrInBlock < p.numBins)
382
+ {
383
+ U32 total = s_outCount[CR_BIN_WARPS - 1][thrInBlock];
384
+ U32 oldOfs = s_outOfs[thrInBlock];
385
+ if (overIndex == -1)
386
+ s_outOfs[thrInBlock] = oldOfs + total;
387
+ else
388
+ {
389
+ int addr = oldOfs + total;
390
+ addr = ((addr - 1) & (CR_BIN_SEG_SIZE - 1)) + 1;
391
+ addr += (allocBase + overIndex) << CR_BIN_SEG_LOG2;
392
+ s_outOfs[thrInBlock] = addr;
393
+ }
394
+ s_outTotal[thrInBlock] += total;
395
+ }
396
+
397
+ // these triangles are now done
398
+ int count = ::min(bufCount, CR_BIN_WARPS * 32);
399
+ bufCount -= count;
400
+ bufIndex += count;
401
+ bufIndex &= CR_ARRAY_SIZE(s_triBuf)-1;
402
+ }
403
+ while (bufCount > 0 || batchPos < batchEnd);
404
+
405
+ // flush all bins
406
+ if (thrInBlock < p.numBins)
407
+ {
408
+ int ofs = s_outOfs[thrInBlock];
409
+ if (ofs & (CR_BIN_SEG_SIZE-1))
410
+ {
411
+ int seg = ofs >> CR_BIN_SEG_LOG2;
412
+ binSegCount[seg] = ofs & (CR_BIN_SEG_SIZE-1);
413
+ s_outOfs[thrInBlock] = (ofs + CR_BIN_SEG_SIZE - 1) & -CR_BIN_SEG_SIZE;
414
+ }
415
+ }
416
+ }
417
+
418
+ // output totals
419
+ if (thrInBlock < p.numBins)
420
+ binTotal[(thrInBlock << CR_BIN_STREAMS_LOG2) + blockIdx.x] = s_outTotal[thrInBlock];
421
+ }
422
+
423
+ //------------------------------------------------------------------------
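Most of binRasterImpl is bookkeeping around a simple geometric step: each triangle's subpixel-space bounding box is snapped onto the grid of bins, and only triangles whose box spans more than one bin pay for the edge-function walk. The CPU sketch below restates that mapping outside the kernel; it is an assumption-laden illustration (the constant usage mirrors the kernel, and coordinates are taken to be subpixel units relative to the viewport corner, as the kernel arranges via its half-viewport offset).

```cuda
// CPU reference for the bin-space AABB computed per triangle in binRasterImpl.
// CR_BIN_LOG2 / CR_TILE_LOG2 / CR_SUBPIXEL_LOG2 come from Constants.hpp.
#include <algorithm>

struct BinAabb { int lox, loy, hix, hiy; };

static BinAabb triangleBinAabb(int v0x, int v0y, int v1x, int v1y, int v2x, int v2y,
                               int widthBins, int heightBins)
{
    const int binLog = CR_BIN_LOG2 + CR_TILE_LOG2 + CR_SUBPIXEL_LOG2;   // subpixels per bin, log2
    auto clampTo = [](int v, int hi) { return std::min(std::max(v, 0), hi); };

    BinAabb b;
    b.lox = clampTo(std::min(std::min(v0x, v1x), v2x) >> binLog, widthBins  - 1);
    b.loy = clampTo(std::min(std::min(v0y, v1y), v2y) >> binLog, heightBins - 1);
    b.hix = clampTo(std::max(std::max(v0x, v1x), v2x) >> binLog, widthBins  - 1);
    b.hiy = clampTo(std::max(std::max(v0y, v1y), v2y) >> binLog, heightBins - 1);
    return b;   // a single-bin AABB takes the kernel's fast atomic path; larger ones walk edges
}
```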
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/Buffer.cpp ADDED
@@ -0,0 +1,94 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "../../framework.h"
10
+ #include "Buffer.hpp"
11
+
12
+ using namespace CR;
13
+
14
+ //------------------------------------------------------------------------
15
+ // GPU buffer.
16
+ //------------------------------------------------------------------------
17
+
18
+ Buffer::Buffer(void)
19
+ : m_gpuPtr(NULL),
20
+ m_bytes (0)
21
+ {
22
+ // empty
23
+ }
24
+
25
+ Buffer::~Buffer(void)
26
+ {
27
+ if (m_gpuPtr)
28
+ cudaFree(m_gpuPtr); // Don't throw an exception.
29
+ }
30
+
31
+ void Buffer::reset(size_t bytes)
32
+ {
33
+ if (bytes == m_bytes)
34
+ return;
35
+
36
+ if (m_gpuPtr)
37
+ {
38
+ NVDR_CHECK_CUDA_ERROR(cudaFree(m_gpuPtr));
39
+ m_gpuPtr = NULL;
40
+ }
41
+
42
+ if (bytes > 0)
43
+ NVDR_CHECK_CUDA_ERROR(cudaMalloc(&m_gpuPtr, bytes));
44
+
45
+ m_bytes = bytes;
46
+ }
47
+
48
+ void Buffer::grow(size_t bytes)
49
+ {
50
+ if (bytes > m_bytes)
51
+ reset(bytes);
52
+ }
53
+
54
+ //------------------------------------------------------------------------
55
+ // Host buffer with page-locked memory.
56
+ //------------------------------------------------------------------------
57
+
58
+ HostBuffer::HostBuffer(void)
59
+ : m_hostPtr(NULL),
60
+ m_bytes (0)
61
+ {
62
+ // empty
63
+ }
64
+
65
+ HostBuffer::~HostBuffer(void)
66
+ {
67
+ if (m_hostPtr)
68
+ cudaFreeHost(m_hostPtr); // Don't throw an exception.
69
+ }
70
+
71
+ void HostBuffer::reset(size_t bytes)
72
+ {
73
+ if (bytes == m_bytes)
74
+ return;
75
+
76
+ if (m_hostPtr)
77
+ {
78
+ NVDR_CHECK_CUDA_ERROR(cudaFreeHost(m_hostPtr));
79
+ m_hostPtr = NULL;
80
+ }
81
+
82
+ if (bytes > 0)
83
+ NVDR_CHECK_CUDA_ERROR(cudaMallocHost(&m_hostPtr, bytes));
84
+
85
+ m_bytes = bytes;
86
+ }
87
+
88
+ void HostBuffer::grow(size_t bytes)
89
+ {
90
+ if (bytes > m_bytes)
91
+ reset(bytes);
92
+ }
93
+
94
+ //------------------------------------------------------------------------
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/Buffer.hpp ADDED
@@ -0,0 +1,55 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+ #include "Defs.hpp"
11
+
12
+ namespace CR
13
+ {
14
+ //------------------------------------------------------------------------
15
+
16
+ class Buffer
17
+ {
18
+ public:
19
+ Buffer (void);
20
+ ~Buffer (void);
21
+
22
+ void reset (size_t bytes);
23
+ void grow (size_t bytes);
24
+ void* getPtr (size_t offset = 0) { return (void*)(((uintptr_t)m_gpuPtr) + offset); }
25
+ size_t getSize (void) const { return m_bytes; }
26
+
27
+ void setPtr (void* ptr) { m_gpuPtr = ptr; }
28
+
29
+ private:
30
+ void* m_gpuPtr;
31
+ size_t m_bytes;
32
+ };
33
+
34
+ //------------------------------------------------------------------------
35
+
36
+ class HostBuffer
37
+ {
38
+ public:
39
+ HostBuffer (void);
40
+ ~HostBuffer (void);
41
+
42
+ void reset (size_t bytes);
43
+ void grow (size_t bytes);
44
+ void* getPtr (void) { return m_hostPtr; }
45
+ size_t getSize (void) const { return m_bytes; }
46
+
47
+ void setPtr (void* ptr) { m_hostPtr = ptr; }
48
+
49
+ private:
50
+ void* m_hostPtr;
51
+ size_t m_bytes;
52
+ };
53
+
54
+ //------------------------------------------------------------------------
55
+ }
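A short note on the contract implied by reset() and grow() above: reset() drops the old allocation whenever the requested size differs, while grow() reallocates only when the request exceeds the current capacity, so steady-state frames avoid the allocator entirely (contents are not preserved across a reallocation). The snippet below is an illustrative sketch; the names and sizes are assumptions.

```cuda
// Hypothetical use of the CR buffer wrappers.
#include "Buffer.hpp"

void exampleBufferUse(size_t numTriangles)
{
    CR::Buffer triScratch;                   // device memory owned by this object
    triScratch.grow(numTriangles * 16);      // illustrative per-triangle byte count; no-op if big enough
    void* d_ptr = triScratch.getPtr();       // raw GPU pointer handed to kernels
    (void)d_ptr;

    CR::HostBuffer staging;                  // page-locked host memory for async copies
    staging.grow(4096);
    // Both buffers release their allocations in their destructors.
}
```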
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/CoarseRaster.inl ADDED
@@ -0,0 +1,730 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ //------------------------------------------------------------------------
10
+
11
+ __device__ __inline__ int globalTileIdx(int tileInBin, int widthTiles)
12
+ {
13
+ int tileX = tileInBin & (CR_BIN_SIZE - 1);
14
+ int tileY = tileInBin >> CR_BIN_LOG2;
15
+ return tileX + tileY * widthTiles;
16
+ }
17
+
18
+ //------------------------------------------------------------------------
19
+
20
+ __device__ __inline__ void coarseRasterImpl(const CRParams p)
21
+ {
22
+ // Common.
23
+
24
+ __shared__ volatile U32 s_workCounter;
25
+ __shared__ volatile U32 s_scanTemp [CR_COARSE_WARPS][48]; // 3KB
26
+
27
+ // Input.
28
+
29
+ __shared__ volatile U32 s_binOrder [CR_MAXBINS_SQR]; // 1KB
30
+ __shared__ volatile S32 s_binStreamCurrSeg [CR_BIN_STREAMS_SIZE]; // 0KB
31
+ __shared__ volatile S32 s_binStreamFirstTri [CR_BIN_STREAMS_SIZE]; // 0KB
32
+ __shared__ volatile S32 s_triQueue [CR_COARSE_QUEUE_SIZE]; // 4KB
33
+ __shared__ volatile S32 s_triQueueWritePos;
34
+ __shared__ volatile U32 s_binStreamSelectedOfs;
35
+ __shared__ volatile U32 s_binStreamSelectedSize;
36
+
37
+ // Output.
38
+
39
+ __shared__ volatile U32 s_warpEmitMask [CR_COARSE_WARPS][CR_BIN_SQR + 1]; // 16KB, +1 to avoid bank collisions
40
+ __shared__ volatile U32 s_warpEmitPrefixSum [CR_COARSE_WARPS][CR_BIN_SQR + 1]; // 16KB, +1 to avoid bank collisions
41
+ __shared__ volatile U32 s_tileEmitPrefixSum [CR_BIN_SQR + 1]; // 1KB, zero at the beginning
42
+ __shared__ volatile U32 s_tileAllocPrefixSum[CR_BIN_SQR + 1]; // 1KB, zero at the beginning
43
+ __shared__ volatile S32 s_tileStreamCurrOfs [CR_BIN_SQR]; // 1KB
44
+ __shared__ volatile U32 s_firstAllocSeg;
45
+ __shared__ volatile U32 s_firstActiveIdx;
46
+
47
+ // Pointers and constants.
48
+
49
+ CRAtomics& atomics = p.atomics[blockIdx.z];
50
+ const CRTriangleHeader* triHeader = (const CRTriangleHeader*)p.triHeader + p.maxSubtris * blockIdx.z;
51
+ const S32* binFirstSeg = (const S32*)p.binFirstSeg + CR_MAXBINS_SQR * CR_BIN_STREAMS_SIZE * blockIdx.z;
52
+ const S32* binTotal = (const S32*)p.binTotal + CR_MAXBINS_SQR * CR_BIN_STREAMS_SIZE * blockIdx.z;
53
+ const S32* binSegData = (const S32*)p.binSegData + p.maxBinSegs * CR_BIN_SEG_SIZE * blockIdx.z;
54
+ const S32* binSegNext = (const S32*)p.binSegNext + p.maxBinSegs * blockIdx.z;
55
+ const S32* binSegCount = (const S32*)p.binSegCount + p.maxBinSegs * blockIdx.z;
56
+ S32* activeTiles = (S32*)p.activeTiles + CR_MAXTILES_SQR * blockIdx.z;
57
+ S32* tileFirstSeg = (S32*)p.tileFirstSeg + CR_MAXTILES_SQR * blockIdx.z;
58
+ S32* tileSegData = (S32*)p.tileSegData + p.maxTileSegs * CR_TILE_SEG_SIZE * blockIdx.z;
59
+ S32* tileSegNext = (S32*)p.tileSegNext + p.maxTileSegs * blockIdx.z;
60
+ S32* tileSegCount = (S32*)p.tileSegCount + p.maxTileSegs * blockIdx.z;
61
+
62
+ int tileLog = CR_TILE_LOG2 + CR_SUBPIXEL_LOG2;
63
+ int thrInBlock = threadIdx.x + threadIdx.y * 32;
64
+ int emitShift = CR_BIN_LOG2 * 2 + 5; // We scan ((numEmits << emitShift) | numAllocs) over tiles.
65
+
66
+ if (atomics.numSubtris > p.maxSubtris || atomics.numBinSegs > p.maxBinSegs)
67
+ return;
68
+
69
+ // Initialize sharedmem arrays.
70
+
71
+ if (thrInBlock == 0)
72
+ {
73
+ s_tileEmitPrefixSum[0] = 0;
74
+ s_tileAllocPrefixSum[0] = 0;
75
+ }
76
+ s_scanTemp[threadIdx.y][threadIdx.x] = 0;
77
+
78
+ // Sort bins in descending order of triangle count.
79
+
80
+ for (int binIdx = thrInBlock; binIdx < p.numBins; binIdx += CR_COARSE_WARPS * 32)
81
+ {
82
+ int count = 0;
83
+ for (int i = 0; i < CR_BIN_STREAMS_SIZE; i++)
84
+ count += binTotal[(binIdx << CR_BIN_STREAMS_LOG2) + i];
85
+ s_binOrder[binIdx] = (~count << (CR_MAXBINS_LOG2 * 2)) | binIdx;
86
+ }
87
+
88
+ __syncthreads();
89
+ sortShared(s_binOrder, p.numBins);
90
+
91
+ // Process each bin by one block.
92
+
93
+ for (;;)
94
+ {
95
+ // Pick a bin for the block.
96
+
97
+ if (thrInBlock == 0)
98
+ s_workCounter = atomicAdd(&atomics.coarseCounter, 1);
99
+ __syncthreads();
100
+
101
+ int workCounter = s_workCounter;
102
+ if (workCounter >= p.numBins)
103
+ break;
104
+
105
+ U32 binOrder = s_binOrder[workCounter];
106
+ bool binEmpty = ((~binOrder >> (CR_MAXBINS_LOG2 * 2)) == 0);
107
+ if (binEmpty && !p.deferredClear)
108
+ break;
109
+
110
+ int binIdx = binOrder & (CR_MAXBINS_SQR - 1);
111
+
112
+ // Initialize input/output streams.
113
+
114
+ int triQueueWritePos = 0;
115
+ int triQueueReadPos = 0;
116
+
117
+ if (thrInBlock < CR_BIN_STREAMS_SIZE)
118
+ {
119
+ int segIdx = binFirstSeg[(binIdx << CR_BIN_STREAMS_LOG2) + thrInBlock];
120
+ s_binStreamCurrSeg[thrInBlock] = segIdx;
121
+ s_binStreamFirstTri[thrInBlock] = (segIdx == -1) ? ~0u : binSegData[segIdx << CR_BIN_SEG_LOG2];
122
+ }
123
+
124
+ for (int tileInBin = CR_COARSE_WARPS * 32 - 1 - thrInBlock; tileInBin < CR_BIN_SQR; tileInBin += CR_COARSE_WARPS * 32)
125
+ s_tileStreamCurrOfs[tileInBin] = -CR_TILE_SEG_SIZE;
126
+
127
+ // Initialize per-bin state.
128
+
129
+ int binY = idiv_fast(binIdx, p.widthBins);
130
+ int binX = binIdx - binY * p.widthBins;
131
+ int originX = (binX << (CR_BIN_LOG2 + tileLog)) - (p.widthPixelsVp << (CR_SUBPIXEL_LOG2 - 1));
132
+ int originY = (binY << (CR_BIN_LOG2 + tileLog)) - (p.heightPixelsVp << (CR_SUBPIXEL_LOG2 - 1));
133
+ int maxTileXInBin = ::min(p.widthTiles - (binX << CR_BIN_LOG2), CR_BIN_SIZE) - 1;
134
+ int maxTileYInBin = ::min(p.heightTiles - (binY << CR_BIN_LOG2), CR_BIN_SIZE) - 1;
135
+ int binTileIdx = (binX + binY * p.widthTiles) << CR_BIN_LOG2;
136
+
137
+ // Entire block: Merge input streams and process triangles.
138
+
139
+ if (!binEmpty)
140
+ do
141
+ {
142
+ //------------------------------------------------------------------------
143
+ // Merge.
144
+ //------------------------------------------------------------------------
145
+
146
+ // Entire block: Not enough triangles => merge and queue segments.
147
+ // NOTE: The bin exit criterion assumes that we queue more triangles than we actually need.
148
+
149
+ while (triQueueWritePos - triQueueReadPos <= CR_COARSE_WARPS * 32)
150
+ {
151
+ // First warp: Choose the segment with the lowest initial triangle index.
152
+
153
+ bool hasStream = (thrInBlock < CR_BIN_STREAMS_SIZE);
154
+ U32 hasStreamMask = __ballot_sync(~0u, hasStream);
155
+ if (hasStream)
156
+ {
157
+ // Find the stream with the lowest triangle index.
158
+
159
+ U32 firstTri = s_binStreamFirstTri[thrInBlock];
160
+ U32 t = firstTri;
161
+ volatile U32* v = &s_scanTemp[0][thrInBlock + 16];
162
+
163
+ #if (CR_BIN_STREAMS_SIZE > 1)
164
+ v[0] = t; __syncwarp(hasStreamMask); t = ::min(t, v[-1]); __syncwarp(hasStreamMask);
165
+ #endif
166
+ #if (CR_BIN_STREAMS_SIZE > 2)
167
+ v[0] = t; __syncwarp(hasStreamMask); t = ::min(t, v[-2]); __syncwarp(hasStreamMask);
168
+ #endif
169
+ #if (CR_BIN_STREAMS_SIZE > 4)
170
+ v[0] = t; __syncwarp(hasStreamMask); t = ::min(t, v[-4]); __syncwarp(hasStreamMask);
171
+ #endif
172
+ #if (CR_BIN_STREAMS_SIZE > 8)
173
+ v[0] = t; __syncwarp(hasStreamMask); t = ::min(t, v[-8]); __syncwarp(hasStreamMask);
174
+ #endif
175
+ #if (CR_BIN_STREAMS_SIZE > 16)
176
+ v[0] = t; __syncwarp(hasStreamMask); t = ::min(t, v[-16]); __syncwarp(hasStreamMask);
177
+ #endif
178
+ v[0] = t; __syncwarp(hasStreamMask);
179
+
180
+ // Consume and broadcast.
181
+
182
+ bool first = (s_scanTemp[0][CR_BIN_STREAMS_SIZE - 1 + 16] == firstTri);
183
+ U32 firstMask = __ballot_sync(hasStreamMask, first);
184
+ if (first && (firstMask >> threadIdx.x) == 1u)
185
+ {
186
+ int segIdx = s_binStreamCurrSeg[thrInBlock];
187
+ s_binStreamSelectedOfs = segIdx << CR_BIN_SEG_LOG2;
188
+ if (segIdx != -1)
189
+ {
190
+ int segSize = binSegCount[segIdx];
191
+ int segNext = binSegNext[segIdx];
192
+ s_binStreamSelectedSize = segSize;
193
+ s_triQueueWritePos = triQueueWritePos + segSize;
194
+ s_binStreamCurrSeg[thrInBlock] = segNext;
195
+ s_binStreamFirstTri[thrInBlock] = (segNext == -1) ? ~0u : binSegData[segNext << CR_BIN_SEG_LOG2];
196
+ }
197
+ }
198
+ }
199
+
200
+ // No more segments => break.
201
+
202
+ __syncthreads();
203
+ triQueueWritePos = s_triQueueWritePos;
204
+ int segOfs = s_binStreamSelectedOfs;
205
+ if (segOfs < 0)
206
+ break;
207
+
208
+ int segSize = s_binStreamSelectedSize;
209
+ __syncthreads();
210
+
211
+ // Fetch triangles into the queue.
212
+
213
+ for (int idxInSeg = CR_COARSE_WARPS * 32 - 1 - thrInBlock; idxInSeg < segSize; idxInSeg += CR_COARSE_WARPS * 32)
214
+ {
215
+ S32 triIdx = binSegData[segOfs + idxInSeg];
216
+ s_triQueue[(triQueueWritePos - segSize + idxInSeg) & (CR_COARSE_QUEUE_SIZE - 1)] = triIdx;
217
+ }
218
+ }
219
+
220
+ // All threads: Clear emit masks.
221
+
222
+ for (int maskIdx = thrInBlock; maskIdx < CR_COARSE_WARPS * CR_BIN_SQR; maskIdx += CR_COARSE_WARPS * 32)
223
+ s_warpEmitMask[maskIdx >> (CR_BIN_LOG2 * 2)][maskIdx & (CR_BIN_SQR - 1)] = 0;
224
+
225
+ __syncthreads();
226
+
227
+ //------------------------------------------------------------------------
228
+ // Raster.
229
+ //------------------------------------------------------------------------
230
+
231
+ // Triangle per thread: Read from the queue.
232
+
233
+ int triIdx = -1;
234
+ if (triQueueReadPos + thrInBlock < triQueueWritePos)
235
+ triIdx = s_triQueue[(triQueueReadPos + thrInBlock) & (CR_COARSE_QUEUE_SIZE - 1)];
236
+
237
+ uint4 triData = make_uint4(0, 0, 0, 0);
238
+ if (triIdx != -1)
239
+ {
240
+ int dataIdx = triIdx >> 3;
241
+ int subtriIdx = triIdx & 7;
242
+ if (subtriIdx != 7)
243
+ dataIdx = triHeader[dataIdx].misc + subtriIdx;
244
+ triData = *((uint4*)triHeader + dataIdx);
245
+ }
246
+
247
+ // 32 triangles per warp: Record emits (= tile intersections).
248
+
249
+ if (__any_sync(~0u, triIdx != -1))
250
+ {
251
+ S32 v0x = sub_s16lo_s16lo(triData.x, originX);
252
+ S32 v0y = sub_s16hi_s16lo(triData.x, originY);
253
+ S32 d01x = sub_s16lo_s16lo(triData.y, triData.x);
254
+ S32 d01y = sub_s16hi_s16hi(triData.y, triData.x);
255
+ S32 d02x = sub_s16lo_s16lo(triData.z, triData.x);
256
+ S32 d02y = sub_s16hi_s16hi(triData.z, triData.x);
257
+
258
+ // Compute tile-based AABB.
259
+
260
+ int lox = add_clamp_0_x((v0x + min_min(d01x, 0, d02x)) >> tileLog, 0, maxTileXInBin);
261
+ int loy = add_clamp_0_x((v0y + min_min(d01y, 0, d02y)) >> tileLog, 0, maxTileYInBin);
262
+ int hix = add_clamp_0_x((v0x + max_max(d01x, 0, d02x)) >> tileLog, 0, maxTileXInBin);
263
+ int hiy = add_clamp_0_x((v0y + max_max(d01y, 0, d02y)) >> tileLog, 0, maxTileYInBin);
264
+ int sizex = add_sub(hix, 1, lox);
265
+ int sizey = add_sub(hiy, 1, loy);
266
+ int area = sizex * sizey;
267
+
268
+ // Miscellaneous init.
269
+
270
+ U8* currPtr = (U8*)&s_warpEmitMask[threadIdx.y][lox + (loy << CR_BIN_LOG2)];
271
+ int ptrYInc = CR_BIN_SIZE * 4 - (sizex << 2);
272
+ U32 maskBit = 1 << threadIdx.x;
273
+
274
+ // Case A: All AABBs are small => record the full AABB using atomics.
275
+
276
+ if (__all_sync(~0u, sizex <= 2 && sizey <= 2))
277
+ {
278
+ if (triIdx != -1)
279
+ {
280
+ atomicOr((U32*)currPtr, maskBit);
281
+ if (sizex == 2) atomicOr((U32*)(currPtr + 4), maskBit);
282
+ if (sizey == 2) atomicOr((U32*)(currPtr + CR_BIN_SIZE * 4), maskBit);
283
+ if (sizex == 2 && sizey == 2) atomicOr((U32*)(currPtr + 4 + CR_BIN_SIZE * 4), maskBit);
284
+ }
285
+ }
286
+ else
287
+ {
288
+ // Compute warp-AABB (scan-32).
289
+
290
+ U32 aabbMask = add_sub(2 << hix, 0x20000 << hiy, 1 << lox) - (0x10000 << loy);
291
+ if (triIdx == -1)
292
+ aabbMask = 0;
293
+
294
+ volatile U32* v = &s_scanTemp[threadIdx.y][threadIdx.x + 16];
295
+ v[0] = aabbMask; __syncwarp(); aabbMask |= v[-1]; __syncwarp();
296
+ v[0] = aabbMask; __syncwarp(); aabbMask |= v[-2]; __syncwarp();
297
+ v[0] = aabbMask; __syncwarp(); aabbMask |= v[-4]; __syncwarp();
298
+ v[0] = aabbMask; __syncwarp(); aabbMask |= v[-8]; __syncwarp();
299
+ v[0] = aabbMask; __syncwarp(); aabbMask |= v[-16]; __syncwarp();
300
+ v[0] = aabbMask; __syncwarp(); aabbMask = s_scanTemp[threadIdx.y][47];
301
+
302
+ U32 maskX = aabbMask & 0xFFFF;
303
+ U32 maskY = aabbMask >> 16;
304
+ int wlox = findLeadingOne(maskX ^ (maskX - 1));
305
+ int wloy = findLeadingOne(maskY ^ (maskY - 1));
306
+ int whix = findLeadingOne(maskX);
307
+ int whiy = findLeadingOne(maskY);
308
+ int warea = (add_sub(whix, 1, wlox)) * (add_sub(whiy, 1, wloy));
309
+
310
+ // Initialize edge functions.
311
+
312
+ S32 d12x = d02x - d01x;
313
+ S32 d12y = d02y - d01y;
314
+ v0x -= lox << tileLog;
315
+ v0y -= loy << tileLog;
316
+
317
+ S32 t01 = v0x * d01y - v0y * d01x;
318
+ S32 t02 = v0y * d02x - v0x * d02y;
319
+ S32 t12 = d01x * d12y - d01y * d12x - t01 - t02;
320
+ S32 b01 = add_sub(t01 >> tileLog, ::max(d01x, 0), ::min(d01y, 0));
321
+ S32 b02 = add_sub(t02 >> tileLog, ::max(d02y, 0), ::min(d02x, 0));
322
+ S32 b12 = add_sub(t12 >> tileLog, ::max(d12x, 0), ::min(d12y, 0));
323
+
324
+ d01x += sizex * d01y;
325
+ d02x += sizex * d02y;
326
+ d12x += sizex * d12y;
327
+
328
+ // Case B: Warp-AABB is not much larger than largest AABB => Check tiles in warp-AABB, record using ballots.
329
+ if (__any_sync(~0u, warea * 4 <= area * 8))
330
+ {
331
+ // Not sure if this is any faster than Case C after all the post-Volta ballot mask tracking.
332
+ bool act = (triIdx != -1);
333
+ U32 actMask = __ballot_sync(~0u, act);
334
+ if (act)
335
+ {
336
+ for (int y = wloy; y <= whiy; y++)
337
+ {
338
+ bool yIn = (y >= loy && y <= hiy);
339
+ U32 yMask = __ballot_sync(actMask, yIn);
340
+ if (yIn)
341
+ {
342
+ for (int x = wlox; x <= whix; x++)
343
+ {
344
+ bool xyIn = (x >= lox && x <= hix);
345
+ U32 xyMask = __ballot_sync(yMask, xyIn);
346
+ if (xyIn)
347
+ {
348
+ U32 res = __ballot_sync(xyMask, b01 >= 0 && b02 >= 0 && b12 >= 0);
349
+ if (threadIdx.x == 31 - __clz(xyMask))
350
+ *(U32*)currPtr = res;
351
+ currPtr += 4, b01 -= d01y, b02 += d02y, b12 -= d12y;
352
+ }
353
+ }
354
+ currPtr += ptrYInc, b01 += d01x, b02 -= d02x, b12 += d12x;
355
+ }
356
+ }
357
+ }
358
+ }
359
+
360
+ // Case C: General case => Check tiles in AABB, record using atomics.
361
+
362
+ else
363
+ {
364
+ if (triIdx != -1)
365
+ {
366
+ U8* skipPtr = currPtr + (sizex << 2);
367
+ U8* endPtr = currPtr + (sizey << (CR_BIN_LOG2 + 2));
368
+ do
369
+ {
370
+ if (b01 >= 0 && b02 >= 0 && b12 >= 0)
371
+ atomicOr((U32*)currPtr, maskBit);
372
+ currPtr += 4, b01 -= d01y, b02 += d02y, b12 -= d12y;
373
+ if (currPtr == skipPtr)
374
+ currPtr += ptrYInc, b01 += d01x, b02 -= d02x, b12 += d12x, skipPtr += CR_BIN_SIZE * 4;
375
+ }
376
+ while (currPtr != endPtr);
377
+ }
378
+ }
379
+ }
380
+ }
381
+
382
+ __syncthreads();
383
+
384
+ //------------------------------------------------------------------------
385
+ // Count.
386
+ //------------------------------------------------------------------------
387
+
388
+ // Tile per thread: Initialize prefix sums.
389
+
390
+ for (int tileInBin_base = 0; tileInBin_base < CR_BIN_SQR; tileInBin_base += CR_COARSE_WARPS * 32)
391
+ {
392
+ int tileInBin = tileInBin_base + thrInBlock;
393
+ bool act = (tileInBin < CR_BIN_SQR);
394
+ U32 actMask = __ballot_sync(~0u, act);
395
+ if (act)
396
+ {
397
+ // Compute prefix sum of emits over warps.
398
+
399
+ U8* srcPtr = (U8*)&s_warpEmitMask[0][tileInBin];
400
+ U8* dstPtr = (U8*)&s_warpEmitPrefixSum[0][tileInBin];
401
+ int tileEmits = 0;
402
+ for (int i = 0; i < CR_COARSE_WARPS; i++)
403
+ {
404
+ tileEmits += __popc(*(U32*)srcPtr);
405
+ *(U32*)dstPtr = tileEmits;
406
+ srcPtr += (CR_BIN_SQR + 1) * 4;
407
+ dstPtr += (CR_BIN_SQR + 1) * 4;
408
+ }
409
+
410
+ // Determine the number of segments to allocate.
411
+
412
+ int spaceLeft = -s_tileStreamCurrOfs[tileInBin] & (CR_TILE_SEG_SIZE - 1);
413
+ int tileAllocs = (tileEmits - spaceLeft + CR_TILE_SEG_SIZE - 1) >> CR_TILE_SEG_LOG2;
414
+ volatile U32* v = &s_tileEmitPrefixSum[tileInBin + 1];
415
+
416
+ // All counters within the warp are small => compute prefix sum using ballot.
417
+
418
+ if (!__any_sync(actMask, tileEmits >= 2))
419
+ {
420
+ U32 m = getLaneMaskLe();
421
+ *v = (__popc(__ballot_sync(actMask, tileEmits & 1) & m) << emitShift) | __popc(__ballot_sync(actMask, tileAllocs & 1) & m);
422
+ }
423
+
424
+ // Otherwise => scan-32 within the warp.
425
+
426
+ else
427
+ {
428
+ U32 sum = (tileEmits << emitShift) | tileAllocs;
429
+ *v = sum; __syncwarp(actMask); if (threadIdx.x >= 1) sum += v[-1]; __syncwarp(actMask);
430
+ *v = sum; __syncwarp(actMask); if (threadIdx.x >= 2) sum += v[-2]; __syncwarp(actMask);
431
+ *v = sum; __syncwarp(actMask); if (threadIdx.x >= 4) sum += v[-4]; __syncwarp(actMask);
432
+ *v = sum; __syncwarp(actMask); if (threadIdx.x >= 8) sum += v[-8]; __syncwarp(actMask);
433
+ *v = sum; __syncwarp(actMask); if (threadIdx.x >= 16) sum += v[-16]; __syncwarp(actMask);
434
+ *v = sum; __syncwarp(actMask);
435
+ }
436
+ }
437
+ }
438
+
439
+ // First warp: Scan-8.
440
+
441
+ __syncthreads();
442
+
443
+ bool scan8 = (thrInBlock < CR_BIN_SQR / 32);
444
+ U32 scan8Mask = __ballot_sync(~0u, scan8);
445
+ if (scan8)
446
+ {
447
+ int sum = s_tileEmitPrefixSum[(thrInBlock << 5) + 32];
448
+ volatile U32* v = &s_scanTemp[0][thrInBlock + 16];
449
+ v[0] = sum; __syncwarp(scan8Mask);
450
+ #if (CR_BIN_SQR > 1 * 32)
451
+ sum += v[-1]; __syncwarp(scan8Mask); v[0] = sum; __syncwarp(scan8Mask);
452
+ #endif
453
+ #if (CR_BIN_SQR > 2 * 32)
454
+ sum += v[-2]; __syncwarp(scan8Mask); v[0] = sum; __syncwarp(scan8Mask);
455
+ #endif
456
+ #if (CR_BIN_SQR > 4 * 32)
457
+ sum += v[-4]; __syncwarp(scan8Mask); v[0] = sum; __syncwarp(scan8Mask);
458
+ #endif
459
+ }
460
+
461
+ __syncthreads();
462
+
463
+ // Tile per thread: Finalize prefix sums.
464
+ // Single thread: Allocate segments.
465
+
466
+ for (int tileInBin = thrInBlock; tileInBin < CR_BIN_SQR; tileInBin += CR_COARSE_WARPS * 32)
467
+ {
468
+ int sum = s_tileEmitPrefixSum[tileInBin + 1] + s_scanTemp[0][(tileInBin >> 5) + 15];
469
+ int numEmits = sum >> emitShift;
470
+ int numAllocs = sum & ((1 << emitShift) - 1);
471
+ s_tileEmitPrefixSum[tileInBin + 1] = numEmits;
472
+ s_tileAllocPrefixSum[tileInBin + 1] = numAllocs;
473
+
474
+ if (tileInBin == CR_BIN_SQR - 1 && numAllocs != 0)
475
+ {
476
+ int t = atomicAdd(&atomics.numTileSegs, numAllocs);
477
+ s_firstAllocSeg = (t + numAllocs <= p.maxTileSegs) ? t : 0;
478
+ }
479
+ }
480
+
481
+ __syncthreads();
482
+ int firstAllocSeg = s_firstAllocSeg;
483
+ int totalEmits = s_tileEmitPrefixSum[CR_BIN_SQR];
484
+ int totalAllocs = s_tileAllocPrefixSum[CR_BIN_SQR];
485
+
486
+ //------------------------------------------------------------------------
487
+ // Emit.
488
+ //------------------------------------------------------------------------
489
+
490
+ // Emit per thread: Write triangle index to globalmem.
491
+
492
+ for (int emitInBin = thrInBlock; emitInBin < totalEmits; emitInBin += CR_COARSE_WARPS * 32)
493
+ {
494
+ // Find tile in bin.
495
+
496
+ U8* tileBase = (U8*)&s_tileEmitPrefixSum[0];
497
+ U8* tilePtr = tileBase;
498
+ U8* ptr;
499
+
500
+ #if (CR_BIN_SQR > 128)
501
+ ptr = tilePtr + 0x80 * 4; if (emitInBin >= *(U32*)ptr) tilePtr = ptr;
502
+ #endif
503
+ #if (CR_BIN_SQR > 64)
504
+ ptr = tilePtr + 0x40 * 4; if (emitInBin >= *(U32*)ptr) tilePtr = ptr;
505
+ #endif
506
+ #if (CR_BIN_SQR > 32)
507
+ ptr = tilePtr + 0x20 * 4; if (emitInBin >= *(U32*)ptr) tilePtr = ptr;
508
+ #endif
509
+ #if (CR_BIN_SQR > 16)
510
+ ptr = tilePtr + 0x10 * 4; if (emitInBin >= *(U32*)ptr) tilePtr = ptr;
511
+ #endif
512
+ #if (CR_BIN_SQR > 8)
513
+ ptr = tilePtr + 0x08 * 4; if (emitInBin >= *(U32*)ptr) tilePtr = ptr;
514
+ #endif
515
+ #if (CR_BIN_SQR > 4)
516
+ ptr = tilePtr + 0x04 * 4; if (emitInBin >= *(U32*)ptr) tilePtr = ptr;
517
+ #endif
518
+ #if (CR_BIN_SQR > 2)
519
+ ptr = tilePtr + 0x02 * 4; if (emitInBin >= *(U32*)ptr) tilePtr = ptr;
520
+ #endif
521
+ #if (CR_BIN_SQR > 1)
522
+ ptr = tilePtr + 0x01 * 4; if (emitInBin >= *(U32*)ptr) tilePtr = ptr;
523
+ #endif
524
+
525
+ int tileInBin = (tilePtr - tileBase) >> 2;
526
+ int emitInTile = emitInBin - *(U32*)tilePtr;
527
+
528
+ // Find warp in tile.
529
+
530
+ int warpStep = (CR_BIN_SQR + 1) * 4;
531
+ U8* warpBase = (U8*)&s_warpEmitPrefixSum[0][tileInBin] - warpStep;
532
+ U8* warpPtr = warpBase;
533
+
534
+ #if (CR_COARSE_WARPS > 8)
535
+ ptr = warpPtr + 0x08 * warpStep; if (emitInTile >= *(U32*)ptr) warpPtr = ptr;
536
+ #endif
537
+ #if (CR_COARSE_WARPS > 4)
538
+ ptr = warpPtr + 0x04 * warpStep; if (emitInTile >= *(U32*)ptr) warpPtr = ptr;
539
+ #endif
540
+ #if (CR_COARSE_WARPS > 2)
541
+ ptr = warpPtr + 0x02 * warpStep; if (emitInTile >= *(U32*)ptr) warpPtr = ptr;
542
+ #endif
543
+ #if (CR_COARSE_WARPS > 1)
544
+ ptr = warpPtr + 0x01 * warpStep; if (emitInTile >= *(U32*)ptr) warpPtr = ptr;
545
+ #endif
546
+
547
+ int warpInTile = (warpPtr - warpBase) >> (CR_BIN_LOG2 * 2 + 2);
548
+ U32 emitMask = *(U32*)(warpPtr + warpStep + ((U8*)s_warpEmitMask - (U8*)s_warpEmitPrefixSum));
549
+ int emitInWarp = emitInTile - *(U32*)(warpPtr + warpStep) + __popc(emitMask);
550
+
551
+ // Find thread in warp.
552
+
553
+ int threadInWarp = 0;
554
+ int pop = __popc(emitMask & 0xFFFF);
555
+ bool pred = (emitInWarp >= pop);
556
+ if (pred) emitInWarp -= pop;
557
+ if (pred) emitMask >>= 0x10;
558
+ if (pred) threadInWarp += 0x10;
559
+
560
+ pop = __popc(emitMask & 0xFF);
561
+ pred = (emitInWarp >= pop);
562
+ if (pred) emitInWarp -= pop;
563
+ if (pred) emitMask >>= 0x08;
564
+ if (pred) threadInWarp += 0x08;
565
+
566
+ pop = __popc(emitMask & 0xF);
567
+ pred = (emitInWarp >= pop);
568
+ if (pred) emitInWarp -= pop;
569
+ if (pred) emitMask >>= 0x04;
570
+ if (pred) threadInWarp += 0x04;
571
+
572
+ pop = __popc(emitMask & 0x3);
573
+ pred = (emitInWarp >= pop);
574
+ if (pred) emitInWarp -= pop;
575
+ if (pred) emitMask >>= 0x02;
576
+ if (pred) threadInWarp += 0x02;
577
+
578
+ if (emitInWarp >= (emitMask & 1))
579
+ threadInWarp++;
580
+
581
+ // Figure out where to write.
582
+
583
+ int currOfs = s_tileStreamCurrOfs[tileInBin];
584
+ int spaceLeft = -currOfs & (CR_TILE_SEG_SIZE - 1);
585
+ int outOfs = emitInTile;
586
+
587
+ if (outOfs < spaceLeft)
588
+ outOfs += currOfs;
589
+ else
590
+ {
591
+ int allocLo = firstAllocSeg + s_tileAllocPrefixSum[tileInBin];
592
+ outOfs += (allocLo << CR_TILE_SEG_LOG2) - spaceLeft;
593
+ }
594
+
595
+ // Write.
596
+
597
+ int queueIdx = warpInTile * 32 + threadInWarp;
598
+ int triIdx = s_triQueue[(triQueueReadPos + queueIdx) & (CR_COARSE_QUEUE_SIZE - 1)];
599
+
600
+ tileSegData[outOfs] = triIdx;
601
+ }
602
+
603
+ //------------------------------------------------------------------------
604
+ // Patch.
605
+ //------------------------------------------------------------------------
606
+
607
+ // Allocated segment per thread: Initialize next-pointer and count.
608
+
609
+ for (int i = CR_COARSE_WARPS * 32 - 1 - thrInBlock; i < totalAllocs; i += CR_COARSE_WARPS * 32)
610
+ {
611
+ int segIdx = firstAllocSeg + i;
612
+ tileSegNext[segIdx] = segIdx + 1;
613
+ tileSegCount[segIdx] = CR_TILE_SEG_SIZE;
614
+ }
615
+
616
+ // Tile per thread: Fix previous segment's next-pointer and update s_tileStreamCurrOfs.
617
+
618
+ __syncthreads();
619
+ for (int tileInBin = CR_COARSE_WARPS * 32 - 1 - thrInBlock; tileInBin < CR_BIN_SQR; tileInBin += CR_COARSE_WARPS * 32)
620
+ {
621
+ int oldOfs = s_tileStreamCurrOfs[tileInBin];
622
+ int newOfs = oldOfs + s_warpEmitPrefixSum[CR_COARSE_WARPS - 1][tileInBin];
623
+ int allocLo = s_tileAllocPrefixSum[tileInBin];
624
+ int allocHi = s_tileAllocPrefixSum[tileInBin + 1];
625
+
626
+ if (allocLo != allocHi)
627
+ {
628
+ S32* nextPtr = &tileSegNext[(oldOfs - 1) >> CR_TILE_SEG_LOG2];
629
+ if (oldOfs < 0)
630
+ nextPtr = &tileFirstSeg[binTileIdx + globalTileIdx(tileInBin, p.widthTiles)];
631
+ *nextPtr = firstAllocSeg + allocLo;
632
+
633
+ newOfs--;
634
+ newOfs &= CR_TILE_SEG_SIZE - 1;
635
+ newOfs |= (firstAllocSeg + allocHi - 1) << CR_TILE_SEG_LOG2;
636
+ newOfs++;
637
+ }
638
+ s_tileStreamCurrOfs[tileInBin] = newOfs;
639
+ }
640
+
641
+ // Advance queue read pointer.
642
+ // Queue became empty => bin done.
643
+
644
+ triQueueReadPos += CR_COARSE_WARPS * 32;
645
+ }
646
+ while (triQueueReadPos < triQueueWritePos);
647
+
648
+ // Tile per thread: Fix next-pointer and count of the last segment.
649
+ // 32 tiles per warp: Count active tiles.
650
+
651
+ __syncthreads();
652
+
653
+ for (int tileInBin_base = 0; tileInBin_base < CR_BIN_SQR; tileInBin_base += CR_COARSE_WARPS * 32)
654
+ {
655
+ int tileInBin = tileInBin_base + thrInBlock;
656
+ bool act = (tileInBin < CR_BIN_SQR);
657
+ U32 actMask = __ballot_sync(~0u, act);
658
+ if (act)
659
+ {
660
+ int tileX = tileInBin & (CR_BIN_SIZE - 1);
661
+ int tileY = tileInBin >> CR_BIN_LOG2;
662
+ bool force = (p.deferredClear & tileX <= maxTileXInBin & tileY <= maxTileYInBin);
663
+
664
+ int ofs = s_tileStreamCurrOfs[tileInBin];
665
+ int segIdx = (ofs - 1) >> CR_TILE_SEG_LOG2;
666
+ int segCount = ofs & (CR_TILE_SEG_SIZE - 1);
667
+
668
+ if (ofs >= 0)
669
+ tileSegNext[segIdx] = -1;
670
+ else if (force)
671
+ {
672
+ s_tileStreamCurrOfs[tileInBin] = 0;
673
+ tileFirstSeg[binTileIdx + tileX + tileY * p.widthTiles] = -1;
674
+ }
675
+
676
+ if (segCount != 0)
677
+ tileSegCount[segIdx] = segCount;
678
+
679
+ U32 res = __ballot_sync(actMask, ofs >= 0 | force);
680
+ if (threadIdx.x == 0)
681
+ s_scanTemp[0][(tileInBin >> 5) + 16] = __popc(res);
682
+ }
683
+ }
684
+
685
+ // First warp: Scan-8.
686
+ // One thread: Allocate space for active tiles.
687
+
688
+ __syncthreads();
689
+
690
+ bool scan8 = (thrInBlock < CR_BIN_SQR / 32);
691
+ U32 scan8Mask = __ballot_sync(~0u, scan8);
692
+ if (scan8)
693
+ {
694
+ volatile U32* v = &s_scanTemp[0][thrInBlock + 16];
695
+ U32 sum = v[0];
696
+ #if (CR_BIN_SQR > 1 * 32)
697
+ sum += v[-1]; __syncwarp(scan8Mask); v[0] = sum; __syncwarp(scan8Mask);
698
+ #endif
699
+ #if (CR_BIN_SQR > 2 * 32)
700
+ sum += v[-2]; __syncwarp(scan8Mask); v[0] = sum; __syncwarp(scan8Mask);
701
+ #endif
702
+ #if (CR_BIN_SQR > 4 * 32)
703
+ sum += v[-4]; __syncwarp(scan8Mask); v[0] = sum; __syncwarp(scan8Mask);
704
+ #endif
705
+
706
+ if (thrInBlock == CR_BIN_SQR / 32 - 1)
707
+ s_firstActiveIdx = atomicAdd(&atomics.numActiveTiles, sum);
708
+ }
709
+
710
+ // Tile per thread: Output active tiles.
711
+
712
+ __syncthreads();
713
+
714
+ for (int tileInBin_base = 0; tileInBin_base < CR_BIN_SQR; tileInBin_base += CR_COARSE_WARPS * 32)
715
+ {
716
+ int tileInBin = tileInBin_base + thrInBlock;
717
+ bool act = (tileInBin < CR_BIN_SQR) && (s_tileStreamCurrOfs[tileInBin] >= 0);
718
+ U32 actMask = __ballot_sync(~0u, act);
719
+ if (act)
720
+ {
721
+ int activeIdx = s_firstActiveIdx;
722
+ activeIdx += s_scanTemp[0][(tileInBin >> 5) + 15];
723
+ activeIdx += __popc(actMask & getLaneMaskLt());
724
+ activeTiles[activeIdx] = binTileIdx + globalTileIdx(tileInBin, p.widthTiles);
725
+ }
726
+ }
727
+ }
728
+ }
729
+
730
+ //------------------------------------------------------------------------
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/Constants.hpp ADDED
@@ -0,0 +1,73 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+
11
+ //------------------------------------------------------------------------
12
+
13
+ #define CR_MAXVIEWPORT_LOG2 11 // ViewportSize / PixelSize.
14
+ #define CR_SUBPIXEL_LOG2 4 // PixelSize / SubpixelSize.
15
+
16
+ #define CR_MAXBINS_LOG2 4 // ViewportSize / BinSize.
17
+ #define CR_BIN_LOG2 4 // BinSize / TileSize.
18
+ #define CR_TILE_LOG2 3 // TileSize / PixelSize.
19
+
20
+ #define CR_COVER8X8_LUT_SIZE 768 // 64-bit entries.
21
+ #define CR_FLIPBIT_FLIP_Y 2
22
+ #define CR_FLIPBIT_FLIP_X 3
23
+ #define CR_FLIPBIT_SWAP_XY 4
24
+ #define CR_FLIPBIT_COMPL 5
25
+
26
+ #define CR_BIN_STREAMS_LOG2 4
27
+ #define CR_BIN_SEG_LOG2 9 // 32-bit entries.
28
+ #define CR_TILE_SEG_LOG2 5 // 32-bit entries.
29
+
30
+ #define CR_MAXSUBTRIS_LOG2 24 // Triangle structs. Dictated by CoarseRaster.
31
+ #define CR_COARSE_QUEUE_LOG2 10 // Triangles.
32
+
33
+ #define CR_SETUP_WARPS 2
34
+ #define CR_SETUP_OPT_BLOCKS 8
35
+ #define CR_BIN_WARPS 16
36
+ #define CR_COARSE_WARPS 16 // Must be a power of two.
37
+ #define CR_FINE_MAX_WARPS 20
38
+
39
+ #define CR_EMBED_IMAGE_PARAMS 32 // Number of per-image parameter structs embedded in kernel launch parameter block.
40
+
41
+ //------------------------------------------------------------------------
42
+
43
+ #define CR_MAXVIEWPORT_SIZE (1 << CR_MAXVIEWPORT_LOG2)
44
+ #define CR_SUBPIXEL_SIZE (1 << CR_SUBPIXEL_LOG2)
45
+ #define CR_SUBPIXEL_SQR (1 << (CR_SUBPIXEL_LOG2 * 2))
46
+
47
+ #define CR_MAXBINS_SIZE (1 << CR_MAXBINS_LOG2)
48
+ #define CR_MAXBINS_SQR (1 << (CR_MAXBINS_LOG2 * 2))
49
+ #define CR_BIN_SIZE (1 << CR_BIN_LOG2)
50
+ #define CR_BIN_SQR (1 << (CR_BIN_LOG2 * 2))
51
+
52
+ #define CR_MAXTILES_LOG2 (CR_MAXBINS_LOG2 + CR_BIN_LOG2)
53
+ #define CR_MAXTILES_SIZE (1 << CR_MAXTILES_LOG2)
54
+ #define CR_MAXTILES_SQR (1 << (CR_MAXTILES_LOG2 * 2))
55
+ #define CR_TILE_SIZE (1 << CR_TILE_LOG2)
56
+ #define CR_TILE_SQR (1 << (CR_TILE_LOG2 * 2))
57
+
58
+ #define CR_BIN_STREAMS_SIZE (1 << CR_BIN_STREAMS_LOG2)
59
+ #define CR_BIN_SEG_SIZE (1 << CR_BIN_SEG_LOG2)
60
+ #define CR_TILE_SEG_SIZE (1 << CR_TILE_SEG_LOG2)
61
+
62
+ #define CR_MAXSUBTRIS_SIZE (1 << CR_MAXSUBTRIS_LOG2)
63
+ #define CR_COARSE_QUEUE_SIZE (1 << CR_COARSE_QUEUE_LOG2)
64
+
65
+ //------------------------------------------------------------------------
66
+ // When evaluating interpolated Z pixel centers, we may introduce an error
67
+ // of (+-CR_LERP_ERROR) ULPs.
68
+
69
+ #define CR_LERP_ERROR(SAMPLES_LOG2) (2200u << (SAMPLES_LOG2))
70
+ #define CR_DEPTH_MIN CR_LERP_ERROR(3)
71
+ #define CR_DEPTH_MAX (CR_U32_MAX - CR_LERP_ERROR(3))
72
+
73
+ //------------------------------------------------------------------------
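
For readers decoding the log2 notation above: the derived macros resolve to 8x8-pixel tiles grouped into 16x16-tile (128x128-pixel) bins, a 2048-pixel maximum viewport per axis, and 16 subpixel steps per pixel. A small compile-time sanity check, not part of the library, just spelling out that arithmetic:

#include "Constants.hpp"

// Spelled-out values of the derived constants (each is 1 << *_LOG2 from the header above).
static_assert(CR_TILE_SIZE         == 8,    "a tile is 8x8 pixels");
static_assert(CR_BIN_SIZE          == 16,   "a bin is 16x16 tiles, i.e. 128x128 pixels");
static_assert(CR_MAXVIEWPORT_SIZE  == 2048, "maximum viewport extent per axis");
static_assert(CR_SUBPIXEL_SIZE     == 16,   "subpixel steps per pixel");
static_assert(CR_BIN_SEG_SIZE      == 512,  "triangle indices per bin segment");
static_assert(CR_TILE_SEG_SIZE     == 32,   "triangle indices per tile segment");
static_assert(CR_COARSE_QUEUE_SIZE == 1024, "coarse-stage triangle queue length");
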
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/CudaRaster.cpp ADDED
@@ -0,0 +1,79 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "Defs.hpp"
10
+ #include "../CudaRaster.hpp"
11
+ #include "RasterImpl.hpp"
12
+
13
+ using namespace CR;
14
+
15
+ //------------------------------------------------------------------------
16
+ // Stub interface implementation.
17
+ //------------------------------------------------------------------------
18
+
19
+ CudaRaster::CudaRaster()
20
+ {
21
+ m_impl = new RasterImpl();
22
+ }
23
+
24
+ CudaRaster::~CudaRaster()
25
+ {
26
+ delete m_impl;
27
+ }
28
+
29
+ void CudaRaster::setBufferSize(int width, int height, int numImages)
30
+ {
31
+ m_impl->setBufferSize(Vec3i(width, height, numImages));
32
+ }
33
+
34
+ void CudaRaster::setViewport(int width, int height, int offsetX, int offsetY)
35
+ {
36
+ m_impl->setViewport(Vec2i(width, height), Vec2i(offsetX, offsetY));
37
+ }
38
+
39
+ void CudaRaster::setRenderModeFlags(U32 flags)
40
+ {
41
+ m_impl->setRenderModeFlags(flags);
42
+ }
43
+
44
+ void CudaRaster::deferredClear(U32 clearColor)
45
+ {
46
+ m_impl->deferredClear(clearColor);
47
+ }
48
+
49
+ void CudaRaster::setVertexBuffer(void* vertices, int numVertices)
50
+ {
51
+ m_impl->setVertexBuffer(vertices, numVertices);
52
+ }
53
+
54
+ void CudaRaster::setIndexBuffer(void* indices, int numTriangles)
55
+ {
56
+ m_impl->setIndexBuffer(indices, numTriangles);
57
+ }
58
+
59
+ bool CudaRaster::drawTriangles(const int* ranges, bool peel, cudaStream_t stream)
60
+ {
61
+ return m_impl->drawTriangles((const Vec2i*)ranges, peel, stream);
62
+ }
63
+
64
+ void* CudaRaster::getColorBuffer(void)
65
+ {
66
+ return m_impl->getColorBuffer();
67
+ }
68
+
69
+ void* CudaRaster::getDepthBuffer(void)
70
+ {
71
+ return m_impl->getDepthBuffer();
72
+ }
73
+
74
+ void CudaRaster::swapDepthAndPeel(void)
75
+ {
76
+ m_impl->swapDepthAndPeel();
77
+ }
78
+
79
+ //------------------------------------------------------------------------
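
The stub above only forwards to RasterImpl, so the public surface is exactly the methods shown. A hypothetical host-side usage sketch, assuming the class lives in namespace CR as the implementation files suggest, and that the caller has already uploaded float4 clip-space vertices and int3 index triplets (the layouts documented in PrivateDefs.hpp):

#include <cuda_runtime.h>
#include "CudaRaster.hpp"

// Sketch only: d_vertices and d_indices are placeholder device pointers prepared by the caller.
void renderOnce(void* d_vertices, int numVerts, void* d_indices, int numTris,
                int width, int height, cudaStream_t stream)
{
    CR::CudaRaster cr;
    cr.setBufferSize(width, height, /*numImages=*/1);            // internal buffers, tile-aligned
    cr.setViewport(width, height, /*offsetX=*/0, /*offsetY=*/0);
    cr.setRenderModeFlags(CR::CudaRaster::RenderModeFlag_EnableBackfaceCulling);
    cr.deferredClear(0u);                                        // cleared lazily by the fine stage
    cr.setVertexBuffer(d_vertices, numVerts);                    // GPU pointer, float4 positions
    cr.setIndexBuffer(d_indices, numTris);                       // GPU pointer, int3 indices
    bool ok = cr.drawTriangles(/*ranges=*/nullptr, /*peel=*/false, stream); // null ranges => instance mode
    void* d_color = cr.getColorBuffer();                         // U32 per pixel, device memory
    void* d_depth = cr.getDepthBuffer();
    (void)ok; (void)d_color; (void)d_depth;
}
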
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/Defs.hpp ADDED
@@ -0,0 +1,90 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+ #include <cuda_runtime.h>
11
+ #include <cstdint>
12
+
13
+ namespace CR
14
+ {
15
+ //------------------------------------------------------------------------
16
+
17
+ #ifndef NULL
18
+ # define NULL 0
19
+ #endif
20
+
21
+ #ifdef __CUDACC__
22
+ # define CR_CUDA 1
23
+ #else
24
+ # define CR_CUDA 0
25
+ #endif
26
+
27
+ #if CR_CUDA
28
+ # define CR_CUDA_FUNC __device__ __inline__
29
+ # define CR_CUDA_CONST __constant__
30
+ #else
31
+ # define CR_CUDA_FUNC inline
32
+ # define CR_CUDA_CONST static const
33
+ #endif
34
+
35
+ #define CR_UNREF(X) ((void)(X))
36
+ #define CR_ARRAY_SIZE(X) ((int)(sizeof(X) / sizeof((X)[0])))
37
+
38
+ //------------------------------------------------------------------------
39
+
40
+ typedef uint8_t U8;
41
+ typedef uint16_t U16;
42
+ typedef uint32_t U32;
43
+ typedef uint64_t U64;
44
+ typedef int8_t S8;
45
+ typedef int16_t S16;
46
+ typedef int32_t S32;
47
+ typedef int64_t S64;
48
+ typedef float F32;
49
+ typedef double F64;
50
+ typedef void (*FuncPtr)(void);
51
+
52
+ //------------------------------------------------------------------------
53
+
54
+ #define CR_U32_MAX (0xFFFFFFFFu)
55
+ #define CR_S32_MIN (~0x7FFFFFFF)
56
+ #define CR_S32_MAX (0x7FFFFFFF)
57
+ #define CR_U64_MAX ((U64)(S64)-1)
58
+ #define CR_S64_MIN ((S64)-1 << 63)
59
+ #define CR_S64_MAX (~((S64)-1 << 63))
60
+ #define CR_F32_MIN (1.175494351e-38f)
61
+ #define CR_F32_MAX (3.402823466e+38f)
62
+ #define CR_F64_MIN (2.2250738585072014e-308)
63
+ #define CR_F64_MAX (1.7976931348623158e+308)
64
+
65
+ //------------------------------------------------------------------------
66
+ // Misc types.
67
+
68
+ class Vec2i
69
+ {
70
+ public:
71
+ Vec2i(int x_, int y_) : x(x_), y(y_) {}
72
+ int x, y;
73
+ };
74
+
75
+ class Vec3i
76
+ {
77
+ public:
78
+ Vec3i(int x_, int y_, int z_) : x(x_), y(y_), z(z_) {}
79
+ int x, y, z;
80
+ };
81
+
82
+ //------------------------------------------------------------------------
83
+ // CUDA utilities.
84
+
85
+ #if CR_CUDA
86
+ # define globalThreadIdx (threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y)))
87
+ #endif
88
+
89
+ //------------------------------------------------------------------------
90
+ } // namespace CR
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/FineRaster.inl ADDED
@@ -0,0 +1,385 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ //------------------------------------------------------------------------
10
+ // Utility funcs.
11
+ //------------------------------------------------------------------------
12
+
13
+ __device__ __inline__ void initTileZMax(U32& tileZMax, bool& tileZUpd, volatile U32* tileDepth)
14
+ {
15
+ tileZMax = CR_DEPTH_MAX;
16
+ tileZUpd = (::min(tileDepth[threadIdx.x], tileDepth[threadIdx.x + 32]) < tileZMax);
17
+ }
18
+
19
+ __device__ __inline__ void updateTileZMax(U32& tileZMax, bool& tileZUpd, volatile U32* tileDepth, volatile U32* temp)
20
+ {
21
+ // Entry is warp-coherent.
22
+ if (__any_sync(~0u, tileZUpd))
23
+ {
24
+ U32 z = ::max(tileDepth[threadIdx.x], tileDepth[threadIdx.x + 32]); __syncwarp();
25
+ temp[threadIdx.x + 16] = z; __syncwarp();
26
+ z = ::max(z, temp[threadIdx.x + 16 - 1]); __syncwarp(); temp[threadIdx.x + 16] = z; __syncwarp();
27
+ z = ::max(z, temp[threadIdx.x + 16 - 2]); __syncwarp(); temp[threadIdx.x + 16] = z; __syncwarp();
28
+ z = ::max(z, temp[threadIdx.x + 16 - 4]); __syncwarp(); temp[threadIdx.x + 16] = z; __syncwarp();
29
+ z = ::max(z, temp[threadIdx.x + 16 - 8]); __syncwarp(); temp[threadIdx.x + 16] = z; __syncwarp();
30
+ z = ::max(z, temp[threadIdx.x + 16 - 16]); __syncwarp(); temp[threadIdx.x + 16] = z; __syncwarp();
31
+ tileZMax = temp[47];
32
+ tileZUpd = false;
33
+ }
34
+ }
35
+
36
+ //------------------------------------------------------------------------
37
+
38
+ __device__ __inline__ void getTriangle(const CRParams& p, S32& triIdx, S32& dataIdx, uint4& triHeader, S32& segment)
39
+ {
40
+ const CRTriangleHeader* triHeaderPtr = (const CRTriangleHeader*)p.triHeader + blockIdx.z * p.maxSubtris;
41
+ const S32* tileSegData = (const S32*)p.tileSegData + p.maxTileSegs * CR_TILE_SEG_SIZE * blockIdx.z;
42
+ const S32* tileSegNext = (const S32*)p.tileSegNext + p.maxTileSegs * blockIdx.z;
43
+ const S32* tileSegCount = (const S32*)p.tileSegCount + p.maxTileSegs * blockIdx.z;
44
+
45
+ if (threadIdx.x >= tileSegCount[segment])
46
+ {
47
+ triIdx = -1;
48
+ dataIdx = -1;
49
+ }
50
+ else
51
+ {
52
+ int subtriIdx = tileSegData[segment * CR_TILE_SEG_SIZE + threadIdx.x];
53
+ triIdx = subtriIdx >> 3;
54
+ dataIdx = triIdx;
55
+ subtriIdx &= 7;
56
+ if (subtriIdx != 7)
57
+ dataIdx = triHeaderPtr[triIdx].misc + subtriIdx;
58
+ triHeader = *((uint4*)triHeaderPtr + dataIdx);
59
+ }
60
+
61
+ // advance to next segment
62
+ segment = tileSegNext[segment];
63
+ }
64
+
65
+ //------------------------------------------------------------------------
66
+
67
+ __device__ __inline__ bool earlyZCull(uint4 triHeader, U32 tileZMax)
68
+ {
69
+ U32 zmin = triHeader.w & 0xFFFFF000;
70
+ return (zmin > tileZMax);
71
+ }
72
+
73
+ //------------------------------------------------------------------------
74
+
75
+ __device__ __inline__ U64 trianglePixelCoverage(const CRParams& p, const uint4& triHeader, int tileX, int tileY, volatile U64* s_cover8x8_lut)
76
+ {
77
+ int baseX = (tileX << (CR_TILE_LOG2 + CR_SUBPIXEL_LOG2)) - ((p.widthPixelsVp - 1) << (CR_SUBPIXEL_LOG2 - 1));
78
+ int baseY = (tileY << (CR_TILE_LOG2 + CR_SUBPIXEL_LOG2)) - ((p.heightPixelsVp - 1) << (CR_SUBPIXEL_LOG2 - 1));
79
+
80
+ // extract S16 vertex positions while subtracting tile coordinates
81
+ S32 v0x = sub_s16lo_s16lo(triHeader.x, baseX);
82
+ S32 v0y = sub_s16hi_s16lo(triHeader.x, baseY);
83
+ S32 v01x = sub_s16lo_s16lo(triHeader.y, triHeader.x);
84
+ S32 v01y = sub_s16hi_s16hi(triHeader.y, triHeader.x);
85
+ S32 v20x = sub_s16lo_s16lo(triHeader.x, triHeader.z);
86
+ S32 v20y = sub_s16hi_s16hi(triHeader.x, triHeader.z);
87
+
88
+ // extract flipbits
89
+ U32 f01 = (triHeader.w >> 6) & 0x3C;
90
+ U32 f12 = (triHeader.w >> 2) & 0x3C;
91
+ U32 f20 = (triHeader.w << 2) & 0x3C;
92
+
93
+ // compute per-edge coverage masks
94
+ U64 c01, c12, c20;
95
+ c01 = cover8x8_exact_fast(v0x, v0y, v01x, v01y, f01, s_cover8x8_lut);
96
+ c12 = cover8x8_exact_fast(v0x + v01x, v0y + v01y, -v01x - v20x, -v01y - v20y, f12, s_cover8x8_lut);
97
+ c20 = cover8x8_exact_fast(v0x, v0y, v20x, v20y, f20, s_cover8x8_lut);
98
+
99
+ // combine masks
100
+ return c01 & c12 & c20;
101
+ }
102
+
103
+ //------------------------------------------------------------------------
104
+
105
+ __device__ __inline__ U32 scan32_value(U32 value, volatile U32* temp)
106
+ {
107
+ __syncwarp();
108
+ temp[threadIdx.x + 16] = value; __syncwarp();
109
+ value += temp[threadIdx.x + 16 - 1]; __syncwarp(); temp[threadIdx.x + 16] = value; __syncwarp();
110
+ value += temp[threadIdx.x + 16 - 2]; __syncwarp(); temp[threadIdx.x + 16] = value; __syncwarp();
111
+ value += temp[threadIdx.x + 16 - 4]; __syncwarp(); temp[threadIdx.x + 16] = value; __syncwarp();
112
+ value += temp[threadIdx.x + 16 - 8]; __syncwarp(); temp[threadIdx.x + 16] = value; __syncwarp();
113
+ value += temp[threadIdx.x + 16 - 16]; __syncwarp(); temp[threadIdx.x + 16] = value; __syncwarp();
114
+ return value;
115
+ }
116
+
117
+ __device__ __inline__ volatile const U32& scan32_total(volatile U32* temp)
118
+ {
119
+ return temp[47];
120
+ }
121
+
122
+ //------------------------------------------------------------------------
123
+
124
+ __device__ __inline__ S32 findBit(U64 mask, int idx)
125
+ {
126
+ U32 x = getLo(mask);
127
+ int pop = __popc(x);
128
+ bool p = (pop <= idx);
129
+ if (p) x = getHi(mask);
130
+ if (p) idx -= pop;
131
+ int bit = p ? 32 : 0;
132
+
133
+ pop = __popc(x & 0x0000ffffu);
134
+ p = (pop <= idx);
135
+ if (p) x >>= 16;
136
+ if (p) bit += 16;
137
+ if (p) idx -= pop;
138
+
139
+ U32 tmp = x & 0x000000ffu;
140
+ pop = __popc(tmp);
141
+ p = (pop <= idx);
142
+ if (p) tmp = x & 0x0000ff00u;
143
+ if (p) idx -= pop;
144
+
145
+ return findLeadingOne(tmp) + bit - idx;
146
+ }
147
+
148
+ //------------------------------------------------------------------------
149
+ // Single-sample implementation.
150
+ //------------------------------------------------------------------------
151
+
152
+ __device__ __inline__ void executeROP(U32 color, U32 depth, volatile U32* pColor, volatile U32* pDepth, U32 ropMask)
153
+ {
154
+ atomicMin((U32*)pDepth, depth);
155
+ __syncwarp(ropMask);
156
+ bool act = (depth == *pDepth);
157
+ __syncwarp(ropMask);
158
+ U32 actMask = __ballot_sync(ropMask, act);
159
+ if (act)
160
+ {
161
+ *pDepth = 0;
162
+ __syncwarp(actMask);
163
+ atomicMax((U32*)pDepth, threadIdx.x);
164
+ __syncwarp(actMask);
165
+ if (*pDepth == threadIdx.x)
166
+ {
167
+ *pDepth = depth;
168
+ *pColor = color;
169
+ }
170
+ __syncwarp(actMask);
171
+ }
172
+ }
173
+
174
+ //------------------------------------------------------------------------
175
+
176
+ __device__ __inline__ void fineRasterImpl(const CRParams p)
177
+ {
178
+ // for 20 warps:
179
+ __shared__ volatile U64 s_cover8x8_lut[CR_COVER8X8_LUT_SIZE]; // 6KB
180
+ __shared__ volatile U32 s_tileColor [CR_FINE_MAX_WARPS][CR_TILE_SQR]; // 5KB
181
+ __shared__ volatile U32 s_tileDepth [CR_FINE_MAX_WARPS][CR_TILE_SQR]; // 5KB
182
+ __shared__ volatile U32 s_tilePeel [CR_FINE_MAX_WARPS][CR_TILE_SQR]; // 5KB
183
+ __shared__ volatile U32 s_triDataIdx [CR_FINE_MAX_WARPS][64]; // 5KB CRTriangleData index
184
+ __shared__ volatile U64 s_triangleCov [CR_FINE_MAX_WARPS][64]; // 10KB coverage mask
185
+ __shared__ volatile U32 s_triangleFrag[CR_FINE_MAX_WARPS][64]; // 5KB fragment index
186
+ __shared__ volatile U32 s_temp [CR_FINE_MAX_WARPS][80]; // 6.25KB
187
+ // = 47.25KB total
188
+
189
+ CRAtomics& atomics = p.atomics[blockIdx.z];
190
+ const CRTriangleData* triData = (const CRTriangleData*)p.triData + blockIdx.z * p.maxSubtris;
191
+
192
+ const S32* activeTiles = (const S32*)p.activeTiles + CR_MAXTILES_SQR * blockIdx.z;
193
+ const S32* tileFirstSeg = (const S32*)p.tileFirstSeg + CR_MAXTILES_SQR * blockIdx.z;
194
+
195
+ volatile U32* tileColor = s_tileColor[threadIdx.y];
196
+ volatile U32* tileDepth = s_tileDepth[threadIdx.y];
197
+ volatile U32* tilePeel = s_tilePeel[threadIdx.y];
198
+ volatile U32* triDataIdx = s_triDataIdx[threadIdx.y];
199
+ volatile U64* triangleCov = s_triangleCov[threadIdx.y];
200
+ volatile U32* triangleFrag = s_triangleFrag[threadIdx.y];
201
+ volatile U32* temp = s_temp[threadIdx.y];
202
+
203
+ if (atomics.numSubtris > p.maxSubtris || atomics.numBinSegs > p.maxBinSegs || atomics.numTileSegs > p.maxTileSegs)
204
+ return;
205
+
206
+ temp[threadIdx.x] = 0; // first 16 elements of temp are always zero
207
+ cover8x8_setupLUT(s_cover8x8_lut);
208
+ __syncthreads();
209
+
210
+ // loop over tiles
211
+ for (;;)
212
+ {
213
+ // pick a tile
214
+ if (threadIdx.x == 0)
215
+ temp[16] = atomicAdd(&atomics.fineCounter, 1);
216
+ __syncwarp();
217
+ int activeIdx = temp[16];
218
+ if (activeIdx >= atomics.numActiveTiles)
219
+ break;
220
+
221
+ int tileIdx = activeTiles[activeIdx];
222
+ S32 segment = tileFirstSeg[tileIdx];
223
+ int tileY = tileIdx / p.widthTiles;
224
+ int tileX = tileIdx - tileY * p.widthTiles;
225
+ int px = (tileX << CR_TILE_LOG2) + (threadIdx.x & (CR_TILE_SIZE - 1));
226
+ int py = (tileY << CR_TILE_LOG2) + (threadIdx.x >> CR_TILE_LOG2);
227
+
228
+ // initialize per-tile state
229
+ int triRead = 0, triWrite = 0;
230
+ int fragRead = 0, fragWrite = 0;
231
+ if (threadIdx.x == 0)
232
+ triangleFrag[63] = 0; // "previous triangle"
233
+
234
+ // deferred clear => clear tile
235
+ if (p.deferredClear)
236
+ {
237
+ tileColor[threadIdx.x] = p.clearColor;
238
+ tileDepth[threadIdx.x] = p.clearDepth;
239
+ tileColor[threadIdx.x + 32] = p.clearColor;
240
+ tileDepth[threadIdx.x + 32] = p.clearDepth;
241
+ }
242
+ else // otherwise => read tile from framebuffer
243
+ {
244
+ U32* pColor = (U32*)p.colorBuffer + p.strideX * p.strideY * blockIdx.z;
245
+ U32* pDepth = (U32*)p.depthBuffer + p.strideX * p.strideY * blockIdx.z;
246
+ tileColor[threadIdx.x] = pColor[px + p.strideX * py];
247
+ tileDepth[threadIdx.x] = pDepth[px + p.strideX * py];
248
+ tileColor[threadIdx.x + 32] = pColor[px + p.strideX * (py + 4)];
249
+ tileDepth[threadIdx.x + 32] = pDepth[px + p.strideX * (py + 4)];
250
+ }
251
+
252
+ // read peeling inputs if enabled
253
+ if (p.renderModeFlags & CudaRaster::RenderModeFlag_EnableDepthPeeling)
254
+ {
255
+ U32* pPeel = (U32*)p.peelBuffer + p.strideX * p.strideY * blockIdx.z;
256
+ tilePeel[threadIdx.x] = pPeel[px + p.strideX * py];
257
+ tilePeel[threadIdx.x + 32] = pPeel[px + p.strideX * (py + 4)];
258
+ }
259
+
260
+ U32 tileZMax;
261
+ bool tileZUpd;
262
+ initTileZMax(tileZMax, tileZUpd, tileDepth);
263
+
264
+ // process fragments
265
+ for(;;)
266
+ {
267
+ // need to queue more fragments?
268
+ if (fragWrite - fragRead < 32 && segment >= 0)
269
+ {
270
+ // update tile z - coherent over warp
271
+ updateTileZMax(tileZMax, tileZUpd, tileDepth, temp);
272
+
273
+ // read triangles
274
+ do
275
+ {
276
+ // read triangle index and data, advance to next segment
277
+ S32 triIdx, dataIdx;
278
+ uint4 triHeader;
279
+ getTriangle(p, triIdx, dataIdx, triHeader, segment);
280
+
281
+ // early z cull
282
+ if (triIdx >= 0 && earlyZCull(triHeader, tileZMax))
283
+ triIdx = -1;
284
+
285
+ // determine coverage
286
+ U64 coverage = trianglePixelCoverage(p, triHeader, tileX, tileY, s_cover8x8_lut);
287
+ S32 pop = (triIdx == -1) ? 0 : __popcll(coverage);
288
+
289
+ // fragment count scan
290
+ U32 frag = scan32_value(pop, temp);
291
+ frag += fragWrite; // frag now holds cumulative fragment count
292
+ fragWrite += scan32_total(temp);
293
+
294
+ // queue non-empty triangles
295
+ U32 goodMask = __ballot_sync(~0u, pop != 0);
296
+ if (pop != 0)
297
+ {
298
+ int idx = (triWrite + __popc(goodMask & getLaneMaskLt())) & 63;
299
+ triDataIdx [idx] = dataIdx;
300
+ triangleFrag[idx] = frag;
301
+ triangleCov [idx] = coverage;
302
+ }
303
+ triWrite += __popc(goodMask);
304
+ }
305
+ while (fragWrite - fragRead < 32 && segment >= 0);
306
+ }
307
+ __syncwarp();
308
+
309
+ // end of segment?
310
+ if (fragRead == fragWrite)
311
+ break;
312
+
313
+ // clear triangle boundaries
314
+ temp[threadIdx.x + 16] = 0;
315
+ __syncwarp();
316
+
317
+ // tag triangle boundaries
318
+ if (triRead + threadIdx.x < triWrite)
319
+ {
320
+ int idx = triangleFrag[(triRead + threadIdx.x) & 63] - fragRead;
321
+ if (idx <= 32)
322
+ temp[idx + 16 - 1] = 1;
323
+ }
324
+ __syncwarp();
325
+
326
+ int ropLaneIdx = threadIdx.x;
327
+ U32 boundaryMask = __ballot_sync(~0u, temp[ropLaneIdx + 16]);
328
+
329
+ // distribute fragments
330
+ bool hasFragment = (ropLaneIdx < fragWrite - fragRead);
331
+ U32 fragmentMask = __ballot_sync(~0u, hasFragment);
332
+ if (hasFragment)
333
+ {
334
+ int triBufIdx = (triRead + __popc(boundaryMask & getLaneMaskLt())) & 63;
335
+ int fragIdx = add_sub(fragRead, ropLaneIdx, triangleFrag[(triBufIdx - 1) & 63]);
336
+ U64 coverage = triangleCov[triBufIdx];
337
+ int pixelInTile = findBit(coverage, fragIdx);
338
+ int dataIdx = triDataIdx[triBufIdx];
339
+
340
+ // determine pixel position
341
+ U32 pixelX = (tileX << CR_TILE_LOG2) + (pixelInTile & 7);
342
+ U32 pixelY = (tileY << CR_TILE_LOG2) + (pixelInTile >> 3);
343
+
344
+ // depth test
345
+ U32 depth = 0;
346
+ uint4 td = *((uint4*)triData + dataIdx * (sizeof(CRTriangleData) >> 4));
347
+
348
+ depth = td.x * pixelX + td.y * pixelY + td.z;
349
+ bool zkill = (p.renderModeFlags & CudaRaster::RenderModeFlag_EnableDepthPeeling) && (depth <= tilePeel[pixelInTile]);
350
+ if (!zkill)
351
+ {
352
+ U32 oldDepth = tileDepth[pixelInTile];
353
+ if (depth > oldDepth)
354
+ zkill = true;
355
+ else if (oldDepth == tileZMax)
356
+ tileZUpd = true; // we are replacing previous zmax => need to update
357
+ }
358
+
359
+ U32 ropMask = __ballot_sync(fragmentMask, !zkill);
360
+ if (!zkill)
361
+ executeROP(td.w, depth, &tileColor[pixelInTile], &tileDepth[pixelInTile], ropMask);
362
+ }
363
+ // no need to sync, as next up is updateTileZMax that does internal warp sync
364
+
365
+ // update counters
366
+ fragRead = ::min(fragRead + 32, fragWrite);
367
+ triRead += __popc(boundaryMask);
368
+ }
369
+
370
+ // Write tile back to the framebuffer.
371
+ if (true)
372
+ {
373
+ int px = (tileX << CR_TILE_LOG2) + (threadIdx.x & (CR_TILE_SIZE - 1));
374
+ int py = (tileY << CR_TILE_LOG2) + (threadIdx.x >> CR_TILE_LOG2);
375
+ U32* pColor = (U32*)p.colorBuffer + p.strideX * p.strideY * blockIdx.z;
376
+ U32* pDepth = (U32*)p.depthBuffer + p.strideX * p.strideY * blockIdx.z;
377
+ pColor[px + p.strideX * py] = tileColor[threadIdx.x];
378
+ pDepth[px + p.strideX * py] = tileDepth[threadIdx.x];
379
+ pColor[px + p.strideX * (py + 4)] = tileColor[threadIdx.x + 32];
380
+ pDepth[px + p.strideX * (py + 4)] = tileDepth[threadIdx.x + 32];
381
+ }
382
+ }
383
+ }
384
+
385
+ //------------------------------------------------------------------------
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/PrivateDefs.hpp ADDED
@@ -0,0 +1,153 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+ #include "Defs.hpp"
11
+ #include "Constants.hpp"
12
+
13
+ namespace CR
14
+ {
15
+ //------------------------------------------------------------------------
16
+ // Projected triangle.
17
+ //------------------------------------------------------------------------
18
+
19
+ struct CRTriangleHeader
20
+ {
21
+ S16 v0x; // Subpixels relative to viewport center. Valid if triSubtris = 1.
22
+ S16 v0y;
23
+ S16 v1x;
24
+ S16 v1y;
25
+ S16 v2x;
26
+ S16 v2y;
27
+
28
+ U32 misc; // triSubtris=1: (zmin:20, f01:4, f12:4, f20:4), triSubtris>=2: (subtriBase)
29
+ };
30
+
31
+ //------------------------------------------------------------------------
32
+
33
+ struct CRTriangleData
34
+ {
35
+ U32 zx; // zx * sampleX + zy * sampleY + zb = lerp(CR_DEPTH_MIN, CR_DEPTH_MAX, (clipZ / clipW + 1) / 2)
36
+ U32 zy;
37
+ U32 zb;
38
+ U32 id; // Triangle id.
39
+ };
40
+
41
+ //------------------------------------------------------------------------
42
+ // Device-side structures.
43
+ //------------------------------------------------------------------------
44
+
45
+ struct CRAtomics
46
+ {
47
+ // Setup.
48
+ S32 numSubtris; // = numTris
49
+
50
+ // Bin.
51
+ S32 binCounter; // = 0
52
+ S32 numBinSegs; // = 0
53
+
54
+ // Coarse.
55
+ S32 coarseCounter; // = 0
56
+ S32 numTileSegs; // = 0
57
+ S32 numActiveTiles; // = 0
58
+
59
+ // Fine.
60
+ S32 fineCounter; // = 0
61
+ };
62
+
63
+ //------------------------------------------------------------------------
64
+
65
+ struct CRImageParams
66
+ {
67
+ S32 triOffset; // First triangle index to draw.
68
+ S32 triCount; // Number of triangles to draw.
69
+ S32 binBatchSize; // Number of triangles per batch.
70
+ };
71
+
72
+ //------------------------------------------------------------------------
73
+
74
+ struct CRParams
75
+ {
76
+ // Common.
77
+
78
+ CRAtomics* atomics; // Work counters. Per-image.
79
+ S32 numImages; // Batch size.
80
+ S32 totalCount; // In range mode, total number of triangles to render.
81
+ S32 instanceMode; // 0 = range mode, 1 = instance mode.
82
+
83
+ S32 numVertices; // Number of vertices in input buffer, not counting multiples in instance mode.
84
+ S32 numTriangles; // Number of triangles in input buffer.
85
+ void* vertexBuffer; // numVertices * float4(x, y, z, w)
86
+ void* indexBuffer; // numTriangles * int3(vi0, vi1, vi2)
87
+
88
+ S32 widthPixels; // Render buffer size in pixels. Must be multiple of tile size (8x8).
89
+ S32 heightPixels;
90
+ S32 widthPixelsVp; // Viewport size in pixels.
91
+ S32 heightPixelsVp;
92
+ S32 widthBins; // widthPixels / CR_BIN_SIZE
93
+ S32 heightBins; // heightPixels / CR_BIN_SIZE
94
+ S32 numBins; // widthBins * heightBins
95
+
96
+ F32 xs; // Vertex position adjustments for tiled rendering.
97
+ F32 ys;
98
+ F32 xo;
99
+ F32 yo;
100
+
101
+ S32 widthTiles; // widthPixels / CR_TILE_SIZE
102
+ S32 heightTiles; // heightPixels / CR_TILE_SIZE
103
+ S32 numTiles; // widthTiles * heightTiles
104
+
105
+ U32 renderModeFlags;
106
+ S32 deferredClear; // 1 = Clear framebuffer before rendering triangles.
107
+ U32 clearColor;
108
+ U32 clearDepth;
109
+
110
+ // These are uniform across batch.
111
+
112
+ S32 maxSubtris;
113
+ S32 maxBinSegs;
114
+ S32 maxTileSegs;
115
+
116
+ // Setup output / bin input.
117
+
118
+ void* triSubtris; // maxSubtris * U8
119
+ void* triHeader; // maxSubtris * CRTriangleHeader
120
+ void* triData; // maxSubtris * CRTriangleData
121
+
122
+ // Bin output / coarse input.
123
+
124
+ void* binSegData; // maxBinSegs * CR_BIN_SEG_SIZE * S32
125
+ void* binSegNext; // maxBinSegs * S32
126
+ void* binSegCount; // maxBinSegs * S32
127
+ void* binFirstSeg; // CR_MAXBINS_SQR * CR_BIN_STREAMS_SIZE * (S32 segIdx), -1 = none
128
+ void* binTotal; // CR_MAXBINS_SQR * CR_BIN_STREAMS_SIZE * (S32 numTris)
129
+
130
+ // Coarse output / fine input.
131
+
132
+ void* tileSegData; // maxTileSegs * CR_TILE_SEG_SIZE * S32
133
+ void* tileSegNext; // maxTileSegs * S32
134
+ void* tileSegCount; // maxTileSegs * S32
135
+ void* activeTiles; // CR_MAXTILES_SQR * (S32 tileIdx)
136
+ void* tileFirstSeg; // CR_MAXTILES_SQR * (S32 segIdx), -1 = none
137
+
138
+ // Surface buffers. Outer tile offset is baked into pointers.
139
+
140
+ void* colorBuffer; // sizePixels.x * sizePixels.y * numImages * U32
141
+ void* depthBuffer; // sizePixels.x * sizePixels.y * numImages * U32
142
+ void* peelBuffer; // sizePixels.x * sizePixels.y * numImages * U32, only if peeling enabled.
143
+ S32 strideX; // horizontal size in pixels
144
+ S32 strideY; // vertical stride in pixels
145
+
146
+ // Per-image parameters for first images are embedded here to avoid extra memcpy for small batches.
147
+
148
+ CRImageParams imageParamsFirst[CR_EMBED_IMAGE_PARAMS];
149
+ const CRImageParams* imageParamsExtra; // After CR_EMBED_IMAGE_PARAMS.
150
+ };
151
+
152
+ //------------------------------------------------------------------------
153
+ }
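
The CRTriangleData comment above pins down what the fixed-point depth plane encodes: zx*x + zy*y + zb reproduces clip-space depth remapped into [CR_DEPTH_MIN, CR_DEPTH_MAX], which is what the fine rasterizer later evaluates as td.x * pixelX + td.y * pixelY + td.z and resolves with atomicMin. A host-side sketch of that mapping, for clarity only; the library derives the per-triangle coefficients itself:

#include <cstdint>
#include "Defs.hpp"
#include "Constants.hpp"

// Sketch: the U32 value the depth plane is meant to reproduce at a sample, per the comment above:
// lerp(CR_DEPTH_MIN, CR_DEPTH_MAX, (clipZ / clipW + 1) / 2).
static inline uint32_t depthToFixed(float clipZ, float clipW)
{
    double t = ((double)clipZ / (double)clipW + 1.0) * 0.5;      // [0, 1] across the clip volume
    return (uint32_t)((double)CR_DEPTH_MIN + t * (double)(CR_DEPTH_MAX - CR_DEPTH_MIN));
}
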
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/RasterImpl.cpp ADDED
@@ -0,0 +1,370 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "../../framework.h"
10
+ #include "PrivateDefs.hpp"
11
+ #include "Constants.hpp"
12
+ #include "RasterImpl.hpp"
13
+ #include <cuda_runtime.h>
14
+
15
+ using namespace CR;
16
+ using std::min;
17
+ using std::max;
18
+
19
+ //------------------------------------------------------------------------
20
+ // Kernel prototypes and variables.
21
+
22
+ void triangleSetupKernel (const CRParams p);
23
+ void binRasterKernel (const CRParams p);
24
+ void coarseRasterKernel (const CRParams p);
25
+ void fineRasterKernel (const CRParams p);
26
+
27
+ //------------------------------------------------------------------------
28
+
29
+ RasterImpl::RasterImpl(void)
30
+ : m_renderModeFlags (0),
31
+ m_deferredClear (false),
32
+ m_clearColor (0),
33
+ m_vertexPtr (NULL),
34
+ m_indexPtr (NULL),
35
+ m_numVertices (0),
36
+ m_numTriangles (0),
37
+ m_bufferSizesReported (0),
38
+
39
+ m_numImages (0),
40
+ m_bufferSizePixels (0, 0),
41
+ m_bufferSizeVp (0, 0),
42
+ m_sizePixels (0, 0),
43
+ m_sizeVp (0, 0),
44
+ m_offsetPixels (0, 0),
45
+ m_sizeBins (0, 0),
46
+ m_numBins (0),
47
+ m_sizeTiles (0, 0),
48
+ m_numTiles (0),
49
+
50
+ m_numSMs (1),
51
+ m_numCoarseBlocksPerSM (1),
52
+ m_numFineBlocksPerSM (1),
53
+ m_numFineWarpsPerBlock (1),
54
+
55
+ m_maxSubtris (1),
56
+ m_maxBinSegs (1),
57
+ m_maxTileSegs (1)
58
+ {
59
+ // Query relevant device attributes.
60
+
61
+ int currentDevice = 0;
62
+ NVDR_CHECK_CUDA_ERROR(cudaGetDevice(&currentDevice));
63
+ NVDR_CHECK_CUDA_ERROR(cudaDeviceGetAttribute(&m_numSMs, cudaDevAttrMultiProcessorCount, currentDevice));
64
+ cudaFuncAttributes attr;
65
+ NVDR_CHECK_CUDA_ERROR(cudaFuncGetAttributes(&attr, (void*)fineRasterKernel));
66
+ m_numFineWarpsPerBlock = min(attr.maxThreadsPerBlock / 32, CR_FINE_MAX_WARPS);
67
+ NVDR_CHECK_CUDA_ERROR(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&m_numCoarseBlocksPerSM, (void*)coarseRasterKernel, 32 * CR_COARSE_WARPS, 0));
68
+ NVDR_CHECK_CUDA_ERROR(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&m_numFineBlocksPerSM, (void*)fineRasterKernel, 32 * m_numFineWarpsPerBlock, 0));
69
+
70
+ // Setup functions.
71
+
72
+ NVDR_CHECK_CUDA_ERROR(cudaFuncSetCacheConfig((void*)triangleSetupKernel, cudaFuncCachePreferShared));
73
+ NVDR_CHECK_CUDA_ERROR(cudaFuncSetCacheConfig((void*)binRasterKernel, cudaFuncCachePreferShared));
74
+ NVDR_CHECK_CUDA_ERROR(cudaFuncSetCacheConfig((void*)coarseRasterKernel, cudaFuncCachePreferShared));
75
+ NVDR_CHECK_CUDA_ERROR(cudaFuncSetCacheConfig((void*)fineRasterKernel, cudaFuncCachePreferShared));
76
+ }
77
+
78
+ //------------------------------------------------------------------------
79
+
80
+ RasterImpl::~RasterImpl(void)
81
+ {
82
+ // Empty.
83
+ }
84
+
85
+ //------------------------------------------------------------------------
86
+
87
+ void RasterImpl::setBufferSize(Vec3i size)
88
+ {
89
+ // Internal buffer width and height must be divisible by tile size.
90
+ int w = (size.x + CR_TILE_SIZE - 1) & (-CR_TILE_SIZE);
91
+ int h = (size.y + CR_TILE_SIZE - 1) & (-CR_TILE_SIZE);
92
+
93
+ m_bufferSizePixels = Vec2i(w, h);
94
+ m_bufferSizeVp = Vec2i(size.x, size.y);
95
+ m_numImages = size.z;
96
+
97
+ m_colorBuffer.reset(w * h * size.z * sizeof(U32));
98
+ m_depthBuffer.reset(w * h * size.z * sizeof(U32));
99
+ }
100
+
101
+ //------------------------------------------------------------------------
102
+
103
+ void RasterImpl::setViewport(Vec2i size, Vec2i offset)
104
+ {
105
+ // Offset must be divisible by tile size.
106
+ NVDR_CHECK((offset.x & (CR_TILE_SIZE - 1)) == 0 && (offset.y & (CR_TILE_SIZE - 1)) == 0, "invalid viewport offset");
107
+
108
+ // Round internal viewport size to multiples of tile size.
109
+ int w = (size.x + CR_TILE_SIZE - 1) & (-CR_TILE_SIZE);
110
+ int h = (size.y + CR_TILE_SIZE - 1) & (-CR_TILE_SIZE);
111
+
112
+ m_sizePixels = Vec2i(w, h);
113
+ m_offsetPixels = offset;
114
+ m_sizeVp = Vec2i(size.x, size.y);
115
+ m_sizeTiles.x = m_sizePixels.x >> CR_TILE_LOG2;
116
+ m_sizeTiles.y = m_sizePixels.y >> CR_TILE_LOG2;
117
+ m_numTiles = m_sizeTiles.x * m_sizeTiles.y;
118
+ m_sizeBins.x = (m_sizeTiles.x + CR_BIN_SIZE - 1) >> CR_BIN_LOG2;
119
+ m_sizeBins.y = (m_sizeTiles.y + CR_BIN_SIZE - 1) >> CR_BIN_LOG2;
120
+ m_numBins = m_sizeBins.x * m_sizeBins.y;
121
+ }
122
+
123
+ void RasterImpl::swapDepthAndPeel(void)
124
+ {
125
+ m_peelBuffer.reset(m_depthBuffer.getSize()); // Ensure equal size and valid pointer.
126
+
127
+ void* tmp = m_depthBuffer.getPtr();
128
+ m_depthBuffer.setPtr(m_peelBuffer.getPtr());
129
+ m_peelBuffer.setPtr(tmp);
130
+ }
131
+
132
+ //------------------------------------------------------------------------
133
+
134
+ bool RasterImpl::drawTriangles(const Vec2i* ranges, bool peel, cudaStream_t stream)
135
+ {
136
+ bool instanceMode = (!ranges);
137
+
138
+ int maxSubtrisSlack = 4096; // x 81B = 324KB
139
+ int maxBinSegsSlack = 256; // x 2137B = 534KB
140
+ int maxTileSegsSlack = 4096; // x 136B = 544KB
141
+
142
+ // Resize atomics as needed.
143
+ m_crAtomics .grow(m_numImages * sizeof(CRAtomics));
144
+ m_crAtomicsHost.grow(m_numImages * sizeof(CRAtomics));
145
+
146
+ // Size of these buffers doesn't depend on input.
147
+ m_binFirstSeg .grow(m_numImages * CR_MAXBINS_SQR * CR_BIN_STREAMS_SIZE * sizeof(S32));
148
+ m_binTotal .grow(m_numImages * CR_MAXBINS_SQR * CR_BIN_STREAMS_SIZE * sizeof(S32));
149
+ m_activeTiles .grow(m_numImages * CR_MAXTILES_SQR * sizeof(S32));
150
+ m_tileFirstSeg .grow(m_numImages * CR_MAXTILES_SQR * sizeof(S32));
151
+
152
+ // Construct per-image parameters and determine worst-case buffer sizes.
153
+ m_crImageParamsHost.grow(m_numImages * sizeof(CRImageParams));
154
+ CRImageParams* imageParams = (CRImageParams*)m_crImageParamsHost.getPtr();
155
+ for (int i=0; i < m_numImages; i++)
156
+ {
157
+ CRImageParams& ip = imageParams[i];
158
+
159
+ int roundSize = CR_BIN_WARPS * 32;
160
+ int minBatches = CR_BIN_STREAMS_SIZE * 2;
161
+ int maxRounds = 32;
162
+
163
+ ip.triOffset = instanceMode ? 0 : ranges[i].x;
164
+ ip.triCount = instanceMode ? m_numTriangles : ranges[i].y;
165
+ ip.binBatchSize = min(max(ip.triCount / (roundSize * minBatches), 1), maxRounds) * roundSize;
166
+
167
+ m_maxSubtris = max(m_maxSubtris, min(ip.triCount + maxSubtrisSlack, CR_MAXSUBTRIS_SIZE));
168
+ m_maxBinSegs = max(m_maxBinSegs, max(m_numBins * CR_BIN_STREAMS_SIZE, (ip.triCount - 1) / CR_BIN_SEG_SIZE + 1) + maxBinSegsSlack);
169
+ m_maxTileSegs = max(m_maxTileSegs, max(m_numTiles, (ip.triCount - 1) / CR_TILE_SEG_SIZE + 1) + maxTileSegsSlack);
170
+ }
171
+
172
+ // Retry until successful.
173
+
174
+ for (;;)
175
+ {
176
+ // Allocate buffers.
177
+ m_triSubtris.reset(m_numImages * m_maxSubtris * sizeof(U8));
178
+ m_triHeader .reset(m_numImages * m_maxSubtris * sizeof(CRTriangleHeader));
179
+ m_triData .reset(m_numImages * m_maxSubtris * sizeof(CRTriangleData));
180
+
181
+ m_binSegData .reset(m_numImages * m_maxBinSegs * CR_BIN_SEG_SIZE * sizeof(S32));
182
+ m_binSegNext .reset(m_numImages * m_maxBinSegs * sizeof(S32));
183
+ m_binSegCount.reset(m_numImages * m_maxBinSegs * sizeof(S32));
184
+
185
+ m_tileSegData .reset(m_numImages * m_maxTileSegs * CR_TILE_SEG_SIZE * sizeof(S32));
186
+ m_tileSegNext .reset(m_numImages * m_maxTileSegs * sizeof(S32));
187
+ m_tileSegCount.reset(m_numImages * m_maxTileSegs * sizeof(S32));
188
+
189
+ // Report if buffers grow from last time.
190
+ size_t sizesTotal = getTotalBufferSizes();
191
+ if (sizesTotal > m_bufferSizesReported)
192
+ {
193
+ size_t sizesMB = ((sizesTotal - 1) >> 20) + 1; // Round up.
194
+ sizesMB = ((sizesMB + 9) / 10) * 10; // 10MB granularity enough in this day and age.
195
+ LOG(INFO) << "Internal buffers grown to " << sizesMB << " MB";
196
+ m_bufferSizesReported = sizesMB << 20;
197
+ }
198
+
199
+ // Launch stages. Blocks until everything is done.
200
+ launchStages(instanceMode, peel, stream);
201
+
202
+ // Peeling iteration cannot fail, so no point checking things further.
203
+ if (peel)
204
+ break;
205
+
206
+ // Atomics after coarse stage are now available.
207
+ CRAtomics* atomics = (CRAtomics*)m_crAtomicsHost.getPtr();
208
+
209
+ // Success?
210
+ bool failed = false;
211
+ for (int i=0; i < m_numImages; i++)
212
+ {
213
+ const CRAtomics& a = atomics[i];
214
+ failed = failed || (a.numSubtris > m_maxSubtris) || (a.numBinSegs > m_maxBinSegs) || (a.numTileSegs > m_maxTileSegs);
215
+ }
216
+ if (!failed)
217
+ break; // Success!
218
+
219
+ // If we were already at maximum capacity, no can do.
220
+ if (m_maxSubtris == CR_MAXSUBTRIS_SIZE)
221
+ return false;
222
+
223
+ // Enlarge buffers and try again.
224
+ for (int i=0; i < m_numImages; i++)
225
+ {
226
+ const CRAtomics& a = atomics[i];
227
+ m_maxSubtris = max(m_maxSubtris, min(a.numSubtris + maxSubtrisSlack, CR_MAXSUBTRIS_SIZE));
228
+ m_maxBinSegs = max(m_maxBinSegs, a.numBinSegs + maxBinSegsSlack);
229
+ m_maxTileSegs = max(m_maxTileSegs, a.numTileSegs + maxTileSegsSlack);
230
+ }
231
+ }
232
+
233
+ m_deferredClear = false;
234
+ return true; // Success.
235
+ }
236
+
237
+ //------------------------------------------------------------------------
238
+
239
+ size_t RasterImpl::getTotalBufferSizes(void) const
240
+ {
241
+ return
242
+ m_colorBuffer.getSize() + m_depthBuffer.getSize() + // Don't include atomics and image params.
243
+ m_triSubtris.getSize() + m_triHeader.getSize() + m_triData.getSize() +
244
+ m_binFirstSeg.getSize() + m_binTotal.getSize() + m_binSegData.getSize() + m_binSegNext.getSize() + m_binSegCount.getSize() +
245
+ m_activeTiles.getSize() + m_tileFirstSeg.getSize() + m_tileSegData.getSize() + m_tileSegNext.getSize() + m_tileSegCount.getSize();
246
+ }
247
+
248
+ //------------------------------------------------------------------------
249
+
250
+ void RasterImpl::launchStages(bool instanceMode, bool peel, cudaStream_t stream)
251
+ {
252
+ CRImageParams* imageParams = (CRImageParams*)m_crImageParamsHost.getPtr();
253
+
254
+ // Unless peeling, initialize atomics to mostly zero.
255
+ CRAtomics* atomics = (CRAtomics*)m_crAtomicsHost.getPtr();
256
+ if (!peel)
257
+ {
258
+ memset(atomics, 0, m_numImages * sizeof(CRAtomics));
259
+ for (int i=0; i < m_numImages; i++)
260
+ atomics[i].numSubtris = imageParams[i].triCount;
261
+ }
262
+
263
+ // Copy to device. If peeling, this is the state after coarse raster launch on first iteration.
264
+ NVDR_CHECK_CUDA_ERROR(cudaMemcpyAsync(m_crAtomics.getPtr(), atomics, m_numImages * sizeof(CRAtomics), cudaMemcpyHostToDevice, stream));
265
+
266
+ // Copy per-image parameters if there are more than fits in launch parameter block and we haven't done it already.
267
+ if (!peel && m_numImages > CR_EMBED_IMAGE_PARAMS)
268
+ {
269
+ int numImageParamsExtra = m_numImages - CR_EMBED_IMAGE_PARAMS;
270
+ m_crImageParamsExtra.grow(numImageParamsExtra * sizeof(CRImageParams));
271
+ NVDR_CHECK_CUDA_ERROR(cudaMemcpyAsync(m_crImageParamsExtra.getPtr(), imageParams + CR_EMBED_IMAGE_PARAMS, numImageParamsExtra * sizeof(CRImageParams), cudaMemcpyHostToDevice, stream));
272
+ }
273
+
274
+ // Set global parameters.
275
+ CRParams p;
276
+ {
277
+ p.atomics = (CRAtomics*)m_crAtomics.getPtr();
278
+ p.numImages = m_numImages;
279
+ p.totalCount = 0; // Only relevant in range mode.
280
+ p.instanceMode = instanceMode ? 1 : 0;
281
+
282
+ p.numVertices = m_numVertices;
283
+ p.numTriangles = m_numTriangles;
284
+ p.vertexBuffer = m_vertexPtr;
285
+ p.indexBuffer = m_indexPtr;
286
+
287
+ p.widthPixels = m_sizePixels.x;
288
+ p.heightPixels = m_sizePixels.y;
289
+ p.widthPixelsVp = m_sizeVp.x;
290
+ p.heightPixelsVp = m_sizeVp.y;
291
+ p.widthBins = m_sizeBins.x;
292
+ p.heightBins = m_sizeBins.y;
293
+ p.numBins = m_numBins;
294
+
295
+ p.xs = (float)m_bufferSizeVp.x / (float)m_sizeVp.x;
296
+ p.ys = (float)m_bufferSizeVp.y / (float)m_sizeVp.y;
297
+ p.xo = (float)(m_bufferSizeVp.x - m_sizeVp.x - 2 * m_offsetPixels.x) / (float)m_sizeVp.x;
298
+ p.yo = (float)(m_bufferSizeVp.y - m_sizeVp.y - 2 * m_offsetPixels.y) / (float)m_sizeVp.y;
299
+
300
+ p.widthTiles = m_sizeTiles.x;
301
+ p.heightTiles = m_sizeTiles.y;
302
+ p.numTiles = m_numTiles;
303
+
304
+ p.renderModeFlags = m_renderModeFlags;
305
+ p.deferredClear = m_deferredClear ? 1 : 0;
306
+ p.clearColor = m_clearColor;
307
+ p.clearDepth = CR_DEPTH_MAX;
308
+
309
+ p.maxSubtris = m_maxSubtris;
310
+ p.maxBinSegs = m_maxBinSegs;
311
+ p.maxTileSegs = m_maxTileSegs;
312
+
313
+ p.triSubtris = m_triSubtris.getPtr();
314
+ p.triHeader = m_triHeader.getPtr();
315
+ p.triData = m_triData.getPtr();
316
+ p.binSegData = m_binSegData.getPtr();
317
+ p.binSegNext = m_binSegNext.getPtr();
318
+ p.binSegCount = m_binSegCount.getPtr();
319
+ p.binFirstSeg = m_binFirstSeg.getPtr();
320
+ p.binTotal = m_binTotal.getPtr();
321
+ p.tileSegData = m_tileSegData.getPtr();
322
+ p.tileSegNext = m_tileSegNext.getPtr();
323
+ p.tileSegCount = m_tileSegCount.getPtr();
324
+ p.activeTiles = m_activeTiles.getPtr();
325
+ p.tileFirstSeg = m_tileFirstSeg.getPtr();
326
+
327
+ size_t byteOffset = ((size_t)m_offsetPixels.x + (size_t)m_offsetPixels.y * (size_t)m_bufferSizePixels.x) * sizeof(U32); // Use m_bufferSizePixels.x here; p.strideX is assigned only a few lines below.
328
+ p.colorBuffer = m_colorBuffer.getPtr(byteOffset);
329
+ p.depthBuffer = m_depthBuffer.getPtr(byteOffset);
330
+ p.peelBuffer = (m_renderModeFlags & CudaRaster::RenderModeFlag_EnableDepthPeeling) ? m_peelBuffer.getPtr(byteOffset) : 0;
331
+ p.strideX = m_bufferSizePixels.x;
332
+ p.strideY = m_bufferSizePixels.y;
333
+
334
+ memcpy(&p.imageParamsFirst, imageParams, min(m_numImages, CR_EMBED_IMAGE_PARAMS) * sizeof(CRImageParams));
335
+ p.imageParamsExtra = (CRImageParams*)m_crImageParamsExtra.getPtr();
336
+ }
337
+
338
+ // Setup block sizes.
339
+
340
+ dim3 brBlock(32, CR_BIN_WARPS);
341
+ dim3 crBlock(32, CR_COARSE_WARPS);
342
+ dim3 frBlock(32, m_numFineWarpsPerBlock);
343
+ void* args[] = {&p};
344
+
345
+ // Launch stages from setup to coarse and copy atomics to host only if this is not a single-tile peeling iteration.
346
+ if (!peel)
347
+ {
348
+ if (instanceMode)
349
+ {
350
+ int setupBlocks = (m_numTriangles - 1) / (32 * CR_SETUP_WARPS) + 1;
351
+ NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel((void*)triangleSetupKernel, dim3(setupBlocks, 1, m_numImages), dim3(32, CR_SETUP_WARPS), args, 0, stream));
352
+ }
353
+ else
354
+ {
355
+ for (int i=0; i < m_numImages; i++)
356
+ p.totalCount += imageParams[i].triCount;
357
+ int setupBlocks = (p.totalCount - 1) / (32 * CR_SETUP_WARPS) + 1;
358
+ NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel((void*)triangleSetupKernel, dim3(setupBlocks, 1, 1), dim3(32, CR_SETUP_WARPS), args, 0, stream));
359
+ }
360
+ NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel((void*)binRasterKernel, dim3(CR_BIN_STREAMS_SIZE, 1, m_numImages), brBlock, args, 0, stream));
361
+ NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel((void*)coarseRasterKernel, dim3(m_numSMs * m_numCoarseBlocksPerSM, 1, m_numImages), crBlock, args, 0, stream));
362
+ NVDR_CHECK_CUDA_ERROR(cudaMemcpyAsync(m_crAtomicsHost.getPtr(), m_crAtomics.getPtr(), sizeof(CRAtomics) * m_numImages, cudaMemcpyDeviceToHost, stream));
363
+ }
364
+
365
+ // Fine rasterizer is launched always.
366
+ NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel((void*)fineRasterKernel, dim3(m_numSMs * m_numFineBlocksPerSM, 1, m_numImages), frBlock, args, 0, stream));
367
+ NVDR_CHECK_CUDA_ERROR(cudaStreamSynchronize(stream));
368
+ }
369
+
370
+ //------------------------------------------------------------------------
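
One consequence of the retry loop in drawTriangles() above: overflow of the bin and tile segment buffers is handled internally by growing the buffers and relaunching, so the only failure a caller ever sees is the subtriangle count hitting CR_MAXSUBTRIS_SIZE. A caller-side sketch under those assumptions:

#include <cstdio>
#include "CudaRaster.hpp"

// Sketch: drawTriangles() returns false only when m_maxSubtris is already at CR_MAXSUBTRIS_SIZE
// and the scene still produces more subtriangles; segment-buffer overflows are retried internally.
bool drawOrReport(CR::CudaRaster& cr, cudaStream_t stream)
{
    bool ok = cr.drawTriangles(/*ranges=*/nullptr, /*peel=*/false, stream);
    if (!ok)
        std::fprintf(stderr, "cudaraster: subtriangle count exceeds CR_MAXSUBTRIS_SIZE\n");
    return ok;
}
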
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/RasterImpl.hpp ADDED
@@ -0,0 +1,102 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+ #include "PrivateDefs.hpp"
11
+ #include "Buffer.hpp"
12
+ #include "../CudaRaster.hpp"
13
+
14
+ namespace CR
15
+ {
16
+ //------------------------------------------------------------------------
17
+
18
+ class RasterImpl
19
+ {
20
+ public:
21
+ RasterImpl (void);
22
+ ~RasterImpl (void);
23
+
24
+ void setBufferSize (Vec3i size);
25
+ void setViewport (Vec2i size, Vec2i offset);
26
+ void setRenderModeFlags (U32 flags) { m_renderModeFlags = flags; }
27
+ void deferredClear (U32 color) { m_deferredClear = true; m_clearColor = color; }
28
+ void setVertexBuffer (void* ptr, int numVertices) { m_vertexPtr = ptr; m_numVertices = numVertices; } // GPU pointer.
29
+ void setIndexBuffer (void* ptr, int numTriangles) { m_indexPtr = ptr; m_numTriangles = numTriangles; } // GPU pointer.
30
+ bool drawTriangles (const Vec2i* ranges, bool peel, cudaStream_t stream);
31
+ void* getColorBuffer (void) { return m_colorBuffer.getPtr(); } // GPU pointer.
32
+ void* getDepthBuffer (void) { return m_depthBuffer.getPtr(); } // GPU pointer.
33
+ void swapDepthAndPeel (void);
34
+ size_t getTotalBufferSizes (void) const;
35
+
36
+ private:
37
+ void launchStages (bool instanceMode, bool peel, cudaStream_t stream);
38
+
39
+ // State.
40
+
41
+ unsigned int m_renderModeFlags;
42
+ bool m_deferredClear;
43
+ unsigned int m_clearColor;
44
+ void* m_vertexPtr;
45
+ void* m_indexPtr;
46
+ int m_numVertices; // Input buffer size.
47
+ int m_numTriangles; // Input buffer size.
48
+ size_t m_bufferSizesReported; // Previously reported buffer sizes.
49
+
50
+ // Surfaces.
51
+
52
+ Buffer m_colorBuffer;
53
+ Buffer m_depthBuffer;
54
+ Buffer m_peelBuffer;
55
+ int m_numImages;
56
+ Vec2i m_bufferSizePixels; // Internal buffer size.
57
+ Vec2i m_bufferSizeVp; // Total viewport size.
58
+ Vec2i m_sizePixels; // Internal size at which all computation is done, buffers reserved, etc.
59
+ Vec2i m_sizeVp; // Size to which output will be cropped outside, determines viewport size.
60
+ Vec2i m_offsetPixels; // Viewport offset for tiled rendering.
61
+ Vec2i m_sizeBins;
62
+ S32 m_numBins;
63
+ Vec2i m_sizeTiles;
64
+ S32 m_numTiles;
65
+
66
+ // Launch sizes etc.
67
+
68
+ S32 m_numSMs;
69
+ S32 m_numCoarseBlocksPerSM;
70
+ S32 m_numFineBlocksPerSM;
71
+ S32 m_numFineWarpsPerBlock;
72
+
73
+ // Global intermediate buffers. Individual images have offsets to these.
74
+
75
+ Buffer m_crAtomics;
76
+ HostBuffer m_crAtomicsHost;
77
+ HostBuffer m_crImageParamsHost;
78
+ Buffer m_crImageParamsExtra;
79
+ Buffer m_triSubtris;
80
+ Buffer m_triHeader;
81
+ Buffer m_triData;
82
+ Buffer m_binFirstSeg;
83
+ Buffer m_binTotal;
84
+ Buffer m_binSegData;
85
+ Buffer m_binSegNext;
86
+ Buffer m_binSegCount;
87
+ Buffer m_activeTiles;
88
+ Buffer m_tileFirstSeg;
89
+ Buffer m_tileSegData;
90
+ Buffer m_tileSegNext;
91
+ Buffer m_tileSegCount;
92
+
93
+ // Actual buffer sizes.
94
+
95
+ S32 m_maxSubtris;
96
+ S32 m_maxBinSegs;
97
+ S32 m_maxTileSegs;
98
+ };
99
+
100
+ //------------------------------------------------------------------------
101
+ } // namespace CR
102
+
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/RasterImpl_.cu ADDED
@@ -0,0 +1,37 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "../CudaRaster.hpp"
10
+ #include "PrivateDefs.hpp"
11
+ #include "Constants.hpp"
12
+ #include "Util.inl"
13
+
14
+ namespace CR
15
+ {
16
+
17
+ //------------------------------------------------------------------------
18
+ // Stage implementations.
19
+ //------------------------------------------------------------------------
20
+
21
+ #include "TriangleSetup.inl"
22
+ #include "BinRaster.inl"
23
+ #include "CoarseRaster.inl"
24
+ #include "FineRaster.inl"
25
+
26
+ }
27
+
28
+ //------------------------------------------------------------------------
29
+ // Stage entry points.
30
+ //------------------------------------------------------------------------
31
+
32
+ __global__ void __launch_bounds__(CR_SETUP_WARPS * 32, CR_SETUP_OPT_BLOCKS) triangleSetupKernel (const CR::CRParams p) { CR::triangleSetupImpl(p); }
33
+ __global__ void __launch_bounds__(CR_BIN_WARPS * 32, 1) binRasterKernel (const CR::CRParams p) { CR::binRasterImpl(p); }
34
+ __global__ void __launch_bounds__(CR_COARSE_WARPS * 32, 1) coarseRasterKernel (const CR::CRParams p) { CR::coarseRasterImpl(p); }
35
+ __global__ void __launch_bounds__(CR_FINE_MAX_WARPS * 32, 1) fineRasterKernel (const CR::CRParams p) { CR::fineRasterImpl(p); }
36
+
37
+ //------------------------------------------------------------------------
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/TriangleSetup.inl ADDED
@@ -0,0 +1,402 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ //------------------------------------------------------------------------
10
+
11
+ __device__ __inline__ void snapTriangle(
12
+ const CRParams& p,
13
+ float4 v0, float4 v1, float4 v2,
14
+ int2& p0, int2& p1, int2& p2, float3& rcpW, int2& lo, int2& hi)
15
+ {
16
+ F32 viewScaleX = (F32)(p.widthPixelsVp << (CR_SUBPIXEL_LOG2 - 1));
17
+ F32 viewScaleY = (F32)(p.heightPixelsVp << (CR_SUBPIXEL_LOG2 - 1));
18
+ rcpW = make_float3(1.0f / v0.w, 1.0f / v1.w, 1.0f / v2.w);
19
+ p0 = make_int2(f32_to_s32_sat(v0.x * rcpW.x * viewScaleX), f32_to_s32_sat(v0.y * rcpW.x * viewScaleY));
20
+ p1 = make_int2(f32_to_s32_sat(v1.x * rcpW.y * viewScaleX), f32_to_s32_sat(v1.y * rcpW.y * viewScaleY));
21
+ p2 = make_int2(f32_to_s32_sat(v2.x * rcpW.z * viewScaleX), f32_to_s32_sat(v2.y * rcpW.z * viewScaleY));
22
+ lo = make_int2(min_min(p0.x, p1.x, p2.x), min_min(p0.y, p1.y, p2.y));
23
+ hi = make_int2(max_max(p0.x, p1.x, p2.x), max_max(p0.y, p1.y, p2.y));
24
+ }
25
+
26
+ //------------------------------------------------------------------------
27
+
28
+ __device__ __inline__ U32 cover8x8_selectFlips(S32 dx, S32 dy) // 10 instr
29
+ {
30
+ U32 flips = 0;
31
+ if (dy > 0 || (dy == 0 && dx <= 0))
32
+ flips ^= (1 << CR_FLIPBIT_FLIP_X) ^ (1 << CR_FLIPBIT_FLIP_Y) ^ (1 << CR_FLIPBIT_COMPL);
33
+ if (dx > 0)
34
+ flips ^= (1 << CR_FLIPBIT_FLIP_X) ^ (1 << CR_FLIPBIT_FLIP_Y);
35
+ if (::abs(dx) < ::abs(dy))
36
+ flips ^= (1 << CR_FLIPBIT_SWAP_XY) ^ (1 << CR_FLIPBIT_FLIP_Y);
37
+ return flips;
38
+ }
39
+
40
+ //------------------------------------------------------------------------
41
+
42
+ __device__ __inline__ bool prepareTriangle(
43
+ const CRParams& p,
44
+ int2 p0, int2 p1, int2 p2, int2 lo, int2 hi,
45
+ int2& d1, int2& d2, S32& area)
46
+ {
47
+ // Backfacing or degenerate => cull.
48
+
49
+ d1 = make_int2(p1.x - p0.x, p1.y - p0.y);
50
+ d2 = make_int2(p2.x - p0.x, p2.y - p0.y);
51
+ area = d1.x * d2.y - d1.y * d2.x;
52
+
53
+ if (area == 0)
54
+ return false; // Degenerate.
55
+
56
+ if (area < 0 && (p.renderModeFlags & CudaRaster::RenderModeFlag_EnableBackfaceCulling) != 0)
57
+ return false; // Backfacing.
58
+
59
+ // AABB falls between samples => cull.
60
+
61
+ int sampleSize = 1 << CR_SUBPIXEL_LOG2;
62
+ int biasX = (p.widthPixelsVp << (CR_SUBPIXEL_LOG2 - 1)) - (sampleSize >> 1);
63
+ int biasY = (p.heightPixelsVp << (CR_SUBPIXEL_LOG2 - 1)) - (sampleSize >> 1);
64
+ int lox = (int)add_add(lo.x, sampleSize - 1, biasX) & -sampleSize;
65
+ int loy = (int)add_add(lo.y, sampleSize - 1, biasY) & -sampleSize;
66
+ int hix = (hi.x + biasX) & -sampleSize;
67
+ int hiy = (hi.y + biasY) & -sampleSize;
68
+
69
+ if (lox > hix || loy > hiy)
70
+ return false; // Between pixels.
71
+
72
+ // AABB covers 1 or 2 samples => cull if they are not covered.
73
+
74
+ int diff = add_sub(hix, hiy, lox) - loy;
75
+ if (diff <= sampleSize)
76
+ {
77
+ int2 t0 = make_int2(add_sub(p0.x, biasX, lox), add_sub(p0.y, biasY, loy));
78
+ int2 t1 = make_int2(add_sub(p1.x, biasX, lox), add_sub(p1.y, biasY, loy));
79
+ int2 t2 = make_int2(add_sub(p2.x, biasX, lox), add_sub(p2.y, biasY, loy));
80
+ S32 e0 = t0.x * t1.y - t0.y * t1.x;
81
+ S32 e1 = t1.x * t2.y - t1.y * t2.x;
82
+ S32 e2 = t2.x * t0.y - t2.y * t0.x;
83
+ if (area < 0)
84
+ {
85
+ e0 = -e0;
86
+ e1 = -e1;
87
+ e2 = -e2;
88
+ }
89
+
90
+ if (e0 < 0 || e1 < 0 || e2 < 0)
91
+ {
92
+ if (diff == 0)
93
+ return false; // Between pixels.
94
+
95
+ t0 = make_int2(add_sub(p0.x, biasX, hix), add_sub(p0.y, biasY, hiy));
96
+ t1 = make_int2(add_sub(p1.x, biasX, hix), add_sub(p1.y, biasY, hiy));
97
+ t2 = make_int2(add_sub(p2.x, biasX, hix), add_sub(p2.y, biasY, hiy));
98
+ e0 = t0.x * t1.y - t0.y * t1.x;
99
+ e1 = t1.x * t2.y - t1.y * t2.x;
100
+ e2 = t2.x * t0.y - t2.y * t0.x;
101
+ if (area < 0)
102
+ {
103
+ e0 = -e0;
104
+ e1 = -e1;
105
+ e2 = -e2;
106
+ }
107
+
108
+ if (e0 < 0 || e1 < 0 || e2 < 0)
109
+ return false; // Between pixels.
110
+ }
111
+ }
112
+
113
+ // Otherwise => proceed to output the triangle.
114
+
115
+ return true; // Visible.
116
+ }
117
+
118
+ //------------------------------------------------------------------------
119
+
120
+ __device__ __inline__ void setupTriangle(
121
+ const CRParams& p,
122
+ CRTriangleHeader* th, CRTriangleData* td, int triId,
123
+ float v0z, float v1z, float v2z,
124
+ int2 p0, int2 p1, int2 p2, float3 rcpW,
125
+ int2 d1, int2 d2, S32 area)
126
+ {
127
+ // Swap vertices 1 and 2 if area is negative. Only executed if backface culling is
128
+ // disabled (if it is enabled, we never come here with area < 0).
129
+
130
+ if (area < 0)
131
+ {
132
+ swap(d1, d2);
133
+ swap(p1, p2);
134
+ swap(v1z, v2z);
135
+ swap(rcpW.y, rcpW.z);
136
+ area = -area;
137
+ }
138
+
139
+ int2 wv0;
140
+ wv0.x = p0.x + (p.widthPixelsVp << (CR_SUBPIXEL_LOG2 - 1));
141
+ wv0.y = p0.y + (p.heightPixelsVp << (CR_SUBPIXEL_LOG2 - 1));
142
+
143
+ // Setup depth plane equation.
144
+
145
+ F32 zcoef = (F32)(CR_DEPTH_MAX - CR_DEPTH_MIN) * 0.5f;
146
+ F32 zbias = (F32)(CR_DEPTH_MAX + CR_DEPTH_MIN) * 0.5f;
147
+ float3 zvert = make_float3(
148
+ (v0z * zcoef) * rcpW.x + zbias,
149
+ (v1z * zcoef) * rcpW.y + zbias,
150
+ (v2z * zcoef) * rcpW.z + zbias
151
+ );
152
+ int2 zv0 = make_int2(
153
+ wv0.x - (1 << (CR_SUBPIXEL_LOG2 - 1)),
154
+ wv0.y - (1 << (CR_SUBPIXEL_LOG2 - 1))
155
+ );
156
+ uint3 zpleq = setupPleq(zvert, zv0, d1, d2, 1.0f / (F32)area);
157
+
158
+ U32 zmin = f32_to_u32_sat(fminf(fminf(zvert.x, zvert.y), zvert.z) - (F32)CR_LERP_ERROR(0));
159
+
160
+ // Write CRTriangleData.
161
+
162
+ *(uint4*)td = make_uint4(zpleq.x, zpleq.y, zpleq.z, triId);
163
+
164
+ // Determine flipbits.
165
+
166
+ U32 f01 = cover8x8_selectFlips(d1.x, d1.y);
167
+ U32 f12 = cover8x8_selectFlips(d2.x - d1.x, d2.y - d1.y);
168
+ U32 f20 = cover8x8_selectFlips(-d2.x, -d2.y);
169
+
170
+ // Write CRTriangleHeader.
171
+
172
+ *(uint4*)th = make_uint4(
173
+ prmt(p0.x, p0.y, 0x5410),
174
+ prmt(p1.x, p1.y, 0x5410),
175
+ prmt(p2.x, p2.y, 0x5410),
176
+ (zmin & 0xfffff000u) | (f01 << 6) | (f12 << 2) | (f20 >> 2));
177
+ }
178
+
179
+ //------------------------------------------------------------------------
180
+
181
+ __device__ __inline__ void triangleSetupImpl(const CRParams p)
182
+ {
183
+ __shared__ F32 s_bary[CR_SETUP_WARPS * 32][18];
184
+ F32* bary = s_bary[threadIdx.x + threadIdx.y * 32];
185
+
186
+ // Compute task and image indices.
187
+
188
+ int taskIdx = threadIdx.x + 32 * (threadIdx.y + CR_SETUP_WARPS * blockIdx.x);
189
+ int imageIdx = 0;
190
+ if (p.instanceMode)
191
+ {
192
+ imageIdx = blockIdx.z;
193
+ if (taskIdx >= p.numTriangles)
194
+ return;
195
+ }
196
+ else
197
+ {
198
+ while (imageIdx < p.numImages)
199
+ {
200
+ int count = getImageParams(p, imageIdx).triCount;
201
+ if (taskIdx < count)
202
+ break;
203
+ taskIdx -= count;
204
+ imageIdx += 1;
205
+ }
206
+ if (imageIdx == p.numImages)
207
+ return;
208
+ }
209
+
210
+ // Per-image data structures.
211
+
212
+ const CRImageParams& ip = getImageParams(p, imageIdx);
213
+ CRAtomics& atomics = p.atomics[imageIdx];
214
+
215
+ const int* indexBuffer = (const int*)p.indexBuffer;
216
+ U8* triSubtris = (U8*)p.triSubtris + imageIdx * p.maxSubtris;
217
+ CRTriangleHeader* triHeader = (CRTriangleHeader*)p.triHeader + imageIdx * p.maxSubtris;
218
+ CRTriangleData* triData = (CRTriangleData*)p.triData + imageIdx * p.maxSubtris;
219
+
220
+ // Determine triangle index.
221
+
222
+ int triIdx = taskIdx;
223
+ if (!p.instanceMode)
224
+ triIdx += ip.triOffset;
225
+
226
+ // Read vertex indices.
227
+
228
+ if ((U32)triIdx >= (U32)p.numTriangles)
229
+ {
230
+ // Bad triangle index.
231
+ triSubtris[taskIdx] = 0;
232
+ return;
233
+ }
234
+
235
+ uint4 vidx;
236
+ vidx.x = indexBuffer[triIdx * 3 + 0];
237
+ vidx.y = indexBuffer[triIdx * 3 + 1];
238
+ vidx.z = indexBuffer[triIdx * 3 + 2];
239
+ vidx.w = triIdx + 1; // Triangle index.
240
+
241
+ if (vidx.x >= (U32)p.numVertices ||
242
+ vidx.y >= (U32)p.numVertices ||
243
+ vidx.z >= (U32)p.numVertices)
244
+ {
245
+ // Bad vertex index.
246
+ triSubtris[taskIdx] = 0;
247
+ return;
248
+ }
249
+
250
+ // Read vertex positions.
251
+
252
+ const float4* vertexBuffer = (const float4*)p.vertexBuffer;
253
+ if (p.instanceMode)
254
+ vertexBuffer += p.numVertices * imageIdx; // Instance offset.
255
+
256
+ float4 v0 = vertexBuffer[vidx.x];
257
+ float4 v1 = vertexBuffer[vidx.y];
258
+ float4 v2 = vertexBuffer[vidx.z];
259
+
260
+ // Adjust vertex positions according to current viewport size and offset.
261
+
262
+ v0.x = v0.x * p.xs + v0.w * p.xo;
263
+ v0.y = v0.y * p.ys + v0.w * p.yo;
264
+ v1.x = v1.x * p.xs + v1.w * p.xo;
265
+ v1.y = v1.y * p.ys + v1.w * p.yo;
266
+ v2.x = v2.x * p.xs + v2.w * p.xo;
267
+ v2.y = v2.y * p.ys + v2.w * p.yo;
268
+
269
+ // Outside view frustum => cull.
270
+
271
+ if (v0.w < fabsf(v0.x) | v0.w < fabsf(v0.y) | v0.w < fabsf(v0.z))
272
+ {
273
+ if ((v0.w < +v0.x & v1.w < +v1.x & v2.w < +v2.x) |
274
+ (v0.w < -v0.x & v1.w < -v1.x & v2.w < -v2.x) |
275
+ (v0.w < +v0.y & v1.w < +v1.y & v2.w < +v2.y) |
276
+ (v0.w < -v0.y & v1.w < -v1.y & v2.w < -v2.y) |
277
+ (v0.w < +v0.z & v1.w < +v1.z & v2.w < +v2.z) |
278
+ (v0.w < -v0.z & v1.w < -v1.z & v2.w < -v2.z))
279
+ {
280
+ triSubtris[taskIdx] = 0;
281
+ return;
282
+ }
283
+ }
284
+
285
+ // Inside depth range => try to snap vertices.
286
+
287
+ if (v0.w >= fabsf(v0.z) & v1.w >= fabsf(v1.z) & v2.w >= fabsf(v2.z))
288
+ {
289
+ // Inside S16 range and small enough => fast path.
290
+ // Note: aabbLimit comes from the fact that cover8x8
291
+ // does not support guardband with maximal viewport.
292
+
293
+ int2 p0, p1, p2, lo, hi;
294
+ float3 rcpW;
295
+
296
+ snapTriangle(p, v0, v1, v2, p0, p1, p2, rcpW, lo, hi);
297
+ S32 loxy = ::min(lo.x, lo.y);
298
+ S32 hixy = ::max(hi.x, hi.y);
299
+ S32 aabbLimit = (1 << (CR_MAXVIEWPORT_LOG2 + CR_SUBPIXEL_LOG2)) - 1;
300
+
301
+ if (loxy >= -32768 && hixy <= 32767 && hixy - loxy <= aabbLimit)
302
+ {
303
+ int2 d1, d2;
304
+ S32 area;
305
+ bool res = prepareTriangle(p, p0, p1, p2, lo, hi, d1, d2, area);
306
+ triSubtris[taskIdx] = res ? 1 : 0;
307
+
308
+ if (res)
309
+ setupTriangle(
310
+ p,
311
+ &triHeader[taskIdx], &triData[taskIdx], vidx.w,
312
+ v0.z, v1.z, v2.z,
313
+ p0, p1, p2, rcpW,
314
+ d1, d2, area);
315
+
316
+ return;
317
+ }
318
+ }
319
+
320
+ // Clip to view frustum.
321
+
322
+ float4 ov0 = v0;
323
+ float4 od1 = make_float4(v1.x - v0.x, v1.y - v0.y, v1.z - v0.z, v1.w - v0.w);
324
+ float4 od2 = make_float4(v2.x - v0.x, v2.y - v0.y, v2.z - v0.z, v2.w - v0.w);
325
+ int numVerts = clipTriangleWithFrustum(bary, &ov0.x, &v1.x, &v2.x, &od1.x, &od2.x);
326
+
327
+ // Count non-culled subtriangles.
328
+
329
+ v0.x = ov0.x + od1.x * bary[0] + od2.x * bary[1];
330
+ v0.y = ov0.y + od1.y * bary[0] + od2.y * bary[1];
331
+ v0.z = ov0.z + od1.z * bary[0] + od2.z * bary[1];
332
+ v0.w = ov0.w + od1.w * bary[0] + od2.w * bary[1];
333
+ v1.x = ov0.x + od1.x * bary[2] + od2.x * bary[3];
334
+ v1.y = ov0.y + od1.y * bary[2] + od2.y * bary[3];
335
+ v1.z = ov0.z + od1.z * bary[2] + od2.z * bary[3];
336
+ v1.w = ov0.w + od1.w * bary[2] + od2.w * bary[3];
337
+ float4 tv1 = v1;
338
+
339
+ int numSubtris = 0;
340
+ for (int i = 2; i < numVerts; i++)
341
+ {
342
+ v2.x = ov0.x + od1.x * bary[i * 2 + 0] + od2.x * bary[i * 2 + 1];
343
+ v2.y = ov0.y + od1.y * bary[i * 2 + 0] + od2.y * bary[i * 2 + 1];
344
+ v2.z = ov0.z + od1.z * bary[i * 2 + 0] + od2.z * bary[i * 2 + 1];
345
+ v2.w = ov0.w + od1.w * bary[i * 2 + 0] + od2.w * bary[i * 2 + 1];
346
+
347
+ int2 p0, p1, p2, lo, hi, d1, d2;
348
+ float3 rcpW;
349
+ S32 area;
350
+
351
+ snapTriangle(p, v0, v1, v2, p0, p1, p2, rcpW, lo, hi);
352
+ if (prepareTriangle(p, p0, p1, p2, lo, hi, d1, d2, area))
353
+ numSubtris++;
354
+
355
+ v1 = v2;
356
+ }
357
+
358
+ triSubtris[taskIdx] = numSubtris;
359
+
360
+ // Multiple subtriangles => allocate.
361
+
362
+ int subtriBase = taskIdx;
363
+ if (numSubtris > 1)
364
+ {
365
+ subtriBase = atomicAdd(&atomics.numSubtris, numSubtris);
366
+ triHeader[taskIdx].misc = subtriBase;
367
+ if (subtriBase + numSubtris > p.maxSubtris)
368
+ numVerts = 0;
369
+ }
370
+
371
+ // Setup subtriangles.
372
+
373
+ v1 = tv1;
374
+ for (int i = 2; i < numVerts; i++)
375
+ {
376
+ v2.x = ov0.x + od1.x * bary[i * 2 + 0] + od2.x * bary[i * 2 + 1];
377
+ v2.y = ov0.y + od1.y * bary[i * 2 + 0] + od2.y * bary[i * 2 + 1];
378
+ v2.z = ov0.z + od1.z * bary[i * 2 + 0] + od2.z * bary[i * 2 + 1];
379
+ v2.w = ov0.w + od1.w * bary[i * 2 + 0] + od2.w * bary[i * 2 + 1];
380
+
381
+ int2 p0, p1, p2, lo, hi, d1, d2;
382
+ float3 rcpW;
383
+ S32 area;
384
+
385
+ snapTriangle(p, v0, v1, v2, p0, p1, p2, rcpW, lo, hi);
386
+ if (prepareTriangle(p, p0, p1, p2, lo, hi, d1, d2, area))
387
+ {
388
+ setupTriangle(
389
+ p,
390
+ &triHeader[subtriBase], &triData[subtriBase], vidx.w,
391
+ v0.z, v1.z, v2.z,
392
+ p0, p1, p2, rcpW,
393
+ d1, d2, area);
394
+
395
+ subtriBase++;
396
+ }
397
+
398
+ v1 = v2;
399
+ }
400
+ }
401
+
402
+ //------------------------------------------------------------------------
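To make the fixed-point convention used by snapTriangle concrete: after the divide by w, clip-space x in [-1, 1] is scaled by widthPixelsVp << (CR_SUBPIXEL_LOG2 - 1), i.e. half the viewport width measured in subpixel units, and rounded with a saturating conversion. The standalone sketch below mirrors that mapping, assuming CR_SUBPIXEL_LOG2 = 4 purely for illustration (the real value is defined in Constants.hpp, not here).

#include <cmath>
#include <cstdio>

constexpr int kSubpixelLog2 = 4;   // assumption for illustration; not the library constant

// Mirrors the x-coordinate scaling in snapTriangle (without the saturating cvt).
int snapX(float clipX, float clipW, int widthPixelsVp)
{
    float viewScaleX = (float)(widthPixelsVp << (kSubpixelLog2 - 1));
    return (int)std::lrintf(clipX / clipW * viewScaleX);
}

int main()
{
    // NDC x = +0.5 in a 512-pixel viewport: 0.5 * 512 * 2^(4-1) = 2048 subpixel
    // units to the right of the viewport center.
    std::printf("%d\n", snapX(0.5f, 1.0f, 512));
    return 0;
}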
extensions/nvdiffrast/nvdiffrast/common/cudaraster/impl/Util.inl ADDED
@@ -0,0 +1,452 @@
1
+ // Copyright (c) 2009-2022, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "PrivateDefs.hpp"
10
+
11
+ namespace CR
12
+ {
13
+ //------------------------------------------------------------------------
14
+
15
+ template<class T> __device__ __inline__ void swap(T& a, T& b) { T t = a; a = b; b = t; }
16
+
17
+ __device__ __inline__ U32 getLo (U64 a) { return __double2loint(__longlong_as_double(a)); }
18
+ __device__ __inline__ S32 getLo (S64 a) { return __double2loint(__longlong_as_double(a)); }
19
+ __device__ __inline__ U32 getHi (U64 a) { return __double2hiint(__longlong_as_double(a)); }
20
+ __device__ __inline__ S32 getHi (S64 a) { return __double2hiint(__longlong_as_double(a)); }
21
+ __device__ __inline__ U64 combineLoHi (U32 lo, U32 hi) { return __double_as_longlong(__hiloint2double(hi, lo)); }
22
+ __device__ __inline__ S64 combineLoHi (S32 lo, S32 hi) { return __double_as_longlong(__hiloint2double(hi, lo)); }
23
+ __device__ __inline__ U32 getLaneMaskLt (void) { U32 r; asm("mov.u32 %0, %lanemask_lt;" : "=r"(r)); return r; }
24
+ __device__ __inline__ U32 getLaneMaskLe (void) { U32 r; asm("mov.u32 %0, %lanemask_le;" : "=r"(r)); return r; }
25
+ __device__ __inline__ U32 getLaneMaskGt (void) { U32 r; asm("mov.u32 %0, %lanemask_gt;" : "=r"(r)); return r; }
26
+ __device__ __inline__ U32 getLaneMaskGe (void) { U32 r; asm("mov.u32 %0, %lanemask_ge;" : "=r"(r)); return r; }
27
+ __device__ __inline__ int findLeadingOne (U32 v) { U32 r; asm("bfind.u32 %0, %1;" : "=r"(r) : "r"(v)); return r; }
28
+ __device__ __inline__ bool singleLane (void) { return ((::__ballot_sync(~0u, true) & getLaneMaskLt()) == 0); }
29
+
30
+ __device__ __inline__ void add_add_carry (U32& rlo, U32 alo, U32 blo, U32& rhi, U32 ahi, U32 bhi) { U64 r = combineLoHi(alo, ahi) + combineLoHi(blo, bhi); rlo = getLo(r); rhi = getHi(r); }
31
+ __device__ __inline__ S32 f32_to_s32_sat (F32 a) { S32 v; asm("cvt.rni.sat.s32.f32 %0, %1;" : "=r"(v) : "f"(a)); return v; }
32
+ __device__ __inline__ U32 f32_to_u32_sat (F32 a) { U32 v; asm("cvt.rni.sat.u32.f32 %0, %1;" : "=r"(v) : "f"(a)); return v; }
33
+ __device__ __inline__ U32 f32_to_u32_sat_rmi (F32 a) { U32 v; asm("cvt.rmi.sat.u32.f32 %0, %1;" : "=r"(v) : "f"(a)); return v; }
34
+ __device__ __inline__ U32 f32_to_u8_sat (F32 a) { U32 v; asm("cvt.rni.sat.u8.f32 %0, %1;" : "=r"(v) : "f"(a)); return v; }
35
+ __device__ __inline__ S64 f32_to_s64 (F32 a) { S64 v; asm("cvt.rni.s64.f32 %0, %1;" : "=l"(v) : "f"(a)); return v; }
36
+ __device__ __inline__ S32 add_s16lo_s16lo (S32 a, S32 b) { S32 v; asm("vadd.s32.s32.s32 %0, %1.h0, %2.h0;" : "=r"(v) : "r"(a), "r"(b)); return v; }
37
+ __device__ __inline__ S32 add_s16hi_s16lo (S32 a, S32 b) { S32 v; asm("vadd.s32.s32.s32 %0, %1.h1, %2.h0;" : "=r"(v) : "r"(a), "r"(b)); return v; }
38
+ __device__ __inline__ S32 add_s16lo_s16hi (S32 a, S32 b) { S32 v; asm("vadd.s32.s32.s32 %0, %1.h0, %2.h1;" : "=r"(v) : "r"(a), "r"(b)); return v; }
39
+ __device__ __inline__ S32 add_s16hi_s16hi (S32 a, S32 b) { S32 v; asm("vadd.s32.s32.s32 %0, %1.h1, %2.h1;" : "=r"(v) : "r"(a), "r"(b)); return v; }
40
+ __device__ __inline__ S32 sub_s16lo_s16lo (S32 a, S32 b) { S32 v; asm("vsub.s32.s32.s32 %0, %1.h0, %2.h0;" : "=r"(v) : "r"(a), "r"(b)); return v; }
41
+ __device__ __inline__ S32 sub_s16hi_s16lo (S32 a, S32 b) { S32 v; asm("vsub.s32.s32.s32 %0, %1.h1, %2.h0;" : "=r"(v) : "r"(a), "r"(b)); return v; }
42
+ __device__ __inline__ S32 sub_s16lo_s16hi (S32 a, S32 b) { S32 v; asm("vsub.s32.s32.s32 %0, %1.h0, %2.h1;" : "=r"(v) : "r"(a), "r"(b)); return v; }
43
+ __device__ __inline__ S32 sub_s16hi_s16hi (S32 a, S32 b) { S32 v; asm("vsub.s32.s32.s32 %0, %1.h1, %2.h1;" : "=r"(v) : "r"(a), "r"(b)); return v; }
44
+ __device__ __inline__ S32 sub_u16lo_u16lo (U32 a, U32 b) { S32 v; asm("vsub.s32.u32.u32 %0, %1.h0, %2.h0;" : "=r"(v) : "r"(a), "r"(b)); return v; }
45
+ __device__ __inline__ S32 sub_u16hi_u16lo (U32 a, U32 b) { S32 v; asm("vsub.s32.u32.u32 %0, %1.h1, %2.h0;" : "=r"(v) : "r"(a), "r"(b)); return v; }
46
+ __device__ __inline__ S32 sub_u16lo_u16hi (U32 a, U32 b) { S32 v; asm("vsub.s32.u32.u32 %0, %1.h0, %2.h1;" : "=r"(v) : "r"(a), "r"(b)); return v; }
47
+ __device__ __inline__ S32 sub_u16hi_u16hi (U32 a, U32 b) { S32 v; asm("vsub.s32.u32.u32 %0, %1.h1, %2.h1;" : "=r"(v) : "r"(a), "r"(b)); return v; }
48
+ __device__ __inline__ U32 add_b0 (U32 a, U32 b) { U32 v; asm("vadd.u32.u32.u32 %0, %1.b0, %2;" : "=r"(v) : "r"(a), "r"(b)); return v; }
49
+ __device__ __inline__ U32 add_b1 (U32 a, U32 b) { U32 v; asm("vadd.u32.u32.u32 %0, %1.b1, %2;" : "=r"(v) : "r"(a), "r"(b)); return v; }
50
+ __device__ __inline__ U32 add_b2 (U32 a, U32 b) { U32 v; asm("vadd.u32.u32.u32 %0, %1.b2, %2;" : "=r"(v) : "r"(a), "r"(b)); return v; }
51
+ __device__ __inline__ U32 add_b3 (U32 a, U32 b) { U32 v; asm("vadd.u32.u32.u32 %0, %1.b3, %2;" : "=r"(v) : "r"(a), "r"(b)); return v; }
52
+ __device__ __inline__ U32 vmad_b0 (U32 a, U32 b, U32 c) { U32 v; asm("vmad.u32.u32.u32 %0, %1.b0, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
53
+ __device__ __inline__ U32 vmad_b1 (U32 a, U32 b, U32 c) { U32 v; asm("vmad.u32.u32.u32 %0, %1.b1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
54
+ __device__ __inline__ U32 vmad_b2 (U32 a, U32 b, U32 c) { U32 v; asm("vmad.u32.u32.u32 %0, %1.b2, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
55
+ __device__ __inline__ U32 vmad_b3 (U32 a, U32 b, U32 c) { U32 v; asm("vmad.u32.u32.u32 %0, %1.b3, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
56
+ __device__ __inline__ U32 vmad_b0_b3 (U32 a, U32 b, U32 c) { U32 v; asm("vmad.u32.u32.u32 %0, %1.b0, %2.b3, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
57
+ __device__ __inline__ U32 vmad_b1_b3 (U32 a, U32 b, U32 c) { U32 v; asm("vmad.u32.u32.u32 %0, %1.b1, %2.b3, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
58
+ __device__ __inline__ U32 vmad_b2_b3 (U32 a, U32 b, U32 c) { U32 v; asm("vmad.u32.u32.u32 %0, %1.b2, %2.b3, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
59
+ __device__ __inline__ U32 vmad_b3_b3 (U32 a, U32 b, U32 c) { U32 v; asm("vmad.u32.u32.u32 %0, %1.b3, %2.b3, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
60
+ __device__ __inline__ U32 add_mask8 (U32 a, U32 b) { U32 v; U32 z=0; asm("vadd.u32.u32.u32 %0.b0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(z)); return v; }
61
+ __device__ __inline__ U32 sub_mask8 (U32 a, U32 b) { U32 v; U32 z=0; asm("vsub.u32.u32.u32 %0.b0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(z)); return v; }
62
+ __device__ __inline__ S32 max_max (S32 a, S32 b, S32 c) { S32 v; asm("vmax.s32.s32.s32.max %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
63
+ __device__ __inline__ S32 min_min (S32 a, S32 b, S32 c) { S32 v; asm("vmin.s32.s32.s32.min %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
64
+ __device__ __inline__ S32 max_add (S32 a, S32 b, S32 c) { S32 v; asm("vmax.s32.s32.s32.add %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
65
+ __device__ __inline__ S32 min_add (S32 a, S32 b, S32 c) { S32 v; asm("vmin.s32.s32.s32.add %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
66
+ __device__ __inline__ U32 add_add (U32 a, U32 b, U32 c) { U32 v; asm("vadd.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
67
+ __device__ __inline__ U32 sub_add (U32 a, U32 b, U32 c) { U32 v; asm("vsub.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
68
+ __device__ __inline__ U32 add_sub (U32 a, U32 b, U32 c) { U32 v; asm("vsub.u32.u32.u32.add %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(c), "r"(b)); return v; }
69
+ __device__ __inline__ S32 add_clamp_0_x (S32 a, S32 b, S32 c) { S32 v; asm("vadd.u32.s32.s32.sat.min %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
70
+ __device__ __inline__ S32 add_clamp_b0 (S32 a, S32 b, S32 c) { S32 v; asm("vadd.u32.s32.s32.sat %0.b0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
71
+ __device__ __inline__ S32 add_clamp_b2 (S32 a, S32 b, S32 c) { S32 v; asm("vadd.u32.s32.s32.sat %0.b2, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
72
+ __device__ __inline__ U32 prmt (U32 a, U32 b, U32 c) { U32 v; asm("prmt.b32 %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
73
+ __device__ __inline__ S32 u32lo_sext (U32 a) { U32 v; asm("cvt.s16.u32 %0, %1;" : "=r"(v) : "r"(a)); return v; }
74
+ __device__ __inline__ U32 slct (U32 a, U32 b, S32 c) { U32 v; asm("slct.u32.s32 %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
75
+ __device__ __inline__ S32 slct (S32 a, S32 b, S32 c) { S32 v; asm("slct.s32.s32 %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; }
76
+ __device__ __inline__ F32 slct (F32 a, F32 b, S32 c) { F32 v; asm("slct.f32.s32 %0, %1, %2, %3;" : "=f"(v) : "f"(a), "f"(b), "r"(c)); return v; }
77
+ __device__ __inline__ U32 isetge (S32 a, S32 b) { U32 v; asm("set.ge.u32.s32 %0, %1, %2;" : "=r"(v) : "r"(a), "r"(b)); return v; }
78
+ __device__ __inline__ F64 rcp_approx (F64 a) { F64 v; asm("rcp.approx.ftz.f64 %0, %1;" : "=d"(v) : "d"(a)); return v; }
79
+ __device__ __inline__ F32 fma_rm (F32 a, F32 b, F32 c) { F32 v; asm("fma.rm.f32 %0, %1, %2, %3;" : "=f"(v) : "f"(a), "f"(b), "f"(c)); return v; }
80
+ __device__ __inline__ U32 idiv_fast (U32 a, U32 b);
81
+
82
+ __device__ __inline__ uint3 setupPleq (float3 values, int2 v0, int2 d1, int2 d2, F32 areaRcp);
83
+
84
+ __device__ __inline__ void cover8x8_setupLUT (volatile U64* lut);
85
+ __device__ __inline__ U64 cover8x8_exact_fast (S32 ox, S32 oy, S32 dx, S32 dy, U32 flips, volatile const U64* lut); // Assumes viewport <= 2^11, subpixels <= 2^4, no guardband.
86
+ __device__ __inline__ U64 cover8x8_lookupMask (S64 yinit, U32 yinc, U32 flips, volatile const U64* lut);
87
+
88
+ __device__ __inline__ U64 cover8x8_exact_noLUT (S32 ox, S32 oy, S32 dx, S32 dy); // optimized reference implementation, does not require look-up table
89
+ __device__ __inline__ U64 cover8x8_conservative_noLUT (S32 ox, S32 oy, S32 dx, S32 dy);
90
+ __device__ __inline__ U64 cover8x8_generateMask_noLUT (S32 curr, S32 dx, S32 dy);
91
+
92
+ template <class T> __device__ __inline__ void sortShared(T* ptr, int numItems); // Assumes that numItems <= threadsInBlock. Must sync before & after the call.
93
+
94
+ __device__ __inline__ const CRImageParams& getImageParams(const CRParams& p, int idx)
95
+ {
96
+ return (idx < CR_EMBED_IMAGE_PARAMS) ? p.imageParamsFirst[idx] : p.imageParamsExtra[idx - CR_EMBED_IMAGE_PARAMS];
97
+ }
98
+
99
+ //------------------------------------------------------------------------
100
+
101
+ __device__ __inline__ int clipPolygonWithPlane(F32* baryOut, const F32* baryIn, int numIn, F32 v0, F32 v1, F32 v2)
102
+ {
103
+ int numOut = 0;
104
+ if (numIn >= 3)
105
+ {
106
+ int ai = (numIn - 1) * 2;
107
+ F32 av = v0 + v1 * baryIn[ai + 0] + v2 * baryIn[ai + 1];
108
+ for (int bi = 0; bi < numIn * 2; bi += 2)
109
+ {
110
+ F32 bv = v0 + v1 * baryIn[bi + 0] + v2 * baryIn[bi + 1];
111
+ if (av * bv < 0.0f)
112
+ {
113
+ F32 bc = av / (av - bv);
114
+ F32 ac = 1.0f - bc;
115
+ baryOut[numOut + 0] = baryIn[ai + 0] * ac + baryIn[bi + 0] * bc;
116
+ baryOut[numOut + 1] = baryIn[ai + 1] * ac + baryIn[bi + 1] * bc;
117
+ numOut += 2;
118
+ }
119
+ if (bv >= 0.0f)
120
+ {
121
+ baryOut[numOut + 0] = baryIn[bi + 0];
122
+ baryOut[numOut + 1] = baryIn[bi + 1];
123
+ numOut += 2;
124
+ }
125
+ ai = bi;
126
+ av = bv;
127
+ }
128
+ }
129
+ return (numOut >> 1);
130
+ }
131
+
132
+ //------------------------------------------------------------------------
133
+
134
+ __device__ __inline__ int clipTriangleWithFrustum(F32* bary, const F32* v0, const F32* v1, const F32* v2, const F32* d1, const F32* d2)
135
+ {
136
+ int num = 3;
137
+ bary[0] = 0.0f, bary[1] = 0.0f;
138
+ bary[2] = 1.0f, bary[3] = 0.0f;
139
+ bary[4] = 0.0f, bary[5] = 1.0f;
140
+
141
+ if ((v0[3] < fabsf(v0[0])) | (v1[3] < fabsf(v1[0])) | (v2[3] < fabsf(v2[0])))
142
+ {
143
+ F32 temp[18];
144
+ num = clipPolygonWithPlane(temp, bary, num, v0[3] + v0[0], d1[3] + d1[0], d2[3] + d2[0]);
145
+ num = clipPolygonWithPlane(bary, temp, num, v0[3] - v0[0], d1[3] - d1[0], d2[3] - d2[0]);
146
+ }
147
+ if ((v0[3] < fabsf(v0[1])) | (v1[3] < fabsf(v1[1])) | (v2[3] < fabsf(v2[1])))
148
+ {
149
+ F32 temp[18];
150
+ num = clipPolygonWithPlane(temp, bary, num, v0[3] + v0[1], d1[3] + d1[1], d2[3] + d2[1]);
151
+ num = clipPolygonWithPlane(bary, temp, num, v0[3] - v0[1], d1[3] - d1[1], d2[3] - d2[1]);
152
+ }
153
+ if ((v0[3] < fabsf(v0[2])) | (v1[3] < fabsf(v1[2])) | (v2[3] < fabsf(v2[2])))
154
+ {
155
+ F32 temp[18];
156
+ num = clipPolygonWithPlane(temp, bary, num, v0[3] + v0[2], d1[3] + d1[2], d2[3] + d2[2]);
157
+ num = clipPolygonWithPlane(bary, temp, num, v0[3] - v0[2], d1[3] - d1[2], d2[3] - d2[2]);
158
+ }
159
+ return num;
160
+ }
161
+
162
+ //------------------------------------------------------------------------
163
+
164
+ __device__ __inline__ U32 idiv_fast(U32 a, U32 b)
165
+ {
166
+ return f32_to_u32_sat_rmi(((F32)a + 0.5f) / (F32)b);
167
+ }
168
+
169
+ //------------------------------------------------------------------------
170
+
171
+ __device__ __inline__ U32 toABGR(float4 color)
172
+ {
173
+ // 11 instructions: 4*FFMA, 4*F2I, 3*PRMT
174
+ U32 x = f32_to_u32_sat_rmi(fma_rm(color.x, (1 << 24) * 255.0f, (1 << 24) * 0.5f));
175
+ U32 y = f32_to_u32_sat_rmi(fma_rm(color.y, (1 << 24) * 255.0f, (1 << 24) * 0.5f));
176
+ U32 z = f32_to_u32_sat_rmi(fma_rm(color.z, (1 << 24) * 255.0f, (1 << 24) * 0.5f));
177
+ U32 w = f32_to_u32_sat_rmi(fma_rm(color.w, (1 << 24) * 255.0f, (1 << 24) * 0.5f));
178
+ return prmt(prmt(x, y, 0x0073), prmt(z, w, 0x0073), 0x5410);
179
+ }
180
+
181
+ //------------------------------------------------------------------------
182
+ // v0 = subpixels relative to the bottom-left sampling point
183
+
184
+ __device__ __inline__ uint3 setupPleq(float3 values, int2 v0, int2 d1, int2 d2, F32 areaRcp)
185
+ {
186
+ F32 mx = fmaxf(fmaxf(values.x, values.y), values.z);
187
+ int sh = ::min(::max((__float_as_int(mx) >> 23) - (127 + 22), 0), 8);
188
+ S32 t0 = (U32)values.x >> sh;
189
+ S32 t1 = ((U32)values.y >> sh) - t0;
190
+ S32 t2 = ((U32)values.z >> sh) - t0;
191
+
192
+ U32 rcpMant = (__float_as_int(areaRcp) & 0x007FFFFF) | 0x00800000;
193
+ int rcpShift = (23 + 127) - (__float_as_int(areaRcp) >> 23);
194
+
195
+ uint3 pleq;
196
+ S64 xc = ((S64)t1 * d2.y - (S64)t2 * d1.y) * rcpMant;
197
+ S64 yc = ((S64)t2 * d1.x - (S64)t1 * d2.x) * rcpMant;
198
+ pleq.x = (U32)(xc >> (rcpShift - (sh + CR_SUBPIXEL_LOG2)));
199
+ pleq.y = (U32)(yc >> (rcpShift - (sh + CR_SUBPIXEL_LOG2)));
200
+
201
+ S32 centerX = (v0.x * 2 + min_min(d1.x, d2.x, 0) + max_max(d1.x, d2.x, 0)) >> (CR_SUBPIXEL_LOG2 + 1);
202
+ S32 centerY = (v0.y * 2 + min_min(d1.y, d2.y, 0) + max_max(d1.y, d2.y, 0)) >> (CR_SUBPIXEL_LOG2 + 1);
203
+ S32 vcx = v0.x - (centerX << CR_SUBPIXEL_LOG2);
204
+ S32 vcy = v0.y - (centerY << CR_SUBPIXEL_LOG2);
205
+
206
+ pleq.z = t0 << sh;
207
+ pleq.z -= (U32)(((xc >> 13) * vcx + (yc >> 13) * vcy) >> (rcpShift - (sh + 13)));
208
+ pleq.z -= pleq.x * centerX + pleq.y * centerY;
209
+ return pleq;
210
+ }
211
+
212
+ //------------------------------------------------------------------------
213
+
214
+ __device__ __inline__ void cover8x8_setupLUT(volatile U64* lut)
215
+ {
216
+ for (S32 lutIdx = threadIdx.x + blockDim.x * threadIdx.y; lutIdx < CR_COVER8X8_LUT_SIZE; lutIdx += blockDim.x * blockDim.y)
217
+ {
218
+ int half = (lutIdx < (12 << 5)) ? 0 : 1;
219
+ int yint = (lutIdx >> 5) - half * 12 - 3;
220
+ U32 shape = ((lutIdx >> 2) & 7) << (31 - 2);
221
+ S32 slctSwapXY = lutIdx << (31 - 1);
222
+ S32 slctNegX = lutIdx << (31 - 0);
223
+ S32 slctCompl = slctSwapXY ^ slctNegX;
224
+
225
+ U64 mask = 0;
226
+ int xlo = half * 4;
227
+ int xhi = xlo + 4;
228
+ for (int x = xlo; x < xhi; x++)
229
+ {
230
+ int ylo = slct(0, ::max(yint, 0), slctCompl);
231
+ int yhi = slct(::min(yint, 8), 8, slctCompl);
232
+ for (int y = ylo; y < yhi; y++)
233
+ {
234
+ int xx = slct(x, y, slctSwapXY);
235
+ int yy = slct(y, x, slctSwapXY);
236
+ xx = slct(xx, 7 - xx, slctNegX);
237
+ mask |= (U64)1 << (xx + yy * 8);
238
+ }
239
+ yint += shape >> 31;
240
+ shape <<= 1;
241
+ }
242
+ lut[lutIdx] = mask;
243
+ }
244
+ }
245
+
246
+ //------------------------------------------------------------------------
247
+
248
+ __device__ __inline__ U64 cover8x8_exact_fast(S32 ox, S32 oy, S32 dx, S32 dy, U32 flips, volatile const U64* lut) // 52 instr
249
+ {
250
+ F32 yinitBias = (F32)(1 << (31 - CR_MAXVIEWPORT_LOG2 - CR_SUBPIXEL_LOG2 * 2));
251
+ F32 yinitScale = (F32)(1 << (32 - CR_SUBPIXEL_LOG2));
252
+ F32 yincScale = 65536.0f * 65536.0f;
253
+
254
+ S32 slctFlipY = flips << (31 - CR_FLIPBIT_FLIP_Y);
255
+ S32 slctFlipX = flips << (31 - CR_FLIPBIT_FLIP_X);
256
+ S32 slctSwapXY = flips << (31 - CR_FLIPBIT_SWAP_XY);
257
+
258
+ // Evaluate cross product.
259
+
260
+ S32 t = ox * dy - oy * dx;
261
+ F32 det = (F32)slct(t, t - dy * (7 << CR_SUBPIXEL_LOG2), slctFlipX);
262
+ if (flips >= (1 << CR_FLIPBIT_COMPL))
263
+ det = -det;
264
+
265
+ // Represent Y as a function of X.
266
+
267
+ F32 xrcp = 1.0f / (F32)::abs(slct(dx, dy, slctSwapXY));
268
+ F32 yzero = det * yinitScale * xrcp + yinitBias;
269
+ S64 yinit = f32_to_s64(slct(yzero, -yzero, slctFlipY));
270
+ U32 yinc = f32_to_u32_sat((F32)::abs(slct(dy, dx, slctSwapXY)) * xrcp * yincScale);
271
+
272
+ // Lookup.
273
+
274
+ return cover8x8_lookupMask(yinit, yinc, flips, lut);
275
+ }
276
+
277
+ //------------------------------------------------------------------------
278
+
279
+ __device__ __inline__ U64 cover8x8_lookupMask(S64 yinit, U32 yinc, U32 flips, volatile const U64* lut)
280
+ {
281
+ // First half.
282
+
283
+ U32 yfrac = getLo(yinit);
284
+ U32 shape = add_clamp_0_x(getHi(yinit) + 4, 0, 11);
285
+ add_add_carry(yfrac, yfrac, yinc, shape, shape, shape);
286
+ add_add_carry(yfrac, yfrac, yinc, shape, shape, shape);
287
+ add_add_carry(yfrac, yfrac, yinc, shape, shape, shape);
288
+ int oct = flips & ((1 << CR_FLIPBIT_FLIP_X) | (1 << CR_FLIPBIT_SWAP_XY));
289
+ U64 mask = *(U64*)((U8*)lut + oct + (shape << 5));
290
+
291
+ // Second half.
292
+
293
+ add_add_carry(yfrac, yfrac, yinc, shape, shape, shape);
294
+ shape = add_clamp_0_x(getHi(yinit) + 4, __popc(shape & 15), 11);
295
+ add_add_carry(yfrac, yfrac, yinc, shape, shape, shape);
296
+ add_add_carry(yfrac, yfrac, yinc, shape, shape, shape);
297
+ add_add_carry(yfrac, yfrac, yinc, shape, shape, shape);
298
+ mask |= *(U64*)((U8*)lut + oct + (shape << 5) + (12 << 8));
299
+ return (flips >= (1 << CR_FLIPBIT_COMPL)) ? ~mask : mask;
300
+ }
301
+
302
+ //------------------------------------------------------------------------
303
+
304
+ __device__ __inline__ U64 cover8x8_exact_noLUT(S32 ox, S32 oy, S32 dx, S32 dy)
305
+ {
306
+ S32 curr = ox * dy - oy * dx;
307
+ if (dy > 0 || (dy == 0 && dx <= 0)) curr--; // exclusive
308
+ return cover8x8_generateMask_noLUT(curr, dx, dy);
309
+ }
310
+
311
+ //------------------------------------------------------------------------
312
+
313
+ __device__ __inline__ U64 cover8x8_conservative_noLUT(S32 ox, S32 oy, S32 dx, S32 dy)
314
+ {
315
+ S32 curr = ox * dy - oy * dx;
316
+ if (dy > 0 || (dy == 0 && dx <= 0)) curr--; // exclusive
317
+ curr += (::abs(dx) + ::abs(dy)) << (CR_SUBPIXEL_LOG2 - 1);
318
+ return cover8x8_generateMask_noLUT(curr, dx, dy);
319
+ }
320
+
321
+ //------------------------------------------------------------------------
322
+
323
+ __device__ __inline__ U64 cover8x8_generateMask_noLUT(S32 curr, S32 dx, S32 dy)
324
+ {
325
+ curr += (dx - dy) * (7 << CR_SUBPIXEL_LOG2);
326
+ S32 stepX = dy << (CR_SUBPIXEL_LOG2 + 1);
327
+ S32 stepYorig = -dx - dy * 7;
328
+ S32 stepY = stepYorig << (CR_SUBPIXEL_LOG2 + 1);
329
+
330
+ U32 hi = isetge(curr, 0);
331
+ U32 frac = curr + curr;
332
+ for (int i = 62; i >= 32; i--)
333
+ add_add_carry(frac, frac, ((i & 7) == 7) ? stepY : stepX, hi, hi, hi);
334
+
335
+ U32 lo = 0;
336
+ for (int i = 31; i >= 0; i--)
337
+ add_add_carry(frac, frac, ((i & 7) == 7) ? stepY : stepX, lo, lo, lo);
338
+
339
+ lo ^= lo >> 1, hi ^= hi >> 1;
340
+ lo ^= lo >> 2, hi ^= hi >> 2;
341
+ lo ^= lo >> 4, hi ^= hi >> 4;
342
+ lo ^= lo >> 8, hi ^= hi >> 8;
343
+ lo ^= lo >> 16, hi ^= hi >> 16;
344
+
345
+ if (dy < 0)
346
+ {
347
+ lo ^= 0x55AA55AA;
348
+ hi ^= 0x55AA55AA;
349
+ }
350
+ if (stepYorig < 0)
351
+ {
352
+ lo ^= 0xFF00FF00;
353
+ hi ^= 0x00FF00FF;
354
+ }
355
+ if ((hi & 1) != 0)
356
+ lo = ~lo;
357
+
358
+ return combineLoHi(lo, hi);
359
+ }
360
+
361
+ //------------------------------------------------------------------------
362
+
363
+ template <class T> __device__ __inline__ void sortShared(T* ptr, int numItems)
364
+ {
365
+ int thrInBlock = threadIdx.x + threadIdx.y * blockDim.x;
366
+ int range = 16;
367
+
368
+ // Use transposition sort within each 16-wide subrange.
369
+
370
+ int base = thrInBlock * 2;
371
+ bool act = (base < numItems - 1);
372
+ U32 actMask = __ballot_sync(~0u, act);
373
+ if (act)
374
+ {
375
+ bool tryOdd = (base < numItems - 2 && (~base & (range - 2)) != 0);
376
+ T mid = ptr[base + 1];
377
+
378
+ for (int iter = 0; iter < range; iter += 2)
379
+ {
380
+ // Evens.
381
+
382
+ T tmp = ptr[base + 0];
383
+ if (tmp > mid)
384
+ {
385
+ ptr[base + 0] = mid;
386
+ mid = tmp;
387
+ }
388
+ __syncwarp(actMask);
389
+
390
+ // Odds.
391
+
392
+ if (tryOdd)
393
+ {
394
+ tmp = ptr[base + 2];
395
+ if (mid > tmp)
396
+ {
397
+ ptr[base + 2] = mid;
398
+ mid = tmp;
399
+ }
400
+ }
401
+ __syncwarp(actMask);
402
+ }
403
+ ptr[base + 1] = mid;
404
+ }
405
+
406
+ // Multiple subranges => Merge hierarchically.
407
+
408
+ for (; range < numItems; range <<= 1)
409
+ {
410
+ // Assuming that we would insert the current item into the other
411
+ // subrange, use binary search to find the appropriate slot.
412
+
413
+ __syncthreads();
414
+
415
+ T item;
416
+ int slot;
417
+ if (thrInBlock < numItems)
418
+ {
419
+ item = ptr[thrInBlock];
420
+ slot = (thrInBlock & -range) ^ range;
421
+ if (slot < numItems)
422
+ {
423
+ T tmp = ptr[slot];
424
+ bool inclusive = ((thrInBlock & range) != 0);
425
+ if (tmp < item || (inclusive && tmp == item))
426
+ {
427
+ for (int step = (range >> 1); step != 0; step >>= 1)
428
+ {
429
+ int probe = slot + step;
430
+ if (probe < numItems)
431
+ {
432
+ tmp = ptr[probe];
433
+ if (tmp < item || (inclusive && tmp == item))
434
+ slot = probe;
435
+ }
436
+ }
437
+ slot++;
438
+ }
439
+ }
440
+ }
441
+
442
+ // Store the item at an appropriate place.
443
+
444
+ __syncthreads();
445
+
446
+ if (thrInBlock < numItems)
447
+ ptr[slot + (thrInBlock & (range * 2 - 1)) - range] = item;
448
+ }
449
+ }
450
+
451
+ //------------------------------------------------------------------------
452
+ }
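The cover8x8 helpers above pack the coverage of an 8x8 pixel tile into a single 64-bit mask, with bit (x + y * 8) set when pixel (x, y) of the tile is covered (see the mask |= (U64)1 << (xx + yy * 8) line in cover8x8_setupLUT). A minimal host-side sketch, assuming only that bit layout, for dumping such a mask when debugging:

#include <cstdint>
#include <cstdio>

// Print an 8x8 coverage mask, assuming bit (x + y*8) corresponds to pixel (x, y).
void printCoverageMask(uint64_t mask)
{
    for (int y = 7; y >= 0; --y)            // print the top row first
    {
        for (int x = 0; x < 8; ++x)
            std::putchar(((mask >> (x + y * 8)) & 1) ? '#' : '.');
        std::putchar('\n');
    }
}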
extensions/nvdiffrast/nvdiffrast/common/framework.h ADDED
@@ -0,0 +1,49 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+
11
+ // Framework-specific macros to enable code sharing.
12
+
13
+ //------------------------------------------------------------------------
14
+ // Tensorflow.
15
+
16
+ #ifdef NVDR_TENSORFLOW
17
+ #define EIGEN_USE_GPU
18
+ #include "tensorflow/core/framework/op.h"
19
+ #include "tensorflow/core/framework/op_kernel.h"
20
+ #include "tensorflow/core/framework/shape_inference.h"
21
+ #include "tensorflow/core/platform/default/logging.h"
22
+ using namespace tensorflow;
23
+ using namespace tensorflow::shape_inference;
24
+ #define NVDR_CTX_ARGS OpKernelContext* _nvdr_ctx
25
+ #define NVDR_CTX_PARAMS _nvdr_ctx
26
+ #define NVDR_CHECK(COND, ERR) OP_REQUIRES(_nvdr_ctx, COND, errors::Internal(ERR))
27
+ #define NVDR_CHECK_CUDA_ERROR(CUDA_CALL) OP_CHECK_CUDA_ERROR(_nvdr_ctx, CUDA_CALL)
28
+ #define NVDR_CHECK_GL_ERROR(GL_CALL) OP_CHECK_GL_ERROR(_nvdr_ctx, GL_CALL)
29
+ #endif
30
+
31
+ //------------------------------------------------------------------------
32
+ // PyTorch.
33
+
34
+ #ifdef NVDR_TORCH
35
+ #ifndef __CUDACC__
36
+ #include <torch/extension.h>
37
+ #include <ATen/cuda/CUDAContext.h>
38
+ #include <ATen/cuda/CUDAUtils.h>
39
+ #include <c10/cuda/CUDAGuard.h>
40
+ #include <pybind11/numpy.h>
41
+ #endif
42
+ #define NVDR_CTX_ARGS int _nvdr_ctx_dummy
43
+ #define NVDR_CTX_PARAMS 0
44
+ #define NVDR_CHECK(COND, ERR) do { TORCH_CHECK(COND, ERR) } while(0)
45
+ #define NVDR_CHECK_CUDA_ERROR(CUDA_CALL) do { cudaError_t err = CUDA_CALL; TORCH_CHECK(!err, "Cuda error: ", cudaGetLastError(), "[", #CUDA_CALL, ";]"); } while(0)
46
+ #define NVDR_CHECK_GL_ERROR(GL_CALL) do { GL_CALL; GLenum err = glGetError(); TORCH_CHECK(err == GL_NO_ERROR, "OpenGL error: ", getGLErrorString(err), "[", #GL_CALL, ";]"); } while(0)
47
+ #endif
48
+
49
+ //------------------------------------------------------------------------
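The macros above let the same operator sources build against either TensorFlow or PyTorch. A hypothetical usage sketch follows; the function name and the specific checks are made up for illustration, and under -DNVDR_TORCH, NVDR_CTX_ARGS becomes a dummy int while NVDR_CHECK expands to TORCH_CHECK.

#include <cuda_runtime.h>
#include "framework.h"

// Hypothetical op helper showing how the framework macros are intended to be used.
void checkOutputSize(NVDR_CTX_ARGS, int width, int height)
{
    NVDR_CHECK(width > 0 && height > 0, "output size must be positive");
    NVDR_CHECK_CUDA_ERROR(cudaDeviceSynchronize());   // wraps a cudaError_t-returning call
}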
extensions/nvdiffrast/nvdiffrast/common/glutil.cpp ADDED
@@ -0,0 +1,403 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ //------------------------------------------------------------------------
10
+ // Common.
11
+ //------------------------------------------------------------------------
12
+
13
+ #include "framework.h"
14
+ #include "glutil.h"
15
+ #include <iostream>
16
+ #include <iomanip>
17
+
18
+ // Create the function pointers.
19
+ #define GLUTIL_EXT(return_type, name, ...) return_type (GLAPIENTRY* name)(__VA_ARGS__) = 0;
20
+ #include "glutil_extlist.h"
21
+ #undef GLUTIL_EXT
22
+
23
+ // Track initialization status.
24
+ static volatile bool s_glExtInitialized = false;
25
+
26
+ // Error strings.
27
+ const char* getGLErrorString(GLenum err)
28
+ {
29
+ switch(err)
30
+ {
31
+ case GL_NO_ERROR: return "GL_NO_ERROR";
32
+ case GL_INVALID_ENUM: return "GL_INVALID_ENUM";
33
+ case GL_INVALID_VALUE: return "GL_INVALID_VALUE";
34
+ case GL_INVALID_OPERATION: return "GL_INVALID_OPERATION";
35
+ case GL_STACK_OVERFLOW: return "GL_STACK_OVERFLOW";
36
+ case GL_STACK_UNDERFLOW: return "GL_STACK_UNDERFLOW";
37
+ case GL_OUT_OF_MEMORY: return "GL_OUT_OF_MEMORY";
38
+ case GL_INVALID_FRAMEBUFFER_OPERATION: return "GL_INVALID_FRAMEBUFFER_OPERATION";
39
+ case GL_TABLE_TOO_LARGE: return "GL_TABLE_TOO_LARGE";
40
+ case GL_CONTEXT_LOST: return "GL_CONTEXT_LOST";
41
+ }
42
+ return "Unknown error";
43
+ }
44
+
45
+ //------------------------------------------------------------------------
46
+ // Windows.
47
+ //------------------------------------------------------------------------
48
+
49
+ #ifdef _WIN32
50
+
51
+ static CRITICAL_SECTION getInitializedCriticalSection(void)
52
+ {
53
+ CRITICAL_SECTION cs;
54
+ InitializeCriticalSection(&cs);
55
+ return cs;
56
+ }
57
+
58
+ static CRITICAL_SECTION s_getProcAddressMutex = getInitializedCriticalSection();
59
+
60
+ static void safeGetProcAddress(const char* name, PROC* pfn)
61
+ {
62
+ PROC result = wglGetProcAddress(name);
63
+ if (!result)
64
+ {
65
+ LeaveCriticalSection(&s_getProcAddressMutex); // Prepare for thread exit.
66
+ LOG(FATAL) << "wglGetProcAddress() failed for '" << name << "'";
67
+ exit(1); // Should never get here but make sure we exit.
68
+ }
69
+ *pfn = result;
70
+ }
71
+
72
+ static void initializeGLExtensions(void)
73
+ {
74
+ // Use critical section for thread safety.
75
+ EnterCriticalSection(&s_getProcAddressMutex);
76
+
77
+ // Only dig function pointers if not done already.
78
+ if (!s_glExtInitialized)
79
+ {
80
+ // Generate code to populate the function pointers.
81
+ #define GLUTIL_EXT(return_type, name, ...) safeGetProcAddress(#name, (PROC*)&name);
82
+ #include "glutil_extlist.h"
83
+ #undef GLUTIL_EXT
84
+
85
+ // Mark as initialized.
86
+ s_glExtInitialized = true;
87
+ }
88
+
89
+ // Done.
90
+ LeaveCriticalSection(&s_getProcAddressMutex);
91
+ return;
92
+ }
93
+
94
+ void setGLContext(GLContext& glctx)
95
+ {
96
+ if (!glctx.hglrc)
97
+ LOG(FATAL) << "setGLContext() called with null glctx";
98
+ if (!wglMakeCurrent(glctx.hdc, glctx.hglrc))
99
+ LOG(FATAL) << "wglMakeCurrent() failed when setting GL context";
100
+
101
+ if (glctx.extInitialized)
102
+ return;
103
+ initializeGLExtensions();
104
+ glctx.extInitialized = 1;
105
+ }
106
+
107
+ void releaseGLContext(void)
108
+ {
109
+ if (!wglMakeCurrent(NULL, NULL))
110
+ LOG(FATAL) << "wglMakeCurrent() failed when releasing GL context";
111
+ }
112
+
113
+ extern "C" int set_gpu(const char*); // In setgpu.lib
114
+ GLContext createGLContext(int cudaDeviceIdx)
115
+ {
116
+ if (cudaDeviceIdx >= 0)
117
+ {
118
+ char pciBusId[256] = "";
119
+ LOG(INFO) << "Creating GL context for Cuda device " << cudaDeviceIdx;
120
+ if (cudaDeviceGetPCIBusId(pciBusId, 255, cudaDeviceIdx))
121
+ {
122
+ LOG(INFO) << "PCI bus id query failed";
123
+ }
124
+ else
125
+ {
126
+ int res = set_gpu(pciBusId);
127
+ LOG(INFO) << "Selecting device with PCI bus id " << pciBusId << " - " << (res ? "failed, expect crash or major slowdown" : "success");
128
+ }
129
+ }
130
+
131
+ HINSTANCE hInstance = GetModuleHandle(NULL);
132
+ WNDCLASS wc = {};
133
+ wc.style = CS_OWNDC;
134
+ wc.lpfnWndProc = DefWindowProc;
135
+ wc.hInstance = hInstance;
136
+ wc.lpszClassName = "__DummyGLClassCPP";
137
+ int res = RegisterClass(&wc);
138
+
139
+ HWND hwnd = CreateWindow(
140
+ "__DummyGLClassCPP", // lpClassName
141
+ "__DummyGLWindowCPP", // lpWindowName
142
+ WS_OVERLAPPEDWINDOW, // dwStyle
143
+ CW_USEDEFAULT, // x
144
+ CW_USEDEFAULT, // y
145
+ 0, 0, // nWidth, nHeight
146
+ NULL, NULL, // hWndParent, hMenu
147
+ hInstance, // hInstance
148
+ NULL // lpParam
149
+ );
150
+
151
+ PIXELFORMATDESCRIPTOR pfd = {};
152
+ pfd.dwFlags = PFD_SUPPORT_OPENGL;
153
+ pfd.iPixelType = PFD_TYPE_RGBA;
154
+ pfd.iLayerType = PFD_MAIN_PLANE;
155
+ pfd.cColorBits = 32;
156
+ pfd.cDepthBits = 24;
157
+ pfd.cStencilBits = 8;
158
+
159
+ HDC hdc = GetDC(hwnd);
160
+ int pixelformat = ChoosePixelFormat(hdc, &pfd);
161
+ SetPixelFormat(hdc, pixelformat, &pfd);
162
+
163
+ HGLRC hglrc = wglCreateContext(hdc);
164
+ LOG(INFO) << std::hex << std::setfill('0')
165
+ << "WGL OpenGL context created (hdc: 0x" << std::setw(8) << (uint32_t)(uintptr_t)hdc
166
+ << ", hglrc: 0x" << std::setw(8) << (uint32_t)(uintptr_t)hglrc << ")";
167
+
168
+ GLContext glctx = {hdc, hglrc, 0};
169
+ return glctx;
170
+ }
171
+
172
+ void destroyGLContext(GLContext& glctx)
173
+ {
174
+ if (!glctx.hglrc)
175
+ LOG(FATAL) << "destroyGLContext() called with null glctx";
176
+
177
+ // If this is the current context, release it.
178
+ if (wglGetCurrentContext() == glctx.hglrc)
179
+ releaseGLContext();
180
+
181
+ HWND hwnd = WindowFromDC(glctx.hdc);
182
+ if (!hwnd)
183
+ LOG(FATAL) << "WindowFromDC() failed";
184
+ if (!ReleaseDC(hwnd, glctx.hdc))
185
+ LOG(FATAL) << "ReleaseDC() failed";
186
+ if (!wglDeleteContext(glctx.hglrc))
187
+ LOG(FATAL) << "wglDeleteContext() failed";
188
+ if (!DestroyWindow(hwnd))
189
+ LOG(FATAL) << "DestroyWindow() failed";
190
+
191
+ LOG(INFO) << std::hex << std::setfill('0')
192
+ << "WGL OpenGL context destroyed (hdc: 0x" << std::setw(8) << (uint32_t)(uintptr_t)glctx.hdc
193
+ << ", hglrc: 0x" << std::setw(8) << (uint32_t)(uintptr_t)glctx.hglrc << ")";
194
+
195
+ memset(&glctx, 0, sizeof(GLContext));
196
+ }
197
+
198
+ #endif // _WIN32
199
+
200
+ //------------------------------------------------------------------------
201
+ // Linux.
202
+ //------------------------------------------------------------------------
203
+
204
+ #ifdef __linux__
205
+
206
+ static pthread_mutex_t s_getProcAddressMutex = PTHREAD_MUTEX_INITIALIZER;
207
+
208
+ typedef void (*PROCFN)();
209
+
210
+ static void safeGetProcAddress(const char* name, PROCFN* pfn)
211
+ {
212
+ PROCFN result = eglGetProcAddress(name);
213
+ if (!result)
214
+ {
215
+ pthread_mutex_unlock(&s_getProcAddressMutex); // Prepare for thread exit.
216
+ LOG(FATAL) << "eglGetProcAddress() failed for '" << name << "'";
217
+ exit(1); // Should never get here but make sure we exit.
218
+ }
219
+ *pfn = result;
220
+ }
221
+
222
+ static void initializeGLExtensions(void)
223
+ {
224
+ pthread_mutex_lock(&s_getProcAddressMutex);
225
+
226
+ // Only dig function pointers if not done already.
227
+ if (!s_glExtInitialized)
228
+ {
229
+ // Generate code to populate the function pointers.
230
+ #define GLUTIL_EXT(return_type, name, ...) safeGetProcAddress(#name, (PROCFN*)&name);
231
+ #include "glutil_extlist.h"
232
+ #undef GLUTIL_EXT
233
+
234
+ // Mark as initialized.
235
+ s_glExtInitialized = true;
236
+ }
237
+
238
+ pthread_mutex_unlock(&s_getProcAddressMutex);
239
+ return;
240
+ }
241
+
242
+ void setGLContext(GLContext& glctx)
243
+ {
244
+ if (!glctx.context)
245
+ LOG(FATAL) << "setGLContext() called with null glctx";
246
+
247
+ if (!eglMakeCurrent(glctx.display, EGL_NO_SURFACE, EGL_NO_SURFACE, glctx.context))
248
+ LOG(ERROR) << "eglMakeCurrent() failed when setting GL context";
249
+
250
+ if (glctx.extInitialized)
251
+ return;
252
+ initializeGLExtensions();
253
+ glctx.extInitialized = 1;
254
+ }
255
+
256
+ void releaseGLContext(void)
257
+ {
258
+ EGLDisplay display = eglGetCurrentDisplay();
259
+ if (display == EGL_NO_DISPLAY)
260
+ LOG(WARNING) << "releaseGLContext() called with no active display";
261
+ if (!eglMakeCurrent(display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT))
262
+ LOG(FATAL) << "eglMakeCurrent() failed when releasing GL context";
263
+ }
264
+
265
+ static EGLDisplay getCudaDisplay(int cudaDeviceIdx)
266
+ {
267
+ typedef EGLBoolean (*eglQueryDevicesEXT_t)(EGLint, EGLDeviceEXT, EGLint*);
268
+ typedef EGLBoolean (*eglQueryDeviceAttribEXT_t)(EGLDeviceEXT, EGLint, EGLAttrib*);
269
+ typedef EGLDisplay (*eglGetPlatformDisplayEXT_t)(EGLenum, void*, const EGLint*);
270
+
271
+ eglQueryDevicesEXT_t eglQueryDevicesEXT = (eglQueryDevicesEXT_t)eglGetProcAddress("eglQueryDevicesEXT");
272
+ if (!eglQueryDevicesEXT)
273
+ {
274
+ LOG(INFO) << "eglGetProcAddress(\"eglQueryDevicesEXT\") failed";
275
+ return 0;
276
+ }
277
+
278
+ eglQueryDeviceAttribEXT_t eglQueryDeviceAttribEXT = (eglQueryDeviceAttribEXT_t)eglGetProcAddress("eglQueryDeviceAttribEXT");
279
+ if (!eglQueryDeviceAttribEXT)
280
+ {
281
+ LOG(INFO) << "eglGetProcAddress(\"eglQueryDeviceAttribEXT\") failed";
282
+ return 0;
283
+ }
284
+
285
+ eglGetPlatformDisplayEXT_t eglGetPlatformDisplayEXT = (eglGetPlatformDisplayEXT_t)eglGetProcAddress("eglGetPlatformDisplayEXT");
286
+ if (!eglGetPlatformDisplayEXT)
287
+ {
288
+ LOG(INFO) << "eglGetProcAddress(\"eglGetPlatformDisplayEXT\") failed";
289
+ return 0;
290
+ }
291
+
292
+ int num_devices = 0;
293
+ eglQueryDevicesEXT(0, 0, &num_devices);
294
+ if (!num_devices)
295
+ return 0;
296
+
297
+ EGLDisplay display = 0;
298
+ EGLDeviceEXT* devices = (EGLDeviceEXT*)malloc(num_devices * sizeof(void*));
299
+ eglQueryDevicesEXT(num_devices, devices, &num_devices);
300
+ for (int i=0; i < num_devices; i++)
301
+ {
302
+ EGLDeviceEXT device = devices[i];
303
+ intptr_t value = -1;
304
+ if (eglQueryDeviceAttribEXT(device, EGL_CUDA_DEVICE_NV, &value) && value == cudaDeviceIdx)
305
+ {
306
+ display = eglGetPlatformDisplayEXT(EGL_PLATFORM_DEVICE_EXT, device, 0);
307
+ break;
308
+ }
309
+ }
310
+
311
+ free(devices);
312
+ return display;
313
+ }
314
+
315
+ GLContext createGLContext(int cudaDeviceIdx)
316
+ {
317
+ EGLDisplay display = 0;
318
+
319
+ if (cudaDeviceIdx >= 0)
320
+ {
321
+ char pciBusId[256] = "";
322
+ LOG(INFO) << "Creating GL context for Cuda device " << cudaDeviceIdx;
323
+ display = getCudaDisplay(cudaDeviceIdx);
324
+ if (!display)
325
+ LOG(INFO) << "Failed, falling back to default display";
326
+ }
327
+
328
+ if (!display)
329
+ {
330
+ display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
331
+ if (display == EGL_NO_DISPLAY)
332
+ LOG(FATAL) << "eglGetDisplay() failed";
333
+ }
334
+
335
+ EGLint major;
336
+ EGLint minor;
337
+ if (!eglInitialize(display, &major, &minor))
338
+ LOG(FATAL) << "eglInitialize() failed";
339
+
340
+ // Choose configuration.
341
+
342
+ const EGLint context_attribs[] = {
343
+ EGL_RED_SIZE, 8,
344
+ EGL_GREEN_SIZE, 8,
345
+ EGL_BLUE_SIZE, 8,
346
+ EGL_ALPHA_SIZE, 8,
347
+ EGL_DEPTH_SIZE, 24,
348
+ EGL_STENCIL_SIZE, 8,
349
+ EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,
350
+ EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
351
+ EGL_NONE
352
+ };
353
+
354
+ EGLConfig config;
355
+ EGLint num_config;
356
+ if (!eglChooseConfig(display, context_attribs, &config, 1, &num_config))
357
+ LOG(FATAL) << "eglChooseConfig() failed";
358
+
359
+ // Create GL context.
360
+
361
+ if (!eglBindAPI(EGL_OPENGL_API))
362
+ LOG(FATAL) << "eglBindAPI() failed";
363
+
364
+ EGLContext context = eglCreateContext(display, config, EGL_NO_CONTEXT, NULL);
365
+ if (context == EGL_NO_CONTEXT)
366
+ LOG(FATAL) << "eglCreateContext() failed";
367
+
368
+ // Done.
369
+
370
+ LOG(INFO) << "EGL " << (int)major << "." << (int)minor << " OpenGL context created (disp: 0x"
371
+ << std::hex << std::setfill('0')
372
+ << std::setw(16) << (uintptr_t)display
373
+ << ", ctx: 0x" << std::setw(16) << (uintptr_t)context << ")";
374
+
375
+ GLContext glctx = {display, context, 0};
376
+ return glctx;
377
+ }
378
+
379
+ void destroyGLContext(GLContext& glctx)
380
+ {
381
+ if (!glctx.context)
382
+ LOG(FATAL) << "destroyGLContext() called with null glctx";
383
+
384
+ // If this is the current context, release it.
385
+ if (eglGetCurrentContext() == glctx.context)
386
+ releaseGLContext();
387
+
388
+ if (!eglDestroyContext(glctx.display, glctx.context))
389
+ LOG(ERROR) << "eglDestroyContext() failed";
390
+
391
+ LOG(INFO) << "EGL OpenGL context destroyed (disp: 0x"
392
+ << std::hex << std::setfill('0')
393
+ << std::setw(16) << (uintptr_t)glctx.display
394
+ << ", ctx: 0x" << std::setw(16) << (uintptr_t)glctx.context << ")";
395
+
396
+ memset(&glctx, 0, sizeof(GLContext));
397
+ }
398
+
399
+ //------------------------------------------------------------------------
400
+
401
+ #endif // __linux__
402
+
403
+ //------------------------------------------------------------------------
extensions/nvdiffrast/nvdiffrast/common/glutil.h ADDED
@@ -0,0 +1,113 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+
11
+ //------------------------------------------------------------------------
12
+ // Windows-specific headers and types.
13
+ //------------------------------------------------------------------------
14
+
15
+ #ifdef _WIN32
16
+ #define NOMINMAX
17
+ #include <windows.h> // Required by gl.h in Windows.
18
+ #define GLAPIENTRY APIENTRY
19
+
20
+ struct GLContext
21
+ {
22
+ HDC hdc;
23
+ HGLRC hglrc;
24
+ int extInitialized;
25
+ };
26
+
27
+ #endif // _WIN32
28
+
29
+ //------------------------------------------------------------------------
30
+ // Linux-specific headers and types.
31
+ //------------------------------------------------------------------------
32
+
33
+ #ifdef __linux__
34
+ #define EGL_NO_X11 // X11/Xlib.h has "#define Status int" which breaks Tensorflow. Avoid it.
35
+ #define MESA_EGL_NO_X11_HEADERS
36
+ #include <EGL/egl.h>
37
+ #include <EGL/eglext.h>
38
+ #define GLAPIENTRY
39
+
40
+ struct GLContext
41
+ {
42
+ EGLDisplay display;
43
+ EGLContext context;
44
+ int extInitialized;
45
+ };
46
+
47
+ #endif // __linux__
48
+
49
+ //------------------------------------------------------------------------
50
+ // OpenGL, CUDA interop, GL extensions.
51
+ //------------------------------------------------------------------------
52
+ #define GL_GLEXT_LEGACY
53
+ #include <GL/gl.h>
54
+ #include <cuda_gl_interop.h>
55
+
56
+ // Constants.
57
+ #ifndef GL_VERSION_1_2
58
+ #define GL_CLAMP_TO_EDGE 0x812F
59
+ #define GL_TEXTURE_3D 0x806F
60
+ #endif
61
+ #ifndef GL_VERSION_1_5
62
+ #define GL_ARRAY_BUFFER 0x8892
63
+ #define GL_DYNAMIC_DRAW 0x88E8
64
+ #define GL_ELEMENT_ARRAY_BUFFER 0x8893
65
+ #endif
66
+ #ifndef GL_VERSION_2_0
67
+ #define GL_FRAGMENT_SHADER 0x8B30
68
+ #define GL_INFO_LOG_LENGTH 0x8B84
69
+ #define GL_LINK_STATUS 0x8B82
70
+ #define GL_VERTEX_SHADER 0x8B31
71
+ #endif
72
+ #ifndef GL_VERSION_3_0
73
+ #define GL_MAJOR_VERSION 0x821B
74
+ #define GL_MINOR_VERSION 0x821C
75
+ #define GL_RGBA32F 0x8814
76
+ #define GL_TEXTURE_2D_ARRAY 0x8C1A
77
+ #endif
78
+ #ifndef GL_VERSION_3_2
79
+ #define GL_GEOMETRY_SHADER 0x8DD9
80
+ #endif
81
+ #ifndef GL_ARB_framebuffer_object
82
+ #define GL_COLOR_ATTACHMENT0 0x8CE0
83
+ #define GL_COLOR_ATTACHMENT1 0x8CE1
84
+ #define GL_DEPTH_STENCIL 0x84F9
85
+ #define GL_DEPTH_STENCIL_ATTACHMENT 0x821A
86
+ #define GL_DEPTH24_STENCIL8 0x88F0
87
+ #define GL_FRAMEBUFFER 0x8D40
88
+ #define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506
89
+ #define GL_UNSIGNED_INT_24_8 0x84FA
90
+ #endif
91
+ #ifndef GL_ARB_imaging
92
+ #define GL_TABLE_TOO_LARGE 0x8031
93
+ #endif
94
+ #ifndef GL_KHR_robustness
95
+ #define GL_CONTEXT_LOST 0x0507
96
+ #endif
97
+
98
+ // Declare function pointers to OpenGL extension functions.
99
+ #define GLUTIL_EXT(return_type, name, ...) extern return_type (GLAPIENTRY* name)(__VA_ARGS__);
100
+ #include "glutil_extlist.h"
101
+ #undef GLUTIL_EXT
102
+
103
+ //------------------------------------------------------------------------
104
+ // Common functions.
105
+ //------------------------------------------------------------------------
106
+
107
+ void setGLContext (GLContext& glctx);
108
+ void releaseGLContext (void);
109
+ GLContext createGLContext (int cudaDeviceIdx);
110
+ void destroyGLContext (GLContext& glctx);
111
+ const char* getGLErrorString (GLenum err);
112
+
113
+ //------------------------------------------------------------------------
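
The functions declared above cover the whole context lifecycle used by the GL rasterizer. A minimal usage sketch, assuming a single CUDA device and omitting all error handling (the real call sites are in rasterize_gl.cpp and the framework bindings):

    GLContext glctx = createGLContext(0);  // Headless EGL (Linux) / WGL (Windows) context on CUDA device 0.
    setGLContext(glctx);                   // Make it current on the calling thread.
    // ... issue GL work ...
    releaseGLContext();                    // Detach the context from this thread.
    destroyGLContext(glctx);               // Destroy the context and zero the struct.
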
extensions/nvdiffrast/nvdiffrast/common/glutil_extlist.h ADDED
@@ -0,0 +1,48 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #ifndef GL_VERSION_1_2
10
+ GLUTIL_EXT(void, glTexImage3D, GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
11
+ #endif
12
+ #ifndef GL_VERSION_1_5
13
+ GLUTIL_EXT(void, glBindBuffer, GLenum target, GLuint buffer);
14
+ GLUTIL_EXT(void, glBufferData, GLenum target, ptrdiff_t size, const void* data, GLenum usage);
15
+ GLUTIL_EXT(void, glGenBuffers, GLsizei n, GLuint* buffers);
16
+ #endif
17
+ #ifndef GL_VERSION_2_0
18
+ GLUTIL_EXT(void, glAttachShader, GLuint program, GLuint shader);
19
+ GLUTIL_EXT(void, glCompileShader, GLuint shader);
20
+ GLUTIL_EXT(GLuint, glCreateProgram, void);
21
+ GLUTIL_EXT(GLuint, glCreateShader, GLenum type);
22
+ GLUTIL_EXT(void, glDrawBuffers, GLsizei n, const GLenum* bufs);
23
+ GLUTIL_EXT(void, glEnableVertexAttribArray, GLuint index);
24
+ GLUTIL_EXT(void, glGetProgramInfoLog, GLuint program, GLsizei bufSize, GLsizei* length, char* infoLog);
25
+ GLUTIL_EXT(void, glGetProgramiv, GLuint program, GLenum pname, GLint* param);
26
+ GLUTIL_EXT(void, glLinkProgram, GLuint program);
27
+ GLUTIL_EXT(void, glShaderSource, GLuint shader, GLsizei count, const char *const* string, const GLint* length);
28
+ GLUTIL_EXT(void, glUniform1f, GLint location, GLfloat v0);
29
+ GLUTIL_EXT(void, glUniform2f, GLint location, GLfloat v0, GLfloat v1);
30
+ GLUTIL_EXT(void, glUseProgram, GLuint program);
31
+ GLUTIL_EXT(void, glVertexAttribPointer, GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void* pointer);
32
+ #endif
33
+ #ifndef GL_VERSION_3_2
34
+ GLUTIL_EXT(void, glFramebufferTexture, GLenum target, GLenum attachment, GLuint texture, GLint level);
35
+ #endif
36
+ #ifndef GL_ARB_framebuffer_object
37
+ GLUTIL_EXT(void, glBindFramebuffer, GLenum target, GLuint framebuffer);
38
+ GLUTIL_EXT(void, glGenFramebuffers, GLsizei n, GLuint* framebuffers);
39
+ #endif
40
+ #ifndef GL_ARB_vertex_array_object
41
+ GLUTIL_EXT(void, glBindVertexArray, GLuint array);
42
+ GLUTIL_EXT(void, glGenVertexArrays, GLsizei n, GLuint* arrays);
43
+ #endif
44
+ #ifndef GL_ARB_multi_draw_indirect
45
+ GLUTIL_EXT(void, glMultiDrawElementsIndirect, GLenum mode, GLenum type, const void *indirect, GLsizei primcount, GLsizei stride);
46
+ #endif
47
+
48
+ //------------------------------------------------------------------------
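
This list is consumed through the GLUTIL_EXT X-macro declared at the end of glutil.h, so each entry becomes an extern function-pointer declaration that is resolved at runtime rather than linked against a GL loader. For example, the glBindBuffer entry expands to:

    // Expansion of GLUTIL_EXT(void, glBindBuffer, GLenum target, GLuint buffer);
    // under #define GLUTIL_EXT(return_type, name, ...) extern return_type (GLAPIENTRY* name)(__VA_ARGS__);
    extern void (GLAPIENTRY* glBindBuffer)(GLenum target, GLuint buffer);

The defining/loading side presumably re-includes this header with a different GLUTIL_EXT definition in glutil.cpp (the usual X-macro pattern); only the declaration form appears in this diff.
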
extensions/nvdiffrast/nvdiffrast/common/interpolate.cu ADDED
@@ -0,0 +1,276 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "common.h"
10
+ #include "interpolate.h"
11
+
12
+ //------------------------------------------------------------------------
13
+ // Forward kernel.
14
+
15
+ template <bool ENABLE_DA>
16
+ static __forceinline__ __device__ void InterpolateFwdKernelTemplate(const InterpolateKernelParams p)
17
+ {
18
+ // Calculate pixel position.
19
+ int px = blockIdx.x * blockDim.x + threadIdx.x;
20
+ int py = blockIdx.y * blockDim.y + threadIdx.y;
21
+ int pz = blockIdx.z;
22
+ if (px >= p.width || py >= p.height || pz >= p.depth)
23
+ return;
24
+
25
+ // Pixel index.
26
+ int pidx = px + p.width * (py + p.height * pz);
27
+
28
+ // Output ptrs.
29
+ float* out = p.out + pidx * p.numAttr;
30
+ float2* outDA = ENABLE_DA ? (((float2*)p.outDA) + pidx * p.numDiffAttr) : 0;
31
+
32
+ // Fetch rasterizer output.
33
+ float4 r = ((float4*)p.rast)[pidx];
34
+ int triIdx = float_to_triidx(r.w) - 1;
35
+ bool triValid = (triIdx >= 0 && triIdx < p.numTriangles);
36
+
37
+ // If no geometry in entire warp, zero the output and exit.
38
+ // Otherwise force barys to zero and output with live threads.
39
+ if (__all_sync(0xffffffffu, !triValid))
40
+ {
41
+ for (int i=0; i < p.numAttr; i++)
42
+ out[i] = 0.f;
43
+ if (ENABLE_DA)
44
+ for (int i=0; i < p.numDiffAttr; i++)
45
+ outDA[i] = make_float2(0.f, 0.f);
46
+ return;
47
+ }
48
+
49
+ // Fetch vertex indices.
50
+ int vi0 = triValid ? p.tri[triIdx * 3 + 0] : 0;
51
+ int vi1 = triValid ? p.tri[triIdx * 3 + 1] : 0;
52
+ int vi2 = triValid ? p.tri[triIdx * 3 + 2] : 0;
53
+
54
+ // Bail out if corrupt indices.
55
+ if (vi0 < 0 || vi0 >= p.numVertices ||
56
+ vi1 < 0 || vi1 >= p.numVertices ||
57
+ vi2 < 0 || vi2 >= p.numVertices)
58
+ return;
59
+
60
+ // In instance mode, adjust vertex indices by minibatch index unless broadcasting.
61
+ if (p.instance_mode && !p.attrBC)
62
+ {
63
+ vi0 += pz * p.numVertices;
64
+ vi1 += pz * p.numVertices;
65
+ vi2 += pz * p.numVertices;
66
+ }
67
+
68
+ // Pointers to attributes.
69
+ const float* a0 = p.attr + vi0 * p.numAttr;
70
+ const float* a1 = p.attr + vi1 * p.numAttr;
71
+ const float* a2 = p.attr + vi2 * p.numAttr;
72
+
73
+ // Barys. If no triangle, force all to zero -> output is zero.
74
+ float b0 = triValid ? r.x : 0.f;
75
+ float b1 = triValid ? r.y : 0.f;
76
+ float b2 = triValid ? (1.f - r.x - r.y) : 0.f;
77
+
78
+ // Interpolate and write attributes.
79
+ for (int i=0; i < p.numAttr; i++)
80
+ out[i] = b0*a0[i] + b1*a1[i] + b2*a2[i];
81
+
82
+ // No diff attrs? Exit.
83
+ if (!ENABLE_DA)
84
+ return;
85
+
86
+ // Read bary pixel differentials if we have a triangle.
87
+ float4 db = make_float4(0.f, 0.f, 0.f, 0.f);
88
+ if (triValid)
89
+ db = ((float4*)p.rastDB)[pidx];
90
+
91
+ // Unpack a bit.
92
+ float dudx = db.x;
93
+ float dudy = db.y;
94
+ float dvdx = db.z;
95
+ float dvdy = db.w;
96
+
97
+ // Calculate the pixel differentials of chosen attributes.
98
+ for (int i=0; i < p.numDiffAttr; i++)
99
+ {
100
+ // Input attribute index.
101
+ int j = p.diff_attrs_all ? i : p.diffAttrs[i];
102
+ if (j < 0)
103
+ j += p.numAttr; // Python-style negative indices.
104
+
105
+ // Zero output if invalid index.
106
+ float dsdx = 0.f;
107
+ float dsdy = 0.f;
108
+ if (j >= 0 && j < p.numAttr)
109
+ {
110
+ float s0 = a0[j];
111
+ float s1 = a1[j];
112
+ float s2 = a2[j];
113
+ float dsdu = s0 - s2;
114
+ float dsdv = s1 - s2;
115
+ dsdx = dudx*dsdu + dvdx*dsdv;
116
+ dsdy = dudy*dsdu + dvdy*dsdv;
117
+ }
118
+
119
+ // Write.
120
+ outDA[i] = make_float2(dsdx, dsdy);
121
+ }
122
+ }
123
+
124
+ // Template specializations.
125
+ __global__ void InterpolateFwdKernel (const InterpolateKernelParams p) { InterpolateFwdKernelTemplate<false>(p); }
126
+ __global__ void InterpolateFwdKernelDa(const InterpolateKernelParams p) { InterpolateFwdKernelTemplate<true>(p); }
127
+
128
+ //------------------------------------------------------------------------
129
+ // Gradient kernel.
130
+
131
+ template <bool ENABLE_DA>
132
+ static __forceinline__ __device__ void InterpolateGradKernelTemplate(const InterpolateKernelParams p)
133
+ {
134
+ // Temporary space for coalesced atomics.
135
+ CA_DECLARE_TEMP(IP_GRAD_MAX_KERNEL_BLOCK_WIDTH * IP_GRAD_MAX_KERNEL_BLOCK_HEIGHT);
136
+
137
+ // Calculate pixel position.
138
+ int px = blockIdx.x * blockDim.x + threadIdx.x;
139
+ int py = blockIdx.y * blockDim.y + threadIdx.y;
140
+ int pz = blockIdx.z;
141
+ if (px >= p.width || py >= p.height || pz >= p.depth)
142
+ return;
143
+
144
+ // Pixel index.
145
+ int pidx = px + p.width * (py + p.height * pz);
146
+
147
+ // Fetch triangle ID. If none, output zero bary/db gradients and exit.
148
+ float4 r = ((float4*)p.rast)[pidx];
149
+ int triIdx = float_to_triidx(r.w) - 1;
150
+ if (triIdx < 0 || triIdx >= p.numTriangles)
151
+ {
152
+ ((float4*)p.gradRaster)[pidx] = make_float4(0.f, 0.f, 0.f, 0.f);
153
+ if (ENABLE_DA)
154
+ ((float4*)p.gradRasterDB)[pidx] = make_float4(0.f, 0.f, 0.f, 0.f);
155
+ return;
156
+ }
157
+
158
+ // Fetch vertex indices.
159
+ int vi0 = p.tri[triIdx * 3 + 0];
160
+ int vi1 = p.tri[triIdx * 3 + 1];
161
+ int vi2 = p.tri[triIdx * 3 + 2];
162
+
163
+ // Bail out if corrupt indices.
164
+ if (vi0 < 0 || vi0 >= p.numVertices ||
165
+ vi1 < 0 || vi1 >= p.numVertices ||
166
+ vi2 < 0 || vi2 >= p.numVertices)
167
+ return;
168
+
169
+ // In instance mode, adjust vertex indices by minibatch index unless broadcasting.
170
+ if (p.instance_mode && !p.attrBC)
171
+ {
172
+ vi0 += pz * p.numVertices;
173
+ vi1 += pz * p.numVertices;
174
+ vi2 += pz * p.numVertices;
175
+ }
176
+
177
+ // Initialize coalesced atomics.
178
+ CA_SET_GROUP(triIdx);
179
+
180
+ // Pointers to inputs.
181
+ const float* a0 = p.attr + vi0 * p.numAttr;
182
+ const float* a1 = p.attr + vi1 * p.numAttr;
183
+ const float* a2 = p.attr + vi2 * p.numAttr;
184
+ const float* pdy = p.dy + pidx * p.numAttr;
185
+
186
+ // Pointers to outputs.
187
+ float* ga0 = p.gradAttr + vi0 * p.numAttr;
188
+ float* ga1 = p.gradAttr + vi1 * p.numAttr;
189
+ float* ga2 = p.gradAttr + vi2 * p.numAttr;
190
+
191
+ // Barys and bary gradient accumulators.
192
+ float b0 = r.x;
193
+ float b1 = r.y;
194
+ float b2 = 1.f - r.x - r.y;
195
+ float gb0 = 0.f;
196
+ float gb1 = 0.f;
197
+
198
+ // Loop over attributes and accumulate attribute gradients.
199
+ for (int i=0; i < p.numAttr; i++)
200
+ {
201
+ float y = pdy[i];
202
+ float s0 = a0[i];
203
+ float s1 = a1[i];
204
+ float s2 = a2[i];
205
+ gb0 += y * (s0 - s2);
206
+ gb1 += y * (s1 - s2);
207
+ caAtomicAdd(ga0 + i, b0 * y);
208
+ caAtomicAdd(ga1 + i, b1 * y);
209
+ caAtomicAdd(ga2 + i, b2 * y);
210
+ }
211
+
212
+ // Write the bary gradients.
213
+ ((float4*)p.gradRaster)[pidx] = make_float4(gb0, gb1, 0.f, 0.f);
214
+
215
+ // If pixel differentials disabled, we're done.
216
+ if (!ENABLE_DA)
217
+ return;
218
+
219
+ // Calculate gradients based on attribute pixel differentials.
220
+ const float2* dda = ((float2*)p.dda) + pidx * p.numDiffAttr;
221
+ float gdudx = 0.f;
222
+ float gdudy = 0.f;
223
+ float gdvdx = 0.f;
224
+ float gdvdy = 0.f;
225
+
226
+ // Read bary pixel differentials.
227
+ float4 db = ((float4*)p.rastDB)[pidx];
228
+ float dudx = db.x;
229
+ float dudy = db.y;
230
+ float dvdx = db.z;
231
+ float dvdy = db.w;
232
+
233
+ for (int i=0; i < p.numDiffAttr; i++)
234
+ {
235
+ // Input attribute index.
236
+ int j = p.diff_attrs_all ? i : p.diffAttrs[i];
237
+ if (j < 0)
238
+ j += p.numAttr; // Python-style negative indices.
239
+
240
+ // Check that index is valid.
241
+ if (j >= 0 && j < p.numAttr)
242
+ {
243
+ float2 dsdxy = dda[i];
244
+ float dsdx = dsdxy.x;
245
+ float dsdy = dsdxy.y;
246
+
247
+ float s0 = a0[j];
248
+ float s1 = a1[j];
249
+ float s2 = a2[j];
250
+
251
+ // Gradients of db.
252
+ float dsdu = s0 - s2;
253
+ float dsdv = s1 - s2;
254
+ gdudx += dsdu * dsdx;
255
+ gdudy += dsdu * dsdy;
256
+ gdvdx += dsdv * dsdx;
257
+ gdvdy += dsdv * dsdy;
258
+
259
+ // Gradients of attributes.
260
+ float du = dsdx*dudx + dsdy*dudy;
261
+ float dv = dsdx*dvdx + dsdy*dvdy;
262
+ caAtomicAdd(ga0 + j, du);
263
+ caAtomicAdd(ga1 + j, dv);
264
+ caAtomicAdd(ga2 + j, -du - dv);
265
+ }
266
+ }
267
+
268
+ // Write.
269
+ ((float4*)p.gradRasterDB)[pidx] = make_float4(gdudx, gdudy, gdvdx, gdvdy);
270
+ }
271
+
272
+ // Template specializations.
273
+ __global__ void InterpolateGradKernel (const InterpolateKernelParams p) { InterpolateGradKernelTemplate<false>(p); }
274
+ __global__ void InterpolateGradKernelDa(const InterpolateKernelParams p) { InterpolateGradKernelTemplate<true>(p); }
275
+
276
+ //------------------------------------------------------------------------
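
Written out, the math implemented by the two kernels above is compact. With rasterizer barycentrics $b_0, b_1$ (and $b_2 = 1 - b_0 - b_1$) and per-vertex attributes $a_0, a_1, a_2$, the forward interpolation and the attribute pixel differential obtained by the chain rule through the barycentric screen derivatives are

\[
\mathrm{out}_i = b_0\,a_{0,i} + b_1\,a_{1,i} + (1 - b_0 - b_1)\,a_{2,i},
\qquad
\frac{\partial s}{\partial x} = \frac{\partial u}{\partial x}\,(s_0 - s_2) + \frac{\partial v}{\partial x}\,(s_1 - s_2),
\]

and analogously for $\partial s / \partial y$. The gradient kernel is the transpose of this: it scatters $b_k\,\partial L/\partial \mathrm{out}_i$ into the attribute gradients and accumulates $\sum_i \partial L/\partial \mathrm{out}_i\,(a_{k,i} - a_{2,i})$ into the gradient of $b_k$ for $k \in \{0, 1\}$, which is exactly what the caAtomicAdd calls above do.
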
extensions/nvdiffrast/nvdiffrast/common/interpolate.h ADDED
@@ -0,0 +1,49 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+
11
+ //------------------------------------------------------------------------
12
+ // Constants and helpers.
13
+
14
+ #define IP_FWD_MAX_KERNEL_BLOCK_WIDTH 8
15
+ #define IP_FWD_MAX_KERNEL_BLOCK_HEIGHT 8
16
+ #define IP_GRAD_MAX_KERNEL_BLOCK_WIDTH 8
17
+ #define IP_GRAD_MAX_KERNEL_BLOCK_HEIGHT 8
18
+ #define IP_MAX_DIFF_ATTRS 32
19
+
20
+ //------------------------------------------------------------------------
21
+ // CUDA kernel params.
22
+
23
+ struct InterpolateKernelParams
24
+ {
25
+ const int* tri; // Incoming triangle buffer.
26
+ const float* attr; // Incoming attribute buffer.
27
+ const float* rast; // Incoming rasterizer output buffer.
28
+ const float* rastDB; // Incoming rasterizer output buffer for bary derivatives.
29
+ const float* dy; // Incoming attribute gradients.
30
+ const float* dda; // Incoming attr diff gradients.
31
+ float* out; // Outgoing interpolated attributes.
32
+ float* outDA; // Outgoing texcoord major axis lengths.
33
+ float* gradAttr; // Outgoing attribute gradients.
34
+ float* gradRaster; // Outgoing rasterizer gradients.
35
+ float* gradRasterDB; // Outgoing rasterizer bary diff gradients.
36
+ int numTriangles; // Number of triangles.
37
+ int numVertices; // Number of vertices.
38
+ int numAttr; // Number of total vertex attributes.
39
+ int numDiffAttr; // Number of attributes to differentiate.
40
+ int width; // Image width.
41
+ int height; // Image height.
42
+ int depth; // Minibatch size.
43
+ int attrBC; // 0=normal, 1=attr is broadcast.
44
+ int instance_mode; // 0=normal, 1=instance mode.
45
+ int diff_attrs_all; // 0=normal, 1=produce pixel differentials for all attributes.
46
+ int diffAttrs[IP_MAX_DIFF_ATTRS]; // List of attributes to differentiate.
47
+ };
48
+
49
+ //------------------------------------------------------------------------
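
A minimal sketch of how the forward kernel might be launched against this parameter struct, using the block-size constants above. This is illustrative only; the real launch code lives in the framework-specific wrappers (presumably via the helpers in common.h), which also select the DA variant and enforce the attribute limits.

    // Hypothetical helper; p is a fully populated InterpolateKernelParams.
    __global__ void InterpolateFwdKernel(const InterpolateKernelParams p); // Defined in interpolate.cu.

    void launchInterpolateFwd(const InterpolateKernelParams& p, cudaStream_t stream)
    {
        dim3 block(IP_FWD_MAX_KERNEL_BLOCK_WIDTH, IP_FWD_MAX_KERNEL_BLOCK_HEIGHT, 1);
        dim3 grid((p.width  + block.x - 1) / block.x,   // One thread per pixel in x.
                  (p.height + block.y - 1) / block.y,   // One thread per pixel in y.
                  p.depth);                             // One z slice per minibatch element.
        InterpolateFwdKernel<<<grid, block, 0, stream>>>(p);
    }
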
extensions/nvdiffrast/nvdiffrast/common/rasterize.cu ADDED
@@ -0,0 +1,276 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "common.h"
10
+ #include "rasterize.h"
11
+
12
+ //------------------------------------------------------------------------
13
+ // Cuda forward rasterizer pixel shader kernel.
14
+
15
+ __global__ void RasterizeCudaFwdShaderKernel(const RasterizeCudaFwdShaderParams p)
16
+ {
17
+ // Calculate pixel position.
18
+ int px = blockIdx.x * blockDim.x + threadIdx.x;
19
+ int py = blockIdx.y * blockDim.y + threadIdx.y;
20
+ int pz = blockIdx.z;
21
+ if (px >= p.width_out || py >= p.height_out || pz >= p.depth)
22
+ return;
23
+
24
+ // Pixel indices.
25
+ int pidx_in = px + p.width_in * (py + p.height_in * pz);
26
+ int pidx_out = px + p.width_out * (py + p.height_out * pz);
27
+
28
+ // Fetch triangle idx.
29
+ int triIdx = p.in_idx[pidx_in] - 1;
30
+ if (triIdx < 0 || triIdx >= p.numTriangles)
31
+ {
32
+ // No or corrupt triangle.
33
+ ((float4*)p.out)[pidx_out] = make_float4(0.0, 0.0, 0.0, 0.0); // Clear out.
34
+ ((float4*)p.out_db)[pidx_out] = make_float4(0.0, 0.0, 0.0, 0.0); // Clear out_db.
35
+ return;
36
+ }
37
+
38
+ // Fetch vertex indices.
39
+ int vi0 = p.tri[triIdx * 3 + 0];
40
+ int vi1 = p.tri[triIdx * 3 + 1];
41
+ int vi2 = p.tri[triIdx * 3 + 2];
42
+
43
+ // Bail out if vertex indices are corrupt.
44
+ if (vi0 < 0 || vi0 >= p.numVertices ||
45
+ vi1 < 0 || vi1 >= p.numVertices ||
46
+ vi2 < 0 || vi2 >= p.numVertices)
47
+ return;
48
+
49
+ // In instance mode, adjust vertex indices by minibatch index.
50
+ if (p.instance_mode)
51
+ {
52
+ vi0 += pz * p.numVertices;
53
+ vi1 += pz * p.numVertices;
54
+ vi2 += pz * p.numVertices;
55
+ }
56
+
57
+ // Fetch vertex positions.
58
+ float4 p0 = ((float4*)p.pos)[vi0];
59
+ float4 p1 = ((float4*)p.pos)[vi1];
60
+ float4 p2 = ((float4*)p.pos)[vi2];
61
+
62
+ // Evaluate edge functions.
63
+ float fx = p.xs * (float)px + p.xo;
64
+ float fy = p.ys * (float)py + p.yo;
65
+ float p0x = p0.x - fx * p0.w;
66
+ float p0y = p0.y - fy * p0.w;
67
+ float p1x = p1.x - fx * p1.w;
68
+ float p1y = p1.y - fy * p1.w;
69
+ float p2x = p2.x - fx * p2.w;
70
+ float p2y = p2.y - fy * p2.w;
71
+ float a0 = p1x*p2y - p1y*p2x;
72
+ float a1 = p2x*p0y - p2y*p0x;
73
+ float a2 = p0x*p1y - p0y*p1x;
74
+
75
+ // Perspective correct, normalized barycentrics.
76
+ float iw = 1.f / (a0 + a1 + a2);
77
+ float b0 = a0 * iw;
78
+ float b1 = a1 * iw;
79
+
80
+ // Compute z/w for depth buffer.
81
+ float z = p0.z * a0 + p1.z * a1 + p2.z * a2;
82
+ float w = p0.w * a0 + p1.w * a1 + p2.w * a2;
83
+ float zw = z / w;
84
+
85
+ // Clamps to avoid NaNs.
86
+ b0 = __saturatef(b0); // Clamp to [+0.0, 1.0].
87
+ b1 = __saturatef(b1); // Clamp to [+0.0, 1.0].
88
+ zw = fmaxf(fminf(zw, 1.f), -1.f);
89
+
90
+ // Emit output.
91
+ ((float4*)p.out)[pidx_out] = make_float4(b0, b1, zw, triidx_to_float(triIdx + 1));
92
+
93
+ // Calculate bary pixel differentials.
94
+ float dfxdx = p.xs * iw;
95
+ float dfydy = p.ys * iw;
96
+ float da0dx = p2.y*p1.w - p1.y*p2.w;
97
+ float da0dy = p1.x*p2.w - p2.x*p1.w;
98
+ float da1dx = p0.y*p2.w - p2.y*p0.w;
99
+ float da1dy = p2.x*p0.w - p0.x*p2.w;
100
+ float da2dx = p1.y*p0.w - p0.y*p1.w;
101
+ float da2dy = p0.x*p1.w - p1.x*p0.w;
102
+ float datdx = da0dx + da1dx + da2dx;
103
+ float datdy = da0dy + da1dy + da2dy;
104
+ float dudx = dfxdx * (b0 * datdx - da0dx);
105
+ float dudy = dfydy * (b0 * datdy - da0dy);
106
+ float dvdx = dfxdx * (b1 * datdx - da1dx);
107
+ float dvdy = dfydy * (b1 * datdy - da1dy);
108
+
109
+ // Emit bary pixel differentials.
110
+ ((float4*)p.out_db)[pidx_out] = make_float4(dudx, dudy, dvdx, dvdy);
111
+ }
112
+
113
+ //------------------------------------------------------------------------
114
+ // Gradient Cuda kernel.
115
+
116
+ template <bool ENABLE_DB>
117
+ static __forceinline__ __device__ void RasterizeGradKernelTemplate(const RasterizeGradParams p)
118
+ {
119
+ // Temporary space for coalesced atomics.
120
+ CA_DECLARE_TEMP(RAST_GRAD_MAX_KERNEL_BLOCK_WIDTH * RAST_GRAD_MAX_KERNEL_BLOCK_HEIGHT);
121
+
122
+ // Calculate pixel position.
123
+ int px = blockIdx.x * blockDim.x + threadIdx.x;
124
+ int py = blockIdx.y * blockDim.y + threadIdx.y;
125
+ int pz = blockIdx.z;
126
+ if (px >= p.width || py >= p.height || pz >= p.depth)
127
+ return;
128
+
129
+ // Pixel index.
130
+ int pidx = px + p.width * (py + p.height * pz);
131
+
132
+ // Read triangle idx and dy.
133
+ float2 dy = ((float2*)p.dy)[pidx * 2];
134
+ float4 ddb = ENABLE_DB ? ((float4*)p.ddb)[pidx] : make_float4(0.f, 0.f, 0.f, 0.f);
135
+ int triIdx = float_to_triidx(((float*)p.out)[pidx * 4 + 3]) - 1;
136
+
137
+ // Exit if nothing to do.
138
+ if (triIdx < 0 || triIdx >= p.numTriangles)
139
+ return; // No or corrupt triangle.
140
+ int grad_all_dy = __float_as_int(dy.x) | __float_as_int(dy.y); // Bitwise OR of all incoming gradients.
141
+ int grad_all_ddb = 0;
142
+ if (ENABLE_DB)
143
+ grad_all_ddb = __float_as_int(ddb.x) | __float_as_int(ddb.y) | __float_as_int(ddb.z) | __float_as_int(ddb.w);
144
+ if (((grad_all_dy | grad_all_ddb) << 1) == 0)
145
+ return; // All incoming gradients are +0/-0.
146
+
147
+ // Fetch vertex indices.
148
+ int vi0 = p.tri[triIdx * 3 + 0];
149
+ int vi1 = p.tri[triIdx * 3 + 1];
150
+ int vi2 = p.tri[triIdx * 3 + 2];
151
+
152
+ // Bail out if vertex indices are corrupt.
153
+ if (vi0 < 0 || vi0 >= p.numVertices ||
154
+ vi1 < 0 || vi1 >= p.numVertices ||
155
+ vi2 < 0 || vi2 >= p.numVertices)
156
+ return;
157
+
158
+ // In instance mode, adjust vertex indices by minibatch index.
159
+ if (p.instance_mode)
160
+ {
161
+ vi0 += pz * p.numVertices;
162
+ vi1 += pz * p.numVertices;
163
+ vi2 += pz * p.numVertices;
164
+ }
165
+
166
+ // Initialize coalesced atomics.
167
+ CA_SET_GROUP(triIdx);
168
+
169
+ // Fetch vertex positions.
170
+ float4 p0 = ((float4*)p.pos)[vi0];
171
+ float4 p1 = ((float4*)p.pos)[vi1];
172
+ float4 p2 = ((float4*)p.pos)[vi2];
173
+
174
+ // Evaluate edge functions.
175
+ float fx = p.xs * (float)px + p.xo;
176
+ float fy = p.ys * (float)py + p.yo;
177
+ float p0x = p0.x - fx * p0.w;
178
+ float p0y = p0.y - fy * p0.w;
179
+ float p1x = p1.x - fx * p1.w;
180
+ float p1y = p1.y - fy * p1.w;
181
+ float p2x = p2.x - fx * p2.w;
182
+ float p2y = p2.y - fy * p2.w;
183
+ float a0 = p1x*p2y - p1y*p2x;
184
+ float a1 = p2x*p0y - p2y*p0x;
185
+ float a2 = p0x*p1y - p0y*p1x;
186
+
187
+ // Compute inverse area with epsilon.
188
+ float at = a0 + a1 + a2;
189
+ float ep = copysignf(1e-6f, at); // ~1 pixel in 1k x 1k image.
190
+ float iw = 1.f / (at + ep);
191
+
192
+ // Perspective correct, normalized barycentrics.
193
+ float b0 = a0 * iw;
194
+ float b1 = a1 * iw;
195
+
196
+ // Position gradients.
197
+ float gb0 = dy.x * iw;
198
+ float gb1 = dy.y * iw;
199
+ float gbb = gb0 * b0 + gb1 * b1;
200
+ float gp0x = gbb * (p2y - p1y) - gb1 * p2y;
201
+ float gp1x = gbb * (p0y - p2y) + gb0 * p2y;
202
+ float gp2x = gbb * (p1y - p0y) - gb0 * p1y + gb1 * p0y;
203
+ float gp0y = gbb * (p1x - p2x) + gb1 * p2x;
204
+ float gp1y = gbb * (p2x - p0x) - gb0 * p2x;
205
+ float gp2y = gbb * (p0x - p1x) + gb0 * p1x - gb1 * p0x;
206
+ float gp0w = -fx * gp0x - fy * gp0y;
207
+ float gp1w = -fx * gp1x - fy * gp1y;
208
+ float gp2w = -fx * gp2x - fy * gp2y;
209
+
210
+ // Bary differential gradients.
211
+ if (ENABLE_DB && ((grad_all_ddb) << 1) != 0)
212
+ {
213
+ float dfxdX = p.xs * iw;
214
+ float dfydY = p.ys * iw;
215
+ ddb.x *= dfxdX;
216
+ ddb.y *= dfydY;
217
+ ddb.z *= dfxdX;
218
+ ddb.w *= dfydY;
219
+
220
+ float da0dX = p1.y * p2.w - p2.y * p1.w;
221
+ float da1dX = p2.y * p0.w - p0.y * p2.w;
222
+ float da2dX = p0.y * p1.w - p1.y * p0.w;
223
+ float da0dY = p2.x * p1.w - p1.x * p2.w;
224
+ float da1dY = p0.x * p2.w - p2.x * p0.w;
225
+ float da2dY = p1.x * p0.w - p0.x * p1.w;
226
+ float datdX = da0dX + da1dX + da2dX;
227
+ float datdY = da0dY + da1dY + da2dY;
228
+
229
+ float x01 = p0.x - p1.x;
230
+ float x12 = p1.x - p2.x;
231
+ float x20 = p2.x - p0.x;
232
+ float y01 = p0.y - p1.y;
233
+ float y12 = p1.y - p2.y;
234
+ float y20 = p2.y - p0.y;
235
+ float w01 = p0.w - p1.w;
236
+ float w12 = p1.w - p2.w;
237
+ float w20 = p2.w - p0.w;
238
+
239
+ float a0p1 = fy * p2.x - fx * p2.y;
240
+ float a0p2 = fx * p1.y - fy * p1.x;
241
+ float a1p0 = fx * p2.y - fy * p2.x;
242
+ float a1p2 = fy * p0.x - fx * p0.y;
243
+
244
+ float wdudX = 2.f * b0 * datdX - da0dX;
245
+ float wdudY = 2.f * b0 * datdY - da0dY;
246
+ float wdvdX = 2.f * b1 * datdX - da1dX;
247
+ float wdvdY = 2.f * b1 * datdY - da1dY;
248
+
249
+ float c0 = iw * (ddb.x * wdudX + ddb.y * wdudY + ddb.z * wdvdX + ddb.w * wdvdY);
250
+ float cx = c0 * fx - ddb.x * b0 - ddb.z * b1;
251
+ float cy = c0 * fy - ddb.y * b0 - ddb.w * b1;
252
+ float cxy = iw * (ddb.x * datdX + ddb.y * datdY);
253
+ float czw = iw * (ddb.z * datdX + ddb.w * datdY);
254
+
255
+ gp0x += c0 * y12 - cy * w12 + czw * p2y + ddb.w * p2.w;
256
+ gp1x += c0 * y20 - cy * w20 - cxy * p2y - ddb.y * p2.w;
257
+ gp2x += c0 * y01 - cy * w01 + cxy * p1y - czw * p0y + ddb.y * p1.w - ddb.w * p0.w;
258
+ gp0y += cx * w12 - c0 * x12 - czw * p2x - ddb.z * p2.w;
259
+ gp1y += cx * w20 - c0 * x20 + cxy * p2x + ddb.x * p2.w;
260
+ gp2y += cx * w01 - c0 * x01 - cxy * p1x + czw * p0x - ddb.x * p1.w + ddb.z * p0.w;
261
+ gp0w += cy * x12 - cx * y12 - czw * a1p0 + ddb.z * p2.y - ddb.w * p2.x;
262
+ gp1w += cy * x20 - cx * y20 - cxy * a0p1 - ddb.x * p2.y + ddb.y * p2.x;
263
+ gp2w += cy * x01 - cx * y01 - cxy * a0p2 - czw * a1p2 + ddb.x * p1.y - ddb.y * p1.x - ddb.z * p0.y + ddb.w * p0.x;
264
+ }
265
+
266
+ // Accumulate using coalesced atomics.
267
+ caAtomicAdd3_xyw(p.grad + 4 * vi0, gp0x, gp0y, gp0w);
268
+ caAtomicAdd3_xyw(p.grad + 4 * vi1, gp1x, gp1y, gp1w);
269
+ caAtomicAdd3_xyw(p.grad + 4 * vi2, gp2x, gp2y, gp2w);
270
+ }
271
+
272
+ // Template specializations.
273
+ __global__ void RasterizeGradKernel (const RasterizeGradParams p) { RasterizeGradKernelTemplate<false>(p); }
274
+ __global__ void RasterizeGradKernelDb(const RasterizeGradParams p) { RasterizeGradKernelTemplate<true>(p); }
275
+
276
+ //------------------------------------------------------------------------
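
The forward shader kernel reconstructs perspective-correct barycentrics from clip-space positions via edge functions evaluated at the pixel. Restated as a small host-side helper (illustrative only; it mirrors the kernel math but omits the clamping, the epsilon used in the gradient kernel, and the triangle-ID packing):

    #include <vector_types.h> // float4

    // p0..p2 are clip-space vertices (x, y, z, w); (fx, fy) is the pixel's NDC position.
    static void pixelBarycentrics(float4 p0, float4 p1, float4 p2, float fx, float fy,
                                  float& b0, float& b1, float& zw)
    {
        float p0x = p0.x - fx * p0.w, p0y = p0.y - fy * p0.w;
        float p1x = p1.x - fx * p1.w, p1y = p1.y - fy * p1.w;
        float p2x = p2.x - fx * p2.w, p2y = p2.y - fy * p2.w;
        float a0 = p1x * p2y - p1y * p2x;  // Edge function opposite vertex 0.
        float a1 = p2x * p0y - p2y * p0x;  // Edge function opposite vertex 1.
        float a2 = p0x * p1y - p0y * p1x;  // Edge function opposite vertex 2.
        float iw = 1.f / (a0 + a1 + a2);   // Normalize by total signed area.
        b0 = a0 * iw;
        b1 = a1 * iw;
        zw = (p0.z * a0 + p1.z * a1 + p2.z * a2) / (p0.w * a0 + p1.w * a1 + p2.w * a2);
    }
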
extensions/nvdiffrast/nvdiffrast/common/rasterize.h ADDED
@@ -0,0 +1,60 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+
11
+ //------------------------------------------------------------------------
12
+ // Constants and helpers.
13
+
14
+ #define RAST_CUDA_FWD_SHADER_KERNEL_BLOCK_WIDTH 8
15
+ #define RAST_CUDA_FWD_SHADER_KERNEL_BLOCK_HEIGHT 8
16
+ #define RAST_GRAD_MAX_KERNEL_BLOCK_WIDTH 8
17
+ #define RAST_GRAD_MAX_KERNEL_BLOCK_HEIGHT 8
18
+
19
+ //------------------------------------------------------------------------
20
+ // CUDA forward rasterizer shader kernel params.
21
+
22
+ struct RasterizeCudaFwdShaderParams
23
+ {
24
+ const float* pos; // Vertex positions.
25
+ const int* tri; // Triangle indices.
26
+ const int* in_idx; // Triangle idx buffer from rasterizer.
27
+ float* out; // Main output buffer.
28
+ float* out_db; // Bary pixel gradient output buffer.
29
+ int numTriangles; // Number of triangles.
30
+ int numVertices; // Number of vertices.
31
+ int width_in; // Input image width.
32
+ int height_in; // Input image height.
33
+ int width_out; // Output image width.
34
+ int height_out; // Output image height.
35
+ int depth; // Size of minibatch.
36
+ int instance_mode; // 1 if in instance rendering mode.
37
+ float xs, xo, ys, yo; // Pixel position to clip-space x, y transform.
38
+ };
39
+
40
+ //------------------------------------------------------------------------
41
+ // Gradient CUDA kernel params.
42
+
43
+ struct RasterizeGradParams
44
+ {
45
+ const float* pos; // Incoming position buffer.
46
+ const int* tri; // Incoming triangle buffer.
47
+ const float* out; // Rasterizer output buffer.
48
+ const float* dy; // Incoming gradients of rasterizer output buffer.
49
+ const float* ddb; // Incoming gradients of bary diff output buffer.
50
+ float* grad; // Outgoing position gradients.
51
+ int numTriangles; // Number of triangles.
52
+ int numVertices; // Number of vertices.
53
+ int width; // Image width.
54
+ int height; // Image height.
55
+ int depth; // Size of minibatch.
56
+ int instance_mode; // 1 if in instance rendering mode.
57
+ float xs, xo, ys, yo; // Pixel position to clip-space x, y transform.
58
+ };
59
+
60
+ //------------------------------------------------------------------------
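
The xs, xo, ys, yo fields map an integer pixel index to the clip-space coordinate used by the edge functions (the kernels compute fx = xs * px + xo and fy = ys * py + yo). As a hedged illustration only -- the actual values are filled in by the op wrappers, which are not part of this diff -- a convention that places pixel centers on the NDC grid of a width x height image would be:

    // Hypothetical setup for a RasterizeCudaFwdShaderParams p; assumes pixel centers map to [-1, 1] NDC.
    p.xs = 2.f / (float)width;        // One pixel step in NDC x.
    p.xo = 1.f / (float)width - 1.f;  // Center of pixel 0 in NDC x.
    p.ys = 2.f / (float)height;
    p.yo = 1.f / (float)height - 1.f;
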
extensions/nvdiffrast/nvdiffrast/common/rasterize_gl.cpp ADDED
@@ -0,0 +1,644 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "rasterize_gl.h"
10
+ #include "glutil.h"
11
+ #include <vector>
12
+ #define STRINGIFY_SHADER_SOURCE(x) #x
13
+
14
+ //------------------------------------------------------------------------
15
+ // Helpers.
16
+
17
+ #define ROUND_UP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
18
+ static int ROUND_UP_BITS(uint32_t x, uint32_t y)
19
+ {
20
+ // Round x up so that it has at most y bits of mantissa.
21
+ if (x < (1u << y))
22
+ return x;
23
+ uint32_t m = 0;
24
+ while (x & ~m)
25
+ m = (m << 1) | 1u;
26
+ m >>= y;
27
+ if (!(x & m))
28
+ return x;
29
+ return (x | m) + 1u;
30
+ }
31
+
32
+ //------------------------------------------------------------------------
33
+ // Draw command struct used by rasterizer.
34
+
35
+ struct GLDrawCmd
36
+ {
37
+ uint32_t count;
38
+ uint32_t instanceCount;
39
+ uint32_t firstIndex;
40
+ uint32_t baseVertex;
41
+ uint32_t baseInstance;
42
+ };
43
+
44
+ //------------------------------------------------------------------------
45
+ // GL helpers.
46
+
47
+ static void compileGLShader(NVDR_CTX_ARGS, const RasterizeGLState& s, GLuint* pShader, GLenum shaderType, const char* src_buf)
48
+ {
49
+ std::string src(src_buf);
50
+
51
+ // Set preprocessor directives.
52
+ int n = src.find('\n') + 1; // After first line containing #version directive.
53
+ if (s.enableZModify)
54
+ src.insert(n, "#define IF_ZMODIFY(x) x\n");
55
+ else
56
+ src.insert(n, "#define IF_ZMODIFY(x)\n");
57
+
58
+ const char *cstr = src.c_str();
59
+ *pShader = 0;
60
+ NVDR_CHECK_GL_ERROR(*pShader = glCreateShader(shaderType));
61
+ NVDR_CHECK_GL_ERROR(glShaderSource(*pShader, 1, &cstr, 0));
62
+ NVDR_CHECK_GL_ERROR(glCompileShader(*pShader));
63
+ }
64
+
65
+ static void constructGLProgram(NVDR_CTX_ARGS, GLuint* pProgram, GLuint glVertexShader, GLuint glGeometryShader, GLuint glFragmentShader)
66
+ {
67
+ *pProgram = 0;
68
+
69
+ GLuint glProgram = 0;
70
+ NVDR_CHECK_GL_ERROR(glProgram = glCreateProgram());
71
+ NVDR_CHECK_GL_ERROR(glAttachShader(glProgram, glVertexShader));
72
+ NVDR_CHECK_GL_ERROR(glAttachShader(glProgram, glGeometryShader));
73
+ NVDR_CHECK_GL_ERROR(glAttachShader(glProgram, glFragmentShader));
74
+ NVDR_CHECK_GL_ERROR(glLinkProgram(glProgram));
75
+
76
+ GLint linkStatus = 0;
77
+ NVDR_CHECK_GL_ERROR(glGetProgramiv(glProgram, GL_LINK_STATUS, &linkStatus));
78
+ if (!linkStatus)
79
+ {
80
+ GLint infoLen = 0;
81
+ NVDR_CHECK_GL_ERROR(glGetProgramiv(glProgram, GL_INFO_LOG_LENGTH, &infoLen));
82
+ if (infoLen)
83
+ {
84
+ const char* hdr = "glLinkProgram() failed:\n";
85
+ std::vector<char> info(strlen(hdr) + infoLen);
86
+ strcpy(&info[0], hdr);
87
+ NVDR_CHECK_GL_ERROR(glGetProgramInfoLog(glProgram, infoLen, &infoLen, &info[strlen(hdr)]));
88
+ NVDR_CHECK(0, &info[0]);
89
+ }
90
+ NVDR_CHECK(0, "glLinkProgram() failed");
91
+ }
92
+
93
+ *pProgram = glProgram;
94
+ }
95
+
96
+ //------------------------------------------------------------------------
97
+ // Shared C++ functions.
98
+
99
+ void rasterizeInitGLContext(NVDR_CTX_ARGS, RasterizeGLState& s, int cudaDeviceIdx)
100
+ {
101
+ // Create GL context and set it current.
102
+ s.glctx = createGLContext(cudaDeviceIdx);
103
+ setGLContext(s.glctx);
104
+
105
+ // Version check.
106
+ GLint vMajor = 0;
107
+ GLint vMinor = 0;
108
+ glGetIntegerv(GL_MAJOR_VERSION, &vMajor);
109
+ glGetIntegerv(GL_MINOR_VERSION, &vMinor);
110
+ glGetError(); // Clear possible GL_INVALID_ENUM error in version query.
111
+ LOG(INFO) << "OpenGL version reported as " << vMajor << "." << vMinor;
112
+ NVDR_CHECK((vMajor == 4 && vMinor >= 4) || vMajor > 4, "OpenGL 4.4 or later is required");
113
+
114
+ // Enable depth modification workaround on A100 and later.
115
+ int capMajor = 0;
116
+ NVDR_CHECK_CUDA_ERROR(cudaDeviceGetAttribute(&capMajor, cudaDevAttrComputeCapabilityMajor, cudaDeviceIdx));
117
+ s.enableZModify = (capMajor >= 8);
118
+
119
+ // Number of output buffers.
120
+ int num_outputs = s.enableDB ? 2 : 1;
121
+
122
+ // Set up vertex shader.
123
+ compileGLShader(NVDR_CTX_PARAMS, s, &s.glVertexShader, GL_VERTEX_SHADER,
124
+ "#version 330\n"
125
+ "#extension GL_ARB_shader_draw_parameters : enable\n"
126
+ STRINGIFY_SHADER_SOURCE(
127
+ layout(location = 0) in vec4 in_pos;
128
+ out int v_layer;
129
+ out int v_offset;
130
+ void main()
131
+ {
132
+ int layer = gl_DrawIDARB;
133
+ gl_Position = in_pos;
134
+ v_layer = layer;
135
+ v_offset = gl_BaseInstanceARB; // Sneak in TriID offset here.
136
+ }
137
+ )
138
+ );
139
+
140
+ // Geometry and fragment shaders depend on whether bary differential output is enabled.
141
+ if (s.enableDB)
142
+ {
143
+ // Set up geometry shader. Calculation of per-pixel bary differentials is based on:
144
+ // u = (u/w) / (1/w)
145
+ // --> du/dX = d((u/w) / (1/w))/dX
146
+ // --> du/dX = [d(u/w)/dX - u*d(1/w)/dX] * w
147
+ // and we know both d(u/w)/dX and d(1/w)/dX are constant over triangle.
148
+ compileGLShader(NVDR_CTX_PARAMS, s, &s.glGeometryShader, GL_GEOMETRY_SHADER,
149
+ "#version 430\n"
150
+ STRINGIFY_SHADER_SOURCE(
151
+ layout(triangles) in;
152
+ layout(triangle_strip, max_vertices=3) out;
153
+ layout(location = 0) uniform vec2 vp_scale;
154
+ in int v_layer[];
155
+ in int v_offset[];
156
+ out vec4 var_uvzw;
157
+ out vec4 var_db;
158
+ void main()
159
+ {
160
+ // Plane equations for bary differentials.
161
+ float w0 = gl_in[0].gl_Position.w;
162
+ float w1 = gl_in[1].gl_Position.w;
163
+ float w2 = gl_in[2].gl_Position.w;
164
+ vec2 p0 = gl_in[0].gl_Position.xy;
165
+ vec2 p1 = gl_in[1].gl_Position.xy;
166
+ vec2 p2 = gl_in[2].gl_Position.xy;
167
+ vec2 e0 = p0*w2 - p2*w0;
168
+ vec2 e1 = p1*w2 - p2*w1;
169
+ float a = e0.x*e1.y - e0.y*e1.x;
170
+
171
+ // Clamp area to an epsilon to avoid arbitrarily high bary differentials.
172
+ float eps = 1e-6f; // ~1 pixel in 1k x 1k image.
173
+ float ca = (abs(a) >= eps) ? a : (a < 0.f) ? -eps : eps; // Clamp with sign.
174
+ float ia = 1.f / ca; // Inverse area.
175
+
176
+ vec2 ascl = ia * vp_scale;
177
+ float dudx = e1.y * ascl.x;
178
+ float dudy = -e1.x * ascl.y;
179
+ float dvdx = -e0.y * ascl.x;
180
+ float dvdy = e0.x * ascl.y;
181
+
182
+ float duwdx = w2 * dudx;
183
+ float dvwdx = w2 * dvdx;
184
+ float duvdx = w0 * dudx + w1 * dvdx;
185
+ float duwdy = w2 * dudy;
186
+ float dvwdy = w2 * dvdy;
187
+ float duvdy = w0 * dudy + w1 * dvdy;
188
+
189
+ vec4 db0 = vec4(duvdx - dvwdx, duvdy - dvwdy, dvwdx, dvwdy);
190
+ vec4 db1 = vec4(duwdx, duwdy, duvdx - duwdx, duvdy - duwdy);
191
+ vec4 db2 = vec4(duwdx, duwdy, dvwdx, dvwdy);
192
+
193
+ int layer_id = v_layer[0];
194
+ int prim_id = gl_PrimitiveIDIn + v_offset[0];
195
+
196
+ gl_Layer = layer_id; gl_PrimitiveID = prim_id; gl_Position = vec4(gl_in[0].gl_Position.x, gl_in[0].gl_Position.y, gl_in[0].gl_Position.z, gl_in[0].gl_Position.w); var_uvzw = vec4(1.f, 0.f, gl_in[0].gl_Position.z, gl_in[0].gl_Position.w); var_db = db0; EmitVertex();
197
+ gl_Layer = layer_id; gl_PrimitiveID = prim_id; gl_Position = vec4(gl_in[1].gl_Position.x, gl_in[1].gl_Position.y, gl_in[1].gl_Position.z, gl_in[1].gl_Position.w); var_uvzw = vec4(0.f, 1.f, gl_in[1].gl_Position.z, gl_in[1].gl_Position.w); var_db = db1; EmitVertex();
198
+ gl_Layer = layer_id; gl_PrimitiveID = prim_id; gl_Position = vec4(gl_in[2].gl_Position.x, gl_in[2].gl_Position.y, gl_in[2].gl_Position.z, gl_in[2].gl_Position.w); var_uvzw = vec4(0.f, 0.f, gl_in[2].gl_Position.z, gl_in[2].gl_Position.w); var_db = db2; EmitVertex();
199
+ }
200
+ )
201
+ );
202
+
203
+ // Set up fragment shader.
204
+ compileGLShader(NVDR_CTX_PARAMS, s, &s.glFragmentShader, GL_FRAGMENT_SHADER,
205
+ "#version 430\n"
206
+ STRINGIFY_SHADER_SOURCE(
207
+ in vec4 var_uvzw;
208
+ in vec4 var_db;
209
+ layout(location = 0) out vec4 out_raster;
210
+ layout(location = 1) out vec4 out_db;
211
+ IF_ZMODIFY(
212
+ layout(location = 1) uniform float in_dummy;
213
+ )
214
+ void main()
215
+ {
216
+ int id_int = gl_PrimitiveID + 1;
217
+ float id_float = (id_int <= 0x01000000) ? float(id_int) : intBitsToFloat(0x4a800000 + id_int);
218
+
219
+ out_raster = vec4(var_uvzw.x, var_uvzw.y, var_uvzw.z / var_uvzw.w, id_float);
220
+ out_db = var_db * var_uvzw.w;
221
+ IF_ZMODIFY(gl_FragDepth = gl_FragCoord.z + in_dummy;)
222
+ }
223
+ )
224
+ );
225
+
226
+ // Set up fragment shader for depth peeling.
227
+ compileGLShader(NVDR_CTX_PARAMS, s, &s.glFragmentShaderDP, GL_FRAGMENT_SHADER,
228
+ "#version 430\n"
229
+ STRINGIFY_SHADER_SOURCE(
230
+ in vec4 var_uvzw;
231
+ in vec4 var_db;
232
+ layout(binding = 0) uniform sampler2DArray out_prev;
233
+ layout(location = 0) out vec4 out_raster;
234
+ layout(location = 1) out vec4 out_db;
235
+ IF_ZMODIFY(
236
+ layout(location = 1) uniform float in_dummy;
237
+ )
238
+ void main()
239
+ {
240
+ int id_int = gl_PrimitiveID + 1;
241
+ float id_float = (id_int <= 0x01000000) ? float(id_int) : intBitsToFloat(0x4a800000 + id_int);
242
+
243
+ vec4 prev = texelFetch(out_prev, ivec3(gl_FragCoord.x, gl_FragCoord.y, gl_Layer), 0);
244
+ float depth_new = var_uvzw.z / var_uvzw.w;
245
+ if (prev.w == 0 || depth_new <= prev.z)
246
+ discard;
247
+ out_raster = vec4(var_uvzw.x, var_uvzw.y, depth_new, id_float);
248
+ out_db = var_db * var_uvzw.w;
249
+ IF_ZMODIFY(gl_FragDepth = gl_FragCoord.z + in_dummy;)
250
+ }
251
+ )
252
+ );
253
+ }
254
+ else
255
+ {
256
+ // Geometry shader without bary differential output.
257
+ compileGLShader(NVDR_CTX_PARAMS, s, &s.glGeometryShader, GL_GEOMETRY_SHADER,
258
+ "#version 330\n"
259
+ STRINGIFY_SHADER_SOURCE(
260
+ layout(triangles) in;
261
+ layout(triangle_strip, max_vertices=3) out;
262
+ in int v_layer[];
263
+ in int v_offset[];
264
+ out vec4 var_uvzw;
265
+ void main()
266
+ {
267
+ int layer_id = v_layer[0];
268
+ int prim_id = gl_PrimitiveIDIn + v_offset[0];
269
+
270
+ gl_Layer = layer_id; gl_PrimitiveID = prim_id; gl_Position = vec4(gl_in[0].gl_Position.x, gl_in[0].gl_Position.y, gl_in[0].gl_Position.z, gl_in[0].gl_Position.w); var_uvzw = vec4(1.f, 0.f, gl_in[0].gl_Position.z, gl_in[0].gl_Position.w); EmitVertex();
271
+ gl_Layer = layer_id; gl_PrimitiveID = prim_id; gl_Position = vec4(gl_in[1].gl_Position.x, gl_in[1].gl_Position.y, gl_in[1].gl_Position.z, gl_in[1].gl_Position.w); var_uvzw = vec4(0.f, 1.f, gl_in[1].gl_Position.z, gl_in[1].gl_Position.w); EmitVertex();
272
+ gl_Layer = layer_id; gl_PrimitiveID = prim_id; gl_Position = vec4(gl_in[2].gl_Position.x, gl_in[2].gl_Position.y, gl_in[2].gl_Position.z, gl_in[2].gl_Position.w); var_uvzw = vec4(0.f, 0.f, gl_in[2].gl_Position.z, gl_in[2].gl_Position.w); EmitVertex();
273
+ }
274
+ )
275
+ );
276
+
277
+ // Fragment shader without bary differential output.
278
+ compileGLShader(NVDR_CTX_PARAMS, s, &s.glFragmentShader, GL_FRAGMENT_SHADER,
279
+ "#version 430\n"
280
+ STRINGIFY_SHADER_SOURCE(
281
+ in vec4 var_uvzw;
282
+ layout(location = 0) out vec4 out_raster;
283
+ IF_ZMODIFY(
284
+ layout(location = 1) uniform float in_dummy;
285
+ )
286
+ void main()
287
+ {
288
+ int id_int = gl_PrimitiveID + 1;
289
+ float id_float = (id_int <= 0x01000000) ? float(id_int) : intBitsToFloat(0x4a800000 + id_int);
290
+
291
+ out_raster = vec4(var_uvzw.x, var_uvzw.y, var_uvzw.z / var_uvzw.w, id_float);
292
+ IF_ZMODIFY(gl_FragDepth = gl_FragCoord.z + in_dummy;)
293
+ }
294
+ )
295
+ );
296
+
297
+ // Depth peeling variant of fragment shader.
298
+ compileGLShader(NVDR_CTX_PARAMS, s, &s.glFragmentShaderDP, GL_FRAGMENT_SHADER,
299
+ "#version 430\n"
300
+ STRINGIFY_SHADER_SOURCE(
301
+ in vec4 var_uvzw;
302
+ layout(binding = 0) uniform sampler2DArray out_prev;
303
+ layout(location = 0) out vec4 out_raster;
304
+ IF_ZMODIFY(
305
+ layout(location = 1) uniform float in_dummy;
306
+ )
307
+ void main()
308
+ {
309
+ int id_int = gl_PrimitiveID + 1;
310
+ float id_float = (id_int <= 0x01000000) ? float(id_int) : intBitsToFloat(0x4a800000 + id_int);
311
+
312
+ vec4 prev = texelFetch(out_prev, ivec3(gl_FragCoord.x, gl_FragCoord.y, gl_Layer), 0);
313
+ float depth_new = var_uvzw.z / var_uvzw.w;
314
+ if (prev.w == 0 || depth_new <= prev.z)
315
+ discard;
316
+ out_raster = vec4(var_uvzw.x, var_uvzw.y, var_uvzw.z / var_uvzw.w, id_float);
317
+ IF_ZMODIFY(gl_FragDepth = gl_FragCoord.z + in_dummy;)
318
+ }
319
+ )
320
+ );
321
+ }
322
+
323
+ // Finalize programs.
324
+ constructGLProgram(NVDR_CTX_PARAMS, &s.glProgram, s.glVertexShader, s.glGeometryShader, s.glFragmentShader);
325
+ constructGLProgram(NVDR_CTX_PARAMS, &s.glProgramDP, s.glVertexShader, s.glGeometryShader, s.glFragmentShaderDP);
326
+
327
+ // Construct main fbo and bind permanently.
328
+ NVDR_CHECK_GL_ERROR(glGenFramebuffers(1, &s.glFBO));
329
+ NVDR_CHECK_GL_ERROR(glBindFramebuffer(GL_FRAMEBUFFER, s.glFBO));
330
+
331
+ // Enable two color attachments.
332
+ GLenum draw_buffers[2] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
333
+ NVDR_CHECK_GL_ERROR(glDrawBuffers(num_outputs, draw_buffers));
334
+
335
+ // Construct vertex array object.
336
+ NVDR_CHECK_GL_ERROR(glGenVertexArrays(1, &s.glVAO));
337
+ NVDR_CHECK_GL_ERROR(glBindVertexArray(s.glVAO));
338
+
339
+ // Construct position buffer, bind permanently, enable, set ptr.
340
+ NVDR_CHECK_GL_ERROR(glGenBuffers(1, &s.glPosBuffer));
341
+ NVDR_CHECK_GL_ERROR(glBindBuffer(GL_ARRAY_BUFFER, s.glPosBuffer));
342
+ NVDR_CHECK_GL_ERROR(glEnableVertexAttribArray(0));
343
+ NVDR_CHECK_GL_ERROR(glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, 0));
344
+
345
+ // Construct index buffer and bind permanently.
346
+ NVDR_CHECK_GL_ERROR(glGenBuffers(1, &s.glTriBuffer));
347
+ NVDR_CHECK_GL_ERROR(glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s.glTriBuffer));
348
+
349
+ // Set up depth test.
350
+ NVDR_CHECK_GL_ERROR(glEnable(GL_DEPTH_TEST));
351
+ NVDR_CHECK_GL_ERROR(glDepthFunc(GL_LESS));
352
+ NVDR_CHECK_GL_ERROR(glClearDepth(1.0));
353
+
354
+ // Create and bind output buffers. Storage is allocated later.
355
+ NVDR_CHECK_GL_ERROR(glGenTextures(num_outputs, s.glColorBuffer));
356
+ for (int i=0; i < num_outputs; i++)
357
+ {
358
+ NVDR_CHECK_GL_ERROR(glBindTexture(GL_TEXTURE_2D_ARRAY, s.glColorBuffer[i]));
359
+ NVDR_CHECK_GL_ERROR(glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, s.glColorBuffer[i], 0));
360
+ }
361
+
362
+ // Create and bind depth/stencil buffer. Storage is allocated later.
363
+ NVDR_CHECK_GL_ERROR(glGenTextures(1, &s.glDepthStencilBuffer));
364
+ NVDR_CHECK_GL_ERROR(glBindTexture(GL_TEXTURE_2D_ARRAY, s.glDepthStencilBuffer));
365
+ NVDR_CHECK_GL_ERROR(glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, s.glDepthStencilBuffer, 0));
366
+
367
+ // Create texture name for previous output buffer (depth peeling).
368
+ NVDR_CHECK_GL_ERROR(glGenTextures(1, &s.glPrevOutBuffer));
369
+ }
370
+
371
+ void rasterizeResizeBuffers(NVDR_CTX_ARGS, RasterizeGLState& s, bool& changes, int posCount, int triCount, int width, int height, int depth)
372
+ {
373
+ changes = false;
374
+
375
+ // Resize vertex buffer?
376
+ if (posCount > s.posCount)
377
+ {
378
+ if (s.cudaPosBuffer)
379
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsUnregisterResource(s.cudaPosBuffer));
380
+ s.posCount = (posCount > 64) ? ROUND_UP_BITS(posCount, 2) : 64;
381
+ LOG(INFO) << "Increasing position buffer size to " << s.posCount << " float32";
382
+ NVDR_CHECK_GL_ERROR(glBufferData(GL_ARRAY_BUFFER, s.posCount * sizeof(float), NULL, GL_DYNAMIC_DRAW));
383
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsGLRegisterBuffer(&s.cudaPosBuffer, s.glPosBuffer, cudaGraphicsRegisterFlagsWriteDiscard));
384
+ changes = true;
385
+ }
386
+
387
+ // Resize triangle buffer?
388
+ if (triCount > s.triCount)
389
+ {
390
+ if (s.cudaTriBuffer)
391
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsUnregisterResource(s.cudaTriBuffer));
392
+ s.triCount = (triCount > 64) ? ROUND_UP_BITS(triCount, 2) : 64;
393
+ LOG(INFO) << "Increasing triangle buffer size to " << s.triCount << " int32";
394
+ NVDR_CHECK_GL_ERROR(glBufferData(GL_ELEMENT_ARRAY_BUFFER, s.triCount * sizeof(int32_t), NULL, GL_DYNAMIC_DRAW));
395
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsGLRegisterBuffer(&s.cudaTriBuffer, s.glTriBuffer, cudaGraphicsRegisterFlagsWriteDiscard));
396
+ changes = true;
397
+ }
398
+
399
+ // Resize framebuffer?
400
+ if (width > s.width || height > s.height || depth > s.depth)
401
+ {
402
+ int num_outputs = s.enableDB ? 2 : 1;
403
+ if (s.cudaColorBuffer[0])
404
+ for (int i=0; i < num_outputs; i++)
405
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsUnregisterResource(s.cudaColorBuffer[i]));
406
+
407
+ if (s.cudaPrevOutBuffer)
408
+ {
409
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsUnregisterResource(s.cudaPrevOutBuffer));
410
+ s.cudaPrevOutBuffer = 0;
411
+ }
412
+
413
+ // New framebuffer size.
414
+ s.width = (width > s.width) ? width : s.width;
415
+ s.height = (height > s.height) ? height : s.height;
416
+ s.depth = (depth > s.depth) ? depth : s.depth;
417
+ s.width = ROUND_UP(s.width, 32);
418
+ s.height = ROUND_UP(s.height, 32);
419
+ LOG(INFO) << "Increasing frame buffer size to (width, height, depth) = (" << s.width << ", " << s.height << ", " << s.depth << ")";
420
+
421
+ // Allocate color buffers.
422
+ for (int i=0; i < num_outputs; i++)
423
+ {
424
+ NVDR_CHECK_GL_ERROR(glBindTexture(GL_TEXTURE_2D_ARRAY, s.glColorBuffer[i]));
425
+ NVDR_CHECK_GL_ERROR(glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_RGBA32F, s.width, s.height, s.depth, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0));
426
+ NVDR_CHECK_GL_ERROR(glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
427
+ NVDR_CHECK_GL_ERROR(glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
428
+ NVDR_CHECK_GL_ERROR(glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
429
+ NVDR_CHECK_GL_ERROR(glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
430
+ }
431
+
432
+ // Allocate depth/stencil buffer.
433
+ NVDR_CHECK_GL_ERROR(glBindTexture(GL_TEXTURE_2D_ARRAY, s.glDepthStencilBuffer));
434
+ NVDR_CHECK_GL_ERROR(glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_DEPTH24_STENCIL8, s.width, s.height, s.depth, 0, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, 0));
435
+
436
+ // (Re-)register all GL buffers into Cuda.
437
+ for (int i=0; i < num_outputs; i++)
438
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsGLRegisterImage(&s.cudaColorBuffer[i], s.glColorBuffer[i], GL_TEXTURE_3D, cudaGraphicsRegisterFlagsReadOnly));
439
+
440
+ changes = true;
441
+ }
442
+ }
443
+
444
+ void rasterizeRender(NVDR_CTX_ARGS, RasterizeGLState& s, cudaStream_t stream, const float* posPtr, int posCount, int vtxPerInstance, const int32_t* triPtr, int triCount, const int32_t* rangesPtr, int width, int height, int depth, int peeling_idx)
445
+ {
446
+ // Only copy inputs if we are on first iteration of depth peeling or not doing it at all.
447
+ if (peeling_idx < 1)
448
+ {
449
+ if (triPtr)
450
+ {
451
+ // Copy both position and triangle buffers.
452
+ void* glPosPtr = NULL;
453
+ void* glTriPtr = NULL;
454
+ size_t posBytes = 0;
455
+ size_t triBytes = 0;
456
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsMapResources(2, &s.cudaPosBuffer, stream));
457
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsResourceGetMappedPointer(&glPosPtr, &posBytes, s.cudaPosBuffer));
458
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsResourceGetMappedPointer(&glTriPtr, &triBytes, s.cudaTriBuffer));
459
+ NVDR_CHECK(posBytes >= posCount * sizeof(float), "mapped GL position buffer size mismatch");
460
+ NVDR_CHECK(triBytes >= triCount * sizeof(int32_t), "mapped GL triangle buffer size mismatch");
461
+ NVDR_CHECK_CUDA_ERROR(cudaMemcpyAsync(glPosPtr, posPtr, posCount * sizeof(float), cudaMemcpyDeviceToDevice, stream));
462
+ NVDR_CHECK_CUDA_ERROR(cudaMemcpyAsync(glTriPtr, triPtr, triCount * sizeof(int32_t), cudaMemcpyDeviceToDevice, stream));
463
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsUnmapResources(2, &s.cudaPosBuffer, stream));
464
+ }
465
+ else
466
+ {
467
+ // Copy position buffer only. Triangles are already copied and known to be constant.
468
+ void* glPosPtr = NULL;
469
+ size_t posBytes = 0;
470
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsMapResources(1, &s.cudaPosBuffer, stream));
471
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsResourceGetMappedPointer(&glPosPtr, &posBytes, s.cudaPosBuffer));
472
+ NVDR_CHECK(posBytes >= posCount * sizeof(float), "mapped GL position buffer size mismatch");
473
+ NVDR_CHECK_CUDA_ERROR(cudaMemcpyAsync(glPosPtr, posPtr, posCount * sizeof(float), cudaMemcpyDeviceToDevice, stream));
474
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsUnmapResources(1, &s.cudaPosBuffer, stream));
475
+ }
476
+ }
477
+
478
+ // Select program based on whether we have a depth peeling input or not.
479
+ if (peeling_idx < 1)
480
+ {
481
+ // Normal case: No peeling, or peeling disabled.
482
+ NVDR_CHECK_GL_ERROR(glUseProgram(s.glProgram));
483
+ }
484
+ else
485
+ {
486
+ // If we don't have a third buffer yet, create one.
487
+ if (!s.cudaPrevOutBuffer)
488
+ {
489
+ NVDR_CHECK_GL_ERROR(glBindTexture(GL_TEXTURE_2D_ARRAY, s.glPrevOutBuffer));
490
+ NVDR_CHECK_GL_ERROR(glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_RGBA32F, s.width, s.height, s.depth, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0));
491
+ NVDR_CHECK_GL_ERROR(glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
492
+ NVDR_CHECK_GL_ERROR(glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
493
+ NVDR_CHECK_GL_ERROR(glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
494
+ NVDR_CHECK_GL_ERROR(glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
495
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsGLRegisterImage(&s.cudaPrevOutBuffer, s.glPrevOutBuffer, GL_TEXTURE_3D, cudaGraphicsRegisterFlagsReadOnly));
496
+ }
497
+
498
+ // Swap the GL buffers.
499
+ GLuint glTempBuffer = s.glPrevOutBuffer;
500
+ s.glPrevOutBuffer = s.glColorBuffer[0];
501
+ s.glColorBuffer[0] = glTempBuffer;
502
+
503
+ // Swap the Cuda buffers.
504
+ cudaGraphicsResource_t cudaTempBuffer = s.cudaPrevOutBuffer;
505
+ s.cudaPrevOutBuffer = s.cudaColorBuffer[0];
506
+ s.cudaColorBuffer[0] = cudaTempBuffer;
507
+
508
+ // Bind the new output buffer.
509
+ NVDR_CHECK_GL_ERROR(glBindTexture(GL_TEXTURE_2D_ARRAY, s.glColorBuffer[0]));
510
+ NVDR_CHECK_GL_ERROR(glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, s.glColorBuffer[0], 0));
511
+
512
+ // Bind old buffer as the input texture.
513
+ NVDR_CHECK_GL_ERROR(glBindTexture(GL_TEXTURE_2D_ARRAY, s.glPrevOutBuffer));
514
+
515
+ // Activate the correct program.
516
+ NVDR_CHECK_GL_ERROR(glUseProgram(s.glProgramDP));
517
+ }
518
+
519
+ // Set viewport, clear color buffer(s) and depth/stencil buffer.
520
+ NVDR_CHECK_GL_ERROR(glViewport(0, 0, width, height));
521
+ NVDR_CHECK_GL_ERROR(glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT));
522
+
523
+ // If outputting bary differentials, set resolution uniform
524
+ if (s.enableDB)
525
+ NVDR_CHECK_GL_ERROR(glUniform2f(0, 2.f / (float)width, 2.f / (float)height));
526
+
527
+ // Set the dummy uniform if depth modification workaround is active.
528
+ if (s.enableZModify)
529
+ NVDR_CHECK_GL_ERROR(glUniform1f(1, 0.f));
530
+
531
+ // Render the meshes.
532
+ if (depth == 1 && !rangesPtr)
533
+ {
534
+ // Trivial case.
535
+ NVDR_CHECK_GL_ERROR(glDrawElements(GL_TRIANGLES, triCount, GL_UNSIGNED_INT, 0));
536
+ }
537
+ else
538
+ {
539
+ // Populate a buffer for draw commands and execute it.
540
+ std::vector<GLDrawCmd> drawCmdBuffer(depth);
541
+
542
+ if (!rangesPtr)
543
+ {
544
+ // Fill in range array to instantiate the same triangles for each output layer.
545
+ // Triangle IDs starts at zero (i.e., one) for each layer, so they correspond to
546
+ // the first dimension in addressing the triangle array.
547
+ for (int i=0; i < depth; i++)
548
+ {
549
+ GLDrawCmd& cmd = drawCmdBuffer[i];
550
+ cmd.firstIndex = 0;
551
+ cmd.count = triCount;
552
+ cmd.baseVertex = vtxPerInstance * i;
553
+ cmd.baseInstance = 0;
554
+ cmd.instanceCount = 1;
555
+ }
556
+ }
557
+ else
558
+ {
559
+ // Fill in the range array according to user-given ranges. Triangle IDs point
560
+ // to the input triangle array, NOT index within range, so they correspond to
561
+ // the first dimension in addressing the triangle array.
562
+ for (int i=0, j=0; i < depth; i++)
563
+ {
564
+ GLDrawCmd& cmd = drawCmdBuffer[i];
565
+ int first = rangesPtr[j++];
566
+ int count = rangesPtr[j++];
567
+ NVDR_CHECK(first >= 0 && count >= 0, "range contains negative values");
568
+ NVDR_CHECK((first + count) * 3 <= triCount, "range extends beyond end of triangle buffer");
569
+ cmd.firstIndex = first * 3;
570
+ cmd.count = count * 3;
571
+ cmd.baseVertex = 0;
572
+ cmd.baseInstance = first;
573
+ cmd.instanceCount = 1;
574
+ }
575
+ }
576
+
577
+ // Draw!
578
+ NVDR_CHECK_GL_ERROR(glMultiDrawElementsIndirect(GL_TRIANGLES, GL_UNSIGNED_INT, &drawCmdBuffer[0], depth, sizeof(GLDrawCmd)));
579
+ }
580
+ }
581
+
582
+ void rasterizeCopyResults(NVDR_CTX_ARGS, RasterizeGLState& s, cudaStream_t stream, float** outputPtr, int width, int height, int depth)
583
+ {
584
+ // Copy color buffers to output tensors.
585
+ cudaArray_t array = 0;
586
+ cudaChannelFormatDesc arrayDesc = {}; // For error checking.
587
+ cudaExtent arrayExt = {}; // For error checking.
588
+ int num_outputs = s.enableDB ? 2 : 1;
589
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsMapResources(num_outputs, s.cudaColorBuffer, stream));
590
+ for (int i=0; i < num_outputs; i++)
591
+ {
592
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsSubResourceGetMappedArray(&array, s.cudaColorBuffer[i], 0, 0));
593
+ NVDR_CHECK_CUDA_ERROR(cudaArrayGetInfo(&arrayDesc, &arrayExt, NULL, array));
594
+ NVDR_CHECK(arrayDesc.f == cudaChannelFormatKindFloat, "CUDA mapped array data kind mismatch");
595
+ NVDR_CHECK(arrayDesc.x == 32 && arrayDesc.y == 32 && arrayDesc.z == 32 && arrayDesc.w == 32, "CUDA mapped array data width mismatch");
596
+ NVDR_CHECK(arrayExt.width >= width && arrayExt.height >= height && arrayExt.depth >= depth, "CUDA mapped array extent mismatch");
597
+ cudaMemcpy3DParms p = {0};
598
+ p.srcArray = array;
599
+ p.dstPtr.ptr = outputPtr[i];
600
+ p.dstPtr.pitch = width * 4 * sizeof(float);
601
+ p.dstPtr.xsize = width;
602
+ p.dstPtr.ysize = height;
603
+ p.extent.width = width;
604
+ p.extent.height = height;
605
+ p.extent.depth = depth;
606
+ p.kind = cudaMemcpyDeviceToDevice;
607
+ NVDR_CHECK_CUDA_ERROR(cudaMemcpy3DAsync(&p, stream));
608
+ }
609
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsUnmapResources(num_outputs, s.cudaColorBuffer, stream));
610
+ }
611
+
612
+ void rasterizeReleaseBuffers(NVDR_CTX_ARGS, RasterizeGLState& s)
613
+ {
614
+ int num_outputs = s.enableDB ? 2 : 1;
615
+
616
+ if (s.cudaPosBuffer)
617
+ {
618
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsUnregisterResource(s.cudaPosBuffer));
619
+ s.cudaPosBuffer = 0;
620
+ }
621
+
622
+ if (s.cudaTriBuffer)
623
+ {
624
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsUnregisterResource(s.cudaTriBuffer));
625
+ s.cudaTriBuffer = 0;
626
+ }
627
+
628
+ for (int i=0; i < num_outputs; i++)
629
+ {
630
+ if (s.cudaColorBuffer[i])
631
+ {
632
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsUnregisterResource(s.cudaColorBuffer[i]));
633
+ s.cudaColorBuffer[i] = 0;
634
+ }
635
+ }
636
+
637
+ if (s.cudaPrevOutBuffer)
638
+ {
639
+ NVDR_CHECK_CUDA_ERROR(cudaGraphicsUnregisterResource(s.cudaPrevOutBuffer));
640
+ s.cudaPrevOutBuffer = 0;
641
+ }
642
+ }
643
+
644
+ //------------------------------------------------------------------------
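The range-to-draw-command expansion performed in rasterizeRender() above can be illustrated with a minimal standalone sketch. The GLDrawCmd struct below is a local stand-in that only reuses the field names seen in the file, and the concrete ranges are hypothetical; this is an illustration, not part of the library.

#include <cstdio>
#include <vector>

struct GLDrawCmd   // Local stand-in for the indirect draw command used above.
{
    unsigned int count;
    unsigned int instanceCount;
    unsigned int firstIndex;
    unsigned int baseVertex;
    unsigned int baseInstance;
};

int main(void)
{
    // Two layers: layer 0 draws triangles [0, 2), layer 1 draws triangles [2, 5).
    int ranges[] = { 0, 2,   2, 3 };
    int depth = 2;

    std::vector<GLDrawCmd> cmds(depth);
    for (int i = 0, j = 0; i < depth; i++)
    {
        int first = ranges[j++];
        int count = ranges[j++];
        cmds[i].firstIndex    = first * 3;   // Three indices per triangle.
        cmds[i].count         = count * 3;
        cmds[i].baseVertex    = 0;
        cmds[i].baseInstance  = first;       // Offset so triangle IDs refer to the input triangle array.
        cmds[i].instanceCount = 1;
        std::printf("layer %d: firstIndex=%u count=%u baseInstance=%u\n",
                    i, cmds[i].firstIndex, cmds[i].count, cmds[i].baseInstance);
    }
    return 0;
}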
extensions/nvdiffrast/nvdiffrast/common/rasterize_gl.h ADDED
@@ -0,0 +1,60 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+
11
+ //------------------------------------------------------------------------
12
+ // Do not try to include OpenGL stuff when compiling CUDA kernels for torch.
13
+
14
+ #if !(defined(NVDR_TORCH) && defined(__CUDACC__))
15
+ #include "framework.h"
16
+ #include "glutil.h"
17
+
18
+ //------------------------------------------------------------------------
19
+ // OpenGL-related persistent state for forward op.
20
+
21
+ struct RasterizeGLState // Must be initializable by memset to zero.
22
+ {
23
+ int width; // Allocated frame buffer width.
24
+ int height; // Allocated frame buffer height.
25
+ int depth; // Allocated frame buffer depth.
26
+ int posCount; // Allocated position buffer in floats.
27
+ int triCount; // Allocated triangle buffer in ints.
28
+ GLContext glctx;
29
+ GLuint glFBO;
30
+ GLuint glColorBuffer[2];
31
+ GLuint glPrevOutBuffer;
32
+ GLuint glDepthStencilBuffer;
33
+ GLuint glVAO;
34
+ GLuint glTriBuffer;
35
+ GLuint glPosBuffer;
36
+ GLuint glProgram;
37
+ GLuint glProgramDP;
38
+ GLuint glVertexShader;
39
+ GLuint glGeometryShader;
40
+ GLuint glFragmentShader;
41
+ GLuint glFragmentShaderDP;
42
+ cudaGraphicsResource_t cudaColorBuffer[2];
43
+ cudaGraphicsResource_t cudaPrevOutBuffer;
44
+ cudaGraphicsResource_t cudaPosBuffer;
45
+ cudaGraphicsResource_t cudaTriBuffer;
46
+ int enableDB;
47
+ int enableZModify; // Modify depth in shader, workaround for a rasterization issue on A100.
48
+ };
49
+
50
+ //------------------------------------------------------------------------
51
+ // Shared C++ code prototypes.
52
+
53
+ void rasterizeInitGLContext(NVDR_CTX_ARGS, RasterizeGLState& s, int cudaDeviceIdx);
54
+ void rasterizeResizeBuffers(NVDR_CTX_ARGS, RasterizeGLState& s, bool& changes, int posCount, int triCount, int width, int height, int depth);
55
+ void rasterizeRender(NVDR_CTX_ARGS, RasterizeGLState& s, cudaStream_t stream, const float* posPtr, int posCount, int vtxPerInstance, const int32_t* triPtr, int triCount, const int32_t* rangesPtr, int width, int height, int depth, int peeling_idx);
56
+ void rasterizeCopyResults(NVDR_CTX_ARGS, RasterizeGLState& s, cudaStream_t stream, float** outputPtr, int width, int height, int depth);
57
+ void rasterizeReleaseBuffers(NVDR_CTX_ARGS, RasterizeGLState& s);
58
+
59
+ //------------------------------------------------------------------------
60
+ #endif // !(defined(NVDR_TORCH) && defined(__CUDACC__))
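RasterizeGLState keeps both glColorBuffer[0] and glPrevOutBuffer so that depth peeling can ping-pong between render target and input texture. The buffer swap that rasterizeRender() performs between successive peeling passes (its guarding condition sits outside this excerpt) can be sketched in isolation as follows; plain ints stand in for the GLuint handles, so this is purely illustrative.

#include <cstdio>

struct StateSketch
{
    int glColorBuffer[2];   // Stand-ins for the GLuint texture handles.
    int glPrevOutBuffer;
};

int main(void)
{
    StateSketch s = { {10, 11}, 12 };

    // Same swap as in rasterizeRender(): the previous pass's output becomes
    // the peeling input of the next pass, and its old output handle is
    // reused as the new render target.
    int glTempBuffer   = s.glPrevOutBuffer;
    s.glPrevOutBuffer  = s.glColorBuffer[0];
    s.glColorBuffer[0] = glTempBuffer;

    std::printf("render target = %d, peeling input = %d\n",
                s.glColorBuffer[0], s.glPrevOutBuffer);
    return 0;
}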
extensions/nvdiffrast/nvdiffrast/common/texture.cpp ADDED
@@ -0,0 +1,104 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "framework.h"
10
+ #include "texture.h"
11
+
12
+ //------------------------------------------------------------------------
13
+ // Mip stack construction and access helpers.
14
+
15
+ void raiseMipSizeError(NVDR_CTX_ARGS, const TextureKernelParams& p)
16
+ {
17
+ char buf[1024];
18
+ int bufsz = 1024;
19
+
20
+ std::string msg = "Mip-map size error - cannot downsample an odd extent greater than 1. Resize the texture so that both spatial extents are powers of two, or limit the number of mip maps using max_mip_level argument.\n";
21
+
22
+ int w = p.texWidth;
23
+ int h = p.texHeight;
24
+ bool ew = false;
25
+ bool eh = false;
26
+
27
+ msg += "Attempted mip stack construction:\n";
28
+ msg += "level width height\n";
29
+ msg += "----- ----- ------\n";
30
+ snprintf(buf, bufsz, "base %5d %5d\n", w, h);
31
+ msg += buf;
32
+
33
+ int mipTotal = 0;
34
+ int level = 0;
35
+ while ((w|h) > 1 && !(ew || eh)) // Stop at first impossible size.
36
+ {
37
+ // Current level.
38
+ level += 1;
39
+
40
+ // Determine if downsampling fails.
41
+ ew = ew || (w > 1 && (w & 1));
42
+ eh = eh || (h > 1 && (h & 1));
43
+
44
+ // Downsample.
45
+ if (w > 1) w >>= 1;
46
+ if (h > 1) h >>= 1;
47
+
48
+ // Append level size to error message.
49
+ snprintf(buf, bufsz, "mip %-2d ", level);
50
+ msg += buf;
51
+ if (ew) snprintf(buf, bufsz, " err ");
52
+ else snprintf(buf, bufsz, "%5d ", w);
53
+ msg += buf;
54
+ if (eh) snprintf(buf, bufsz, " err\n");
55
+ else snprintf(buf, bufsz, "%5d\n", h);
56
+ msg += buf;
57
+ }
58
+
59
+ NVDR_CHECK(0, msg);
60
+ }
61
+
62
+ int calculateMipInfo(NVDR_CTX_ARGS, TextureKernelParams& p, int* mipOffsets)
63
+ {
64
+ // No levels at all?
65
+ if (p.mipLevelLimit == 0)
66
+ {
67
+ p.mipLevelMax = 0;
68
+ return 0;
69
+ }
70
+
71
+ // Current level size.
72
+ int w = p.texWidth;
73
+ int h = p.texHeight;
74
+
75
+ int mipTotal = 0;
76
+ int level = 0;
77
+ int c = (p.boundaryMode == TEX_BOUNDARY_MODE_CUBE) ? (p.channels * 6) : p.channels;
78
+ mipOffsets[0] = 0;
79
+ while ((w|h) > 1)
80
+ {
81
+ // Current level.
82
+ level += 1;
83
+
84
+ // Quit if cannot downsample.
85
+ if ((w > 1 && (w & 1)) || (h > 1 && (h & 1)))
86
+ raiseMipSizeError(NVDR_CTX_PARAMS, p);
87
+
88
+ // Downsample.
89
+ if (w > 1) w >>= 1;
90
+ if (h > 1) h >>= 1;
91
+
92
+ mipOffsets[level] = mipTotal; // Store the mip offset (#floats).
93
+ mipTotal += w * h * p.texDepth * c;
94
+
95
+ // Hit the level limit?
96
+ if (p.mipLevelLimit >= 0 && level == p.mipLevelLimit)
97
+ break;
98
+ }
99
+
100
+ p.mipLevelMax = level;
101
+ return mipTotal;
102
+ }
103
+
104
+ //------------------------------------------------------------------------
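The mip-offset bookkeeping done by calculateMipInfo() above can be checked with a short standalone sketch: each level halves the extents that are still larger than one, and mipOffsets[level] records where that level starts inside the flat mip buffer, counted in floats. The texture shape and channel count below are hypothetical.

#include <cstdio>

int main(void)
{
    int w = 32, h = 8, depth = 1, channels = 4;   // Hypothetical texture shape.
    int mipOffsets[16] = { 0 };
    int mipTotal = 0;
    int level = 0;
    while ((w | h) > 1)
    {
        level += 1;
        if (w > 1) w >>= 1;                        // Same downsampling rule as above.
        if (h > 1) h >>= 1;
        mipOffsets[level] = mipTotal;              // Offset of this level (#floats).
        mipTotal += w * h * depth * channels;
        std::printf("mip %d: %dx%d, offset %d\n", level, w, h, mipOffsets[level]);
    }
    std::printf("total mip storage: %d floats\n", mipTotal);
    return 0;
}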
extensions/nvdiffrast/nvdiffrast/common/texture.h ADDED
@@ -0,0 +1,78 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #pragma once
10
+ #include "framework.h"
11
+
12
+ //------------------------------------------------------------------------
13
+ // Constants.
14
+
15
+ #define TEX_DEBUG_MIP_RETAIN_VARIANCE 0 // For debugging
16
+ #define TEX_FWD_MAX_KERNEL_BLOCK_WIDTH 8
17
+ #define TEX_FWD_MAX_KERNEL_BLOCK_HEIGHT 8
18
+ #define TEX_FWD_MAX_MIP_KERNEL_BLOCK_WIDTH 8
19
+ #define TEX_FWD_MAX_MIP_KERNEL_BLOCK_HEIGHT 8
20
+ #define TEX_GRAD_MAX_KERNEL_BLOCK_WIDTH 8
21
+ #define TEX_GRAD_MAX_KERNEL_BLOCK_HEIGHT 8
22
+ #define TEX_GRAD_MAX_MIP_KERNEL_BLOCK_WIDTH 8
23
+ #define TEX_GRAD_MAX_MIP_KERNEL_BLOCK_HEIGHT 8
24
+ #define TEX_MAX_MIP_LEVEL 16 // Currently a texture cannot be larger than 2 GB because we use 32-bit indices everywhere.
25
+ #define TEX_MODE_NEAREST 0 // Nearest on base level.
26
+ #define TEX_MODE_LINEAR 1 // Bilinear on base level.
27
+ #define TEX_MODE_LINEAR_MIPMAP_NEAREST 2 // Bilinear on nearest mip level.
28
+ #define TEX_MODE_LINEAR_MIPMAP_LINEAR 3 // Trilinear.
29
+ #define TEX_MODE_COUNT 4
30
+ #define TEX_BOUNDARY_MODE_CUBE 0 // Cube map mode.
31
+ #define TEX_BOUNDARY_MODE_WRAP 1 // Wrap (u, v).
32
+ #define TEX_BOUNDARY_MODE_CLAMP 2 // Clamp (u, v).
33
+ #define TEX_BOUNDARY_MODE_ZERO 3 // Pad with zeros.
34
+ #define TEX_BOUNDARY_MODE_COUNT 4
35
+
36
+ //------------------------------------------------------------------------
37
+ // CUDA kernel params.
38
+
39
+ struct TextureKernelParams
40
+ {
41
+ const float* tex[TEX_MAX_MIP_LEVEL]; // Incoming texture buffer with mip levels.
42
+ const float* uv; // Incoming texcoord buffer.
43
+ const float* uvDA; // Incoming uv pixel diffs or NULL.
44
+ const float* mipLevelBias; // Incoming mip level bias or NULL.
45
+ const float* dy; // Incoming output gradient.
46
+ float* out; // Outgoing texture data.
47
+ float* gradTex[TEX_MAX_MIP_LEVEL]; // Outgoing texture gradients with mip levels.
48
+ float* gradUV; // Outgoing texcoord gradient.
49
+ float* gradUVDA; // Outgoing texcoord pixel differential gradient.
50
+ float* gradMipLevelBias; // Outgoing mip level bias gradient.
51
+ int enableMip; // If true, we have uv_da and/or mip_level_bias input(s), and a mip tensor.
52
+ int filterMode; // One of the TEX_MODE_ constants.
53
+ int boundaryMode; // One of the TEX_BOUNDARY_MODE_ constants.
54
+ int texConst; // If true, texture is known to be constant.
55
+ int mipLevelLimit; // Mip level limit coming from the op.
56
+ int channels; // Number of texture channels.
57
+ int imgWidth; // Image width.
58
+ int imgHeight; // Image height.
59
+ int texWidth; // Texture width.
60
+ int texHeight; // Texture height.
61
+ int texDepth; // Texture depth.
62
+ int n; // Minibatch size.
63
+ int mipLevelMax; // Maximum mip level index. Zero if mips disabled.
64
+ int mipLevelOut; // Mip level being calculated in builder kernel.
65
+ };
66
+
67
+ //------------------------------------------------------------------------
68
+ // C++ helper function prototypes.
69
+
70
+ void raiseMipSizeError(NVDR_CTX_ARGS, const TextureKernelParams& p);
71
+ int calculateMipInfo(NVDR_CTX_ARGS, TextureKernelParams& p, int* mipOffsets);
72
+
73
+ //------------------------------------------------------------------------
74
+ // Macros.
75
+
76
+ #define mipLevelSize(p, i) make_int2(((p).texWidth >> (i)) > 1 ? ((p).texWidth >> (i)) : 1, ((p).texHeight >> (i)) > 1 ? ((p).texHeight >> (i)) : 1)
77
+
78
+ //------------------------------------------------------------------------
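A quick standalone check of the mipLevelSize() macro defined above: each extent is shifted right by the level index and clamped to a minimum of 1, so a non-square texture stops shrinking along its short axis first. The sizes are hypothetical and the macro body is reproduced inline rather than included from the header.

#include <cstdio>

int main(void)
{
    int texWidth = 16, texHeight = 4;              // Hypothetical texture shape.
    for (int i = 0; i <= 5; i++)
    {
        int w = (texWidth  >> i) > 1 ? (texWidth  >> i) : 1;   // Same clamp as the macro.
        int h = (texHeight >> i) > 1 ? (texHeight >> i) : 1;
        std::printf("level %d: %d x %d\n", i, w, h);
    }
    return 0;
}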
extensions/nvdiffrast/nvdiffrast/common/texture_.cu ADDED
@@ -0,0 +1,1156 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "common.h"
10
+ #include "texture.h"
11
+
12
+ //------------------------------------------------------------------------
13
+ // Memory access and math helpers.
14
+
15
+ static __device__ __forceinline__ void accum_from_mem(float* a, int s, float b, float c) { a[0] += b * c; }
16
+ static __device__ __forceinline__ void accum_from_mem(float* a, int s, float2 b, float c) { a[0] += b.x * c; a[s] += b.y * c; }
17
+ static __device__ __forceinline__ void accum_from_mem(float* a, int s, float4 b, float c) { a[0] += b.x * c; a[s] += b.y * c; a[2*s] += b.z * c; a[3*s] += b.w * c; }
18
+ static __device__ __forceinline__ void accum_to_mem(float& a, float* b, int s) { a += b[0]; }
19
+ static __device__ __forceinline__ void accum_to_mem(float2& a, float* b, int s) { float2 v = a; v.x += b[0]; v.y += b[s]; a = v; }
20
+ static __device__ __forceinline__ void accum_to_mem(float4& a, float* b, int s) { float4 v = a; v.x += b[0]; v.y += b[s]; v.z += b[2*s]; v.w += b[3*s]; a = v; }
21
+ static __device__ __forceinline__ bool isfinite_vec3(const float3& a) { return isfinite(a.x) && isfinite(a.y) && isfinite(a.z); }
22
+ static __device__ __forceinline__ bool isfinite_vec4(const float4& a) { return isfinite(a.x) && isfinite(a.y) && isfinite(a.z) && isfinite(a.w); }
23
+ template<class T> static __device__ __forceinline__ T lerp (const T& a, const T& b, float c) { return a + c * (b - a); }
24
+ template<class T> static __device__ __forceinline__ T bilerp(const T& a, const T& b, const T& c, const T& d, const float2& e) { return lerp(lerp(a, b, e.x), lerp(c, d, e.x), e.y); }
25
+
26
+ //------------------------------------------------------------------------
27
+ // Cube map wrapping for smooth filtering across edges and corners. At corners,
28
+ // one of the texture coordinates will be negative. For correct interpolation,
29
+ // the missing texel must take the average color of the other three.
30
+
31
+ static __constant__ uint32_t c_cubeWrapMask1[48] =
32
+ {
33
+ 0x1530a440, 0x1133a550, 0x6103a110, 0x1515aa44, 0x6161aa11, 0x40154a04, 0x44115a05, 0x04611a01,
34
+ 0x2630a440, 0x2233a550, 0x5203a110, 0x2626aa44, 0x5252aa11, 0x40264a04, 0x44225a05, 0x04521a01,
35
+ 0x32608064, 0x3366a055, 0x13062091, 0x32328866, 0x13132299, 0x50320846, 0x55330a55, 0x05130219,
36
+ 0x42508064, 0x4455a055, 0x14052091, 0x42428866, 0x14142299, 0x60420846, 0x66440a55, 0x06140219,
37
+ 0x5230a044, 0x5533a055, 0x1503a011, 0x5252aa44, 0x1515aa11, 0x40520a44, 0x44550a55, 0x04150a11,
38
+ 0x6130a044, 0x6633a055, 0x2603a011, 0x6161aa44, 0x2626aa11, 0x40610a44, 0x44660a55, 0x04260a11,
39
+ };
40
+
41
+ static __constant__ uint8_t c_cubeWrapMask2[48] =
42
+ {
43
+ 0x26, 0x33, 0x11, 0x05, 0x00, 0x09, 0x0c, 0x04, 0x04, 0x00, 0x00, 0x05, 0x00, 0x81, 0xc0, 0x40,
44
+ 0x02, 0x03, 0x09, 0x00, 0x0a, 0x00, 0x00, 0x02, 0x64, 0x30, 0x90, 0x55, 0xa0, 0x99, 0xcc, 0x64,
45
+ 0x24, 0x30, 0x10, 0x05, 0x00, 0x01, 0x00, 0x00, 0x06, 0x03, 0x01, 0x05, 0x00, 0x89, 0xcc, 0x44,
46
+ };
47
+
48
+ static __device__ __forceinline__ int4 wrapCubeMap(int face, int ix0, int ix1, int iy0, int iy1, int w)
49
+ {
50
+ // Calculate case number.
51
+ int cx = (ix0 < 0) ? 0 : (ix1 >= w) ? 2 : 1;
52
+ int cy = (iy0 < 0) ? 0 : (iy1 >= w) ? 6 : 3;
53
+ int c = cx + cy;
54
+ if (c >= 5)
55
+ c--;
56
+ c = (face << 3) + c;
57
+
58
+ // Compute coordinates and faces.
59
+ unsigned int m = c_cubeWrapMask1[c];
60
+ int x0 = (m >> 0) & 3; x0 = (x0 == 0) ? 0 : (x0 == 1) ? ix0 : iy0;
61
+ int x1 = (m >> 2) & 3; x1 = (x1 == 0) ? 0 : (x1 == 1) ? ix1 : iy0;
62
+ int x2 = (m >> 4) & 3; x2 = (x2 == 0) ? 0 : (x2 == 1) ? ix0 : iy1;
63
+ int x3 = (m >> 6) & 3; x3 = (x3 == 0) ? 0 : (x3 == 1) ? ix1 : iy1;
64
+ int y0 = (m >> 8) & 3; y0 = (y0 == 0) ? 0 : (y0 == 1) ? ix0 : iy0;
65
+ int y1 = (m >> 10) & 3; y1 = (y1 == 0) ? 0 : (y1 == 1) ? ix1 : iy0;
66
+ int y2 = (m >> 12) & 3; y2 = (y2 == 0) ? 0 : (y2 == 1) ? ix0 : iy1;
67
+ int y3 = (m >> 14) & 3; y3 = (y3 == 0) ? 0 : (y3 == 1) ? ix1 : iy1;
68
+ int f0 = ((m >> 16) & 15) - 1;
69
+ int f1 = ((m >> 20) & 15) - 1;
70
+ int f2 = ((m >> 24) & 15) - 1;
71
+ int f3 = ((m >> 28) ) - 1;
72
+
73
+ // Flips.
74
+ unsigned int f = c_cubeWrapMask2[c];
75
+ int w1 = w - 1;
76
+ if (f & 0x01) x0 = w1 - x0;
77
+ if (f & 0x02) x1 = w1 - x1;
78
+ if (f & 0x04) x2 = w1 - x2;
79
+ if (f & 0x08) x3 = w1 - x3;
80
+ if (f & 0x10) y0 = w1 - y0;
81
+ if (f & 0x20) y1 = w1 - y1;
82
+ if (f & 0x40) y2 = w1 - y2;
83
+ if (f & 0x80) y3 = w1 - y3;
84
+
85
+ // Done.
86
+ int4 tcOut;
87
+ tcOut.x = x0 + (y0 + f0 * w) * w;
88
+ tcOut.y = x1 + (y1 + f1 * w) * w;
89
+ tcOut.z = x2 + (y2 + f2 * w) * w;
90
+ tcOut.w = x3 + (y3 + f3 * w) * w;
91
+ return tcOut;
92
+ }
93
+
94
+ //------------------------------------------------------------------------
95
+ // Cube map indexing and gradient functions.
96
+
97
+ // Map a 3D lookup vector into (s,t) face coordinates (returned in the first
98
+ // two parameters) and a face index.
99
+ static __device__ __forceinline__ int indexCubeMap(float& x, float& y, float z)
100
+ {
101
+ float ax = fabsf(x);
102
+ float ay = fabsf(y);
103
+ float az = fabsf(z);
104
+ int idx;
105
+ float c;
106
+ if (az > fmaxf(ax, ay)) { idx = 4; c = z; }
107
+ else if (ay > ax) { idx = 2; c = y; y = z; }
108
+ else { idx = 0; c = x; x = z; }
109
+ if (c < 0.f) idx += 1;
110
+ float m = __frcp_rz(fabsf(c)) * .5;
111
+ float m0 = __uint_as_float(__float_as_uint(m) ^ ((0x21u >> idx) << 31));
112
+ float m1 = (idx != 2) ? -m : m;
113
+ x = x * m0 + .5;
114
+ y = y * m1 + .5;
115
+ if (!isfinite(x) || !isfinite(y))
116
+ return -1; // Invalid uv.
117
+ x = fminf(fmaxf(x, 0.f), 1.f);
118
+ y = fminf(fmaxf(y, 0.f), 1.f);
119
+ return idx;
120
+ }
121
+
122
+ // Based on dA/d{s,t}, compute dA/d{x,y,z} at a given 3D lookup vector.
123
+ static __device__ __forceinline__ float3 indexCubeMapGrad(float3 uv, float gu, float gv)
124
+ {
125
+ float ax = fabsf(uv.x);
126
+ float ay = fabsf(uv.y);
127
+ float az = fabsf(uv.z);
128
+ int idx;
129
+ float c;
130
+ float c0 = gu;
131
+ float c1 = gv;
132
+ if (az > fmaxf(ax, ay)) { idx = 0x10; c = uv.z; c0 *= uv.x; c1 *= uv.y; }
133
+ else if (ay > ax) { idx = 0x04; c = uv.y; c0 *= uv.x; c1 *= uv.z; }
134
+ else { idx = 0x01; c = uv.x; c0 *= uv.z; c1 *= uv.y; }
135
+ if (c < 0.f) idx += idx;
136
+ float m = __frcp_rz(fabsf(c));
137
+ c0 = (idx & 0x34) ? -c0 : c0;
138
+ c1 = (idx & 0x2e) ? -c1 : c1;
139
+ float gl = (c0 + c1) * m;
140
+ float gx = (idx & 0x03) ? gl : (idx & 0x20) ? -gu : gu;
141
+ float gy = (idx & 0x0c) ? gl : -gv;
142
+ float gz = (idx & 0x30) ? gl : (idx & 0x03) ? gu : gv;
143
+ gz = (idx & 0x09) ? -gz : gz;
144
+ float3 res = make_float3(gx, gy, gz) * (m * .5f);
145
+ if (!isfinite_vec3(res))
146
+ return make_float3(0.f, 0.f, 0.f); // Invalid uv.
147
+ return res;
148
+ }
149
+
150
+ // Based on dL/d(d{s,t}/d{X,Y}), compute dL/d(d{x,y,z}/d{X,Y}). This is just two
151
+ // indexCubeMapGrad() functions rolled together.
152
+ static __device__ __forceinline__ void indexCubeMapGrad4(float3 uv, float4 dw, float3& g0, float3& g1)
153
+ {
154
+ float ax = fabsf(uv.x);
155
+ float ay = fabsf(uv.y);
156
+ float az = fabsf(uv.z);
157
+ int idx;
158
+ float c, c0, c1;
159
+ if (az > fmaxf(ax, ay)) { idx = 0x10; c = uv.z; c0 = uv.x; c1 = uv.y; }
160
+ else if (ay > ax) { idx = 0x04; c = uv.y; c0 = uv.x; c1 = uv.z; }
161
+ else { idx = 0x01; c = uv.x; c0 = uv.z; c1 = uv.y; }
162
+ if (c < 0.f) idx += idx;
163
+ float m = __frcp_rz(fabsf(c));
164
+ c0 = (idx & 0x34) ? -c0 : c0;
165
+ c1 = (idx & 0x2e) ? -c1 : c1;
166
+ float gl0 = (dw.x * c0 + dw.z * c1) * m;
167
+ float gl1 = (dw.y * c0 + dw.w * c1) * m;
168
+ float gx0 = (idx & 0x03) ? gl0 : (idx & 0x20) ? -dw.x : dw.x;
169
+ float gx1 = (idx & 0x03) ? gl1 : (idx & 0x20) ? -dw.y : dw.y;
170
+ float gy0 = (idx & 0x0c) ? gl0 : -dw.z;
171
+ float gy1 = (idx & 0x0c) ? gl1 : -dw.w;
172
+ float gz0 = (idx & 0x30) ? gl0 : (idx & 0x03) ? dw.x : dw.z;
173
+ float gz1 = (idx & 0x30) ? gl1 : (idx & 0x03) ? dw.y : dw.w;
174
+ if (idx & 0x09)
175
+ {
176
+ gz0 = -gz0;
177
+ gz1 = -gz1;
178
+ }
179
+ g0 = make_float3(gx0, gy0, gz0) * (m * .5f);
180
+ g1 = make_float3(gx1, gy1, gz1) * (m * .5f);
181
+ if (!isfinite_vec3(g0) || !isfinite_vec3(g1))
182
+ {
183
+ g0 = make_float3(0.f, 0.f, 0.f); // Invalid uv.
184
+ g1 = make_float3(0.f, 0.f, 0.f);
185
+ }
186
+ }
187
+
188
+ // Compute d{s,t}/d{X,Y} based on d{x,y,z}/d{X,Y} at a given 3D lookup vector.
189
+ // Result is (ds/dX, ds/dY, dt/dX, dt/dY).
190
+ static __device__ __forceinline__ float4 indexCubeMapGradST(float3 uv, float3 dvdX, float3 dvdY)
191
+ {
192
+ float ax = fabsf(uv.x);
193
+ float ay = fabsf(uv.y);
194
+ float az = fabsf(uv.z);
195
+ int idx;
196
+ float c, gu, gv;
197
+ if (az > fmaxf(ax, ay)) { idx = 0x10; c = uv.z; gu = uv.x; gv = uv.y; }
198
+ else if (ay > ax) { idx = 0x04; c = uv.y; gu = uv.x; gv = uv.z; }
199
+ else { idx = 0x01; c = uv.x; gu = uv.z; gv = uv.y; }
200
+ if (c < 0.f) idx += idx;
201
+ if (idx & 0x09)
202
+ {
203
+ dvdX.z = -dvdX.z;
204
+ dvdY.z = -dvdY.z;
205
+ }
206
+ float m = __frcp_rz(fabsf(c));
207
+ float dm = m * .5f;
208
+ float mm = m * dm;
209
+ gu *= (idx & 0x34) ? -mm : mm;
210
+ gv *= (idx & 0x2e) ? -mm : mm;
211
+
212
+ float4 res;
213
+ if (idx & 0x03)
214
+ {
215
+ res = make_float4(gu * dvdX.x + dm * dvdX.z,
216
+ gu * dvdY.x + dm * dvdY.z,
217
+ gv * dvdX.x - dm * dvdX.y,
218
+ gv * dvdY.x - dm * dvdY.y);
219
+ }
220
+ else if (idx & 0x0c)
221
+ {
222
+ res = make_float4(gu * dvdX.y + dm * dvdX.x,
223
+ gu * dvdY.y + dm * dvdY.x,
224
+ gv * dvdX.y + dm * dvdX.z,
225
+ gv * dvdY.y + dm * dvdY.z);
226
+ }
227
+ else // (idx & 0x30)
228
+ {
229
+ res = make_float4(gu * dvdX.z + copysignf(dm, c) * dvdX.x,
230
+ gu * dvdY.z + copysignf(dm, c) * dvdY.x,
231
+ gv * dvdX.z - dm * dvdX.y,
232
+ gv * dvdY.z - dm * dvdY.y);
233
+ }
234
+
235
+ if (!isfinite_vec4(res))
236
+ return make_float4(0.f, 0.f, 0.f, 0.f);
237
+
238
+ return res;
239
+ }
240
+
241
+ // Compute d(d{s,t}/d{X,Y})/d{x,y,z}, i.e., how the pixel derivatives of 2D face
242
+ // coordinates change w.r.t. 3D texture coordinate vector, returned as follows:
243
+ // | d(ds/dX)/dx d(ds/dY)/dx d(dt/dX)/dx d(dt/dY)/dx |
244
+ // | d(ds/dX)/dy d(ds/dY)/dy d(dt/dX)/dy d(dt/dY)/dy |
245
+ // | d(ds/dX)/dz d(ds/dY)/dz d(dt/dX)/dz d(dt/dY)/dz |
246
+ static __device__ __forceinline__ void indexCubeMapGrad2(float3 uv, float3 dvdX, float3 dvdY, float4& dx, float4& dy, float4& dz)
247
+ {
248
+ float ax = fabsf(uv.x);
249
+ float ay = fabsf(uv.y);
250
+ float az = fabsf(uv.z);
251
+ int idx;
252
+ float c, gu, gv;
253
+ if (az > fmaxf(ax, ay)) { idx = 0x10; c = uv.z; gu = uv.x; gv = uv.y; }
254
+ else if (ay > ax) { idx = 0x04; c = uv.y; gu = uv.x; gv = uv.z; }
255
+ else { idx = 0x01; c = uv.x; gu = uv.z; gv = uv.y; }
256
+ if (c < 0.f) idx += idx;
257
+
258
+ if (idx & 0x09)
259
+ {
260
+ dvdX.z = -dvdX.z;
261
+ dvdY.z = -dvdY.z;
262
+ }
263
+
264
+ float m = __frcp_rz(c);
265
+ float dm = -m * fabsf(m) * .5;
266
+ float mm = m * m * .5;
267
+ float mu = (idx & 0x34) ? -mm : mm;
268
+ float mv = (idx & 0x2e) ? -mm : mm;
269
+ gu *= -2.0 * m * mu;
270
+ gv *= -2.0 * m * mv;
271
+
272
+ if (idx & 0x03)
273
+ {
274
+ dx.x = gu * dvdX.x + dm * dvdX.z;
275
+ dx.y = gu * dvdY.x + dm * dvdY.z;
276
+ dx.z = gv * dvdX.x - dm * dvdX.y;
277
+ dx.w = gv * dvdY.x - dm * dvdY.y;
278
+ dy.x = 0.f;
279
+ dy.y = 0.f;
280
+ dy.z = mv * dvdX.x;
281
+ dy.w = mv * dvdY.x;
282
+ dz.x = mu * dvdX.x;
283
+ dz.y = mu * dvdY.x;
284
+ dz.z = 0.f;
285
+ dz.w = 0.f;
286
+ }
287
+ else if (idx & 0x0c)
288
+ {
289
+ dx.x = mu * dvdX.y;
290
+ dx.y = mu * dvdY.y;
291
+ dx.z = 0.f;
292
+ dx.w = 0.f;
293
+ dy.x = gu * dvdX.y + dm * dvdX.x;
294
+ dy.y = gu * dvdY.y + dm * dvdY.x;
295
+ dy.z = gv * dvdX.y + dm * dvdX.z;
296
+ dy.w = gv * dvdY.y + dm * dvdY.z;
297
+ dz.x = 0.f;
298
+ dz.y = 0.f;
299
+ dz.z = mv * dvdX.y;
300
+ dz.w = mv * dvdY.y;
301
+ }
302
+ else // (idx & 0x30)
303
+ {
304
+ dx.x = mu * dvdX.z;
305
+ dx.y = mu * dvdY.z;
306
+ dx.z = 0.f;
307
+ dx.w = 0.f;
308
+ dy.x = 0.f;
309
+ dy.y = 0.f;
310
+ dy.z = mv * dvdX.z;
311
+ dy.w = mv * dvdY.z;
312
+ dz.x = gu * dvdX.z - fabsf(dm) * dvdX.x;
313
+ dz.y = gu * dvdY.z - fabsf(dm) * dvdY.x;
314
+ dz.z = gv * dvdX.z - dm * dvdX.y;
315
+ dz.w = gv * dvdY.z - dm * dvdY.y;
316
+ }
317
+ }
318
+
319
+ //------------------------------------------------------------------------
320
+ // General texture indexing.
321
+
322
+ template <bool CUBE_MODE>
323
+ static __device__ __forceinline__ int indexTextureNearest(const TextureKernelParams& p, float3 uv, int tz)
324
+ {
325
+ int w = p.texWidth;
326
+ int h = p.texHeight;
327
+ float u = uv.x;
328
+ float v = uv.y;
329
+
330
+ // Cube map indexing.
331
+ if (CUBE_MODE)
332
+ {
333
+ // No wrap. Fold face index into tz right away.
334
+ int idx = indexCubeMap(u, v, uv.z); // Rewrites u, v.
335
+ if (idx < 0)
336
+ return -1; // Invalid uv.
337
+ tz = 6 * tz + idx;
338
+ }
339
+ else
340
+ {
341
+ // Handle boundary.
342
+ if (p.boundaryMode == TEX_BOUNDARY_MODE_WRAP)
343
+ {
344
+ u = u - (float)__float2int_rd(u);
345
+ v = v - (float)__float2int_rd(v);
346
+ }
347
+ }
348
+
349
+ u = u * (float)w;
350
+ v = v * (float)h;
351
+
352
+ int iu = __float2int_rd(u);
353
+ int iv = __float2int_rd(v);
354
+
355
+ // In zero boundary mode, return texture address -1.
356
+ if (!CUBE_MODE && p.boundaryMode == TEX_BOUNDARY_MODE_ZERO)
357
+ {
358
+ if (iu < 0 || iu >= w || iv < 0 || iv >= h)
359
+ return -1;
360
+ }
361
+
362
+ // Otherwise clamp and calculate the coordinate properly.
363
+ iu = min(max(iu, 0), w-1);
364
+ iv = min(max(iv, 0), h-1);
365
+ return iu + w * (iv + tz * h);
366
+ }
367
+
368
+ template <bool CUBE_MODE>
369
+ static __device__ __forceinline__ float2 indexTextureLinear(const TextureKernelParams& p, float3 uv, int tz, int4& tcOut, int level)
370
+ {
371
+ // Mip level size.
372
+ int2 sz = mipLevelSize(p, level);
373
+ int w = sz.x;
374
+ int h = sz.y;
375
+
376
+ // Compute texture-space u, v.
377
+ float u = uv.x;
378
+ float v = uv.y;
379
+ bool clampU = false;
380
+ bool clampV = false;
381
+
382
+ // Cube map indexing.
383
+ int face = 0;
384
+ if (CUBE_MODE)
385
+ {
386
+ // Neither clamp nor wrap.
387
+ face = indexCubeMap(u, v, uv.z); // Rewrites u, v.
388
+ if (face < 0)
389
+ {
390
+ tcOut.x = tcOut.y = tcOut.z = tcOut.w = -1; // Invalid uv.
391
+ return make_float2(0.f, 0.f);
392
+ }
393
+ u = u * (float)w - 0.5f;
394
+ v = v * (float)h - 0.5f;
395
+ }
396
+ else
397
+ {
398
+ if (p.boundaryMode == TEX_BOUNDARY_MODE_WRAP)
399
+ {
400
+ // Wrap.
401
+ u = u - (float)__float2int_rd(u);
402
+ v = v - (float)__float2int_rd(v);
403
+ }
404
+
405
+ // Move to texel space.
406
+ u = u * (float)w - 0.5f;
407
+ v = v * (float)h - 0.5f;
408
+
409
+ if (p.boundaryMode == TEX_BOUNDARY_MODE_CLAMP)
410
+ {
411
+ // Clamp to center of edge texels.
412
+ u = fminf(fmaxf(u, 0.f), w - 1.f);
413
+ v = fminf(fmaxf(v, 0.f), h - 1.f);
414
+ clampU = (u == 0.f || u == w - 1.f);
415
+ clampV = (v == 0.f || v == h - 1.f);
416
+ }
417
+ }
418
+
419
+ // Compute texel coordinates and weights.
420
+ int iu0 = __float2int_rd(u);
421
+ int iv0 = __float2int_rd(v);
422
+ int iu1 = iu0 + (clampU ? 0 : 1); // Ensure zero u/v gradients when clamped.
423
+ int iv1 = iv0 + (clampV ? 0 : 1);
424
+ u -= (float)iu0;
425
+ v -= (float)iv0;
426
+
427
+ // Cube map wrapping.
428
+ bool cubeWrap = CUBE_MODE && (iu0 < 0 || iv0 < 0 || iu1 >= w || iv1 >= h);
429
+ if (cubeWrap)
430
+ {
431
+ tcOut = wrapCubeMap(face, iu0, iu1, iv0, iv1, w);
432
+ tcOut += 6 * tz * w * h; // Bring in tz.
433
+ return make_float2(u, v); // Done.
434
+ }
435
+
436
+ // Fold cube map face into tz.
437
+ if (CUBE_MODE)
438
+ tz = 6 * tz + face;
439
+
440
+ // Wrap overflowing texel indices.
441
+ if (!CUBE_MODE && p.boundaryMode == TEX_BOUNDARY_MODE_WRAP)
442
+ {
443
+ if (iu0 < 0) iu0 += w;
444
+ if (iv0 < 0) iv0 += h;
445
+ if (iu1 >= w) iu1 -= w;
446
+ if (iv1 >= h) iv1 -= h;
447
+ }
448
+
449
+ // Coordinates with tz folded in.
450
+ int iu0z = iu0 + tz * w * h;
451
+ int iu1z = iu1 + tz * w * h;
452
+ tcOut.x = iu0z + w * iv0;
453
+ tcOut.y = iu1z + w * iv0;
454
+ tcOut.z = iu0z + w * iv1;
455
+ tcOut.w = iu1z + w * iv1;
456
+
457
+ // Invalidate texture addresses outside unit square if we are in zero mode.
458
+ if (!CUBE_MODE && p.boundaryMode == TEX_BOUNDARY_MODE_ZERO)
459
+ {
460
+ bool iu0_out = (iu0 < 0 || iu0 >= w);
461
+ bool iu1_out = (iu1 < 0 || iu1 >= w);
462
+ bool iv0_out = (iv0 < 0 || iv0 >= h);
463
+ bool iv1_out = (iv1 < 0 || iv1 >= h);
464
+ if (iu0_out || iv0_out) tcOut.x = -1;
465
+ if (iu1_out || iv0_out) tcOut.y = -1;
466
+ if (iu0_out || iv1_out) tcOut.z = -1;
467
+ if (iu1_out || iv1_out) tcOut.w = -1;
468
+ }
469
+
470
+ // All done.
471
+ return make_float2(u, v);
472
+ }
473
+
474
+ //------------------------------------------------------------------------
475
+ // Mip level calculation.
476
+
477
+ template <bool CUBE_MODE, bool BIAS_ONLY, int FILTER_MODE>
478
+ static __device__ __forceinline__ void calculateMipLevel(int& level0, int& level1, float& flevel, const TextureKernelParams& p, int pidx, float3 uv, float4* pdw, float3* pdfdv)
479
+ {
480
+ // Do nothing if mips not in use.
481
+ if (FILTER_MODE == TEX_MODE_NEAREST || FILTER_MODE == TEX_MODE_LINEAR)
482
+ return;
483
+
484
+ // Determine mip level based on UV pixel derivatives. If no derivatives are given (mip level bias only), leave as zero.
485
+ if (!BIAS_ONLY)
486
+ {
487
+ // Get pixel derivatives of texture coordinates.
488
+ float4 uvDA;
489
+ float3 dvdX, dvdY; // Gradients use these later.
490
+ if (CUBE_MODE)
491
+ {
492
+ // Fetch.
493
+ float2 d0 = ((const float2*)p.uvDA)[3 * pidx + 0];
494
+ float2 d1 = ((const float2*)p.uvDA)[3 * pidx + 1];
495
+ float2 d2 = ((const float2*)p.uvDA)[3 * pidx + 2];
496
+
497
+ // Map d{x,y,z}/d{X,Y} into d{s,t}/d{X,Y}.
498
+ dvdX = make_float3(d0.x, d1.x, d2.x); // d{x,y,z}/dX
499
+ dvdY = make_float3(d0.y, d1.y, d2.y); // d{x,y,z}/dY
500
+ uvDA = indexCubeMapGradST(uv, dvdX, dvdY); // d{s,t}/d{X,Y}
501
+ }
502
+ else
503
+ {
504
+ // Fetch.
505
+ uvDA = ((const float4*)p.uvDA)[pidx];
506
+ }
507
+
508
+ // Scaling factors.
509
+ float uscl = p.texWidth;
510
+ float vscl = p.texHeight;
511
+
512
+ // d[s,t]/d[X,Y].
513
+ float dsdx = uvDA.x * uscl;
514
+ float dsdy = uvDA.y * uscl;
515
+ float dtdx = uvDA.z * vscl;
516
+ float dtdy = uvDA.w * vscl;
517
+
518
+ // Calculate footprint axis lengths.
519
+ float A = dsdx*dsdx + dtdx*dtdx;
520
+ float B = dsdy*dsdy + dtdy*dtdy;
521
+ float C = dsdx*dsdy + dtdx*dtdy;
522
+ float l2b = 0.5 * (A + B);
523
+ float l2n = 0.25 * (A-B)*(A-B) + C*C;
524
+ float l2a = sqrt(l2n);
525
+ float lenMinorSqr = fmaxf(0.0, l2b - l2a);
526
+ float lenMajorSqr = l2b + l2a;
527
+
528
+ // Footprint vs. mip level gradient.
529
+ if (pdw && FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_LINEAR)
530
+ {
531
+ float dw = 0.72134752f / (l2n + l2a * l2b); // Constant is 0.5/ln(2).
532
+ float AB = dw * .5f * (A - B);
533
+ float Cw = dw * C;
534
+ float l2aw = dw * l2a;
535
+ float d_f_ddsdX = uscl * (dsdx * (l2aw + AB) + dsdy * Cw);
536
+ float d_f_ddsdY = uscl * (dsdy * (l2aw - AB) + dsdx * Cw);
537
+ float d_f_ddtdX = vscl * (dtdx * (l2aw + AB) + dtdy * Cw);
538
+ float d_f_ddtdY = vscl * (dtdy * (l2aw - AB) + dtdx * Cw);
539
+
540
+ float4 d_f_dw = make_float4(d_f_ddsdX, d_f_ddsdY, d_f_ddtdX, d_f_ddtdY);
541
+ if (!CUBE_MODE)
542
+ *pdw = isfinite_vec4(d_f_dw) ? d_f_dw : make_float4(0.f, 0.f, 0.f, 0.f);
543
+
544
+ // In cube maps, there is also a texture coordinate vs. mip level gradient.
545
+ // Only output nonzero vectors if both are free of inf/NaN garbage.
546
+ if (CUBE_MODE)
547
+ {
548
+ float4 dx, dy, dz;
549
+ indexCubeMapGrad2(uv, dvdX, dvdY, dx, dy, dz);
550
+ float3 d_dsdX_dv = make_float3(dx.x, dy.x, dz.x);
551
+ float3 d_dsdY_dv = make_float3(dx.y, dy.y, dz.y);
552
+ float3 d_dtdX_dv = make_float3(dx.z, dy.z, dz.z);
553
+ float3 d_dtdY_dv = make_float3(dx.w, dy.w, dz.w);
554
+
555
+ float3 d_f_dv = make_float3(0.f, 0.f, 0.f);
556
+ d_f_dv += d_dsdX_dv * d_f_ddsdX;
557
+ d_f_dv += d_dsdY_dv * d_f_ddsdY;
558
+ d_f_dv += d_dtdX_dv * d_f_ddtdX;
559
+ d_f_dv += d_dtdY_dv * d_f_ddtdY;
560
+
561
+ bool finite = isfinite_vec4(d_f_dw) && isfinite_vec3(d_f_dv);
562
+ *pdw = finite ? d_f_dw : make_float4(0.f, 0.f, 0.f, 0.f);
563
+ *pdfdv = finite ? d_f_dv : make_float3(0.f, 0.f, 0.f);
564
+ }
565
+ }
566
+
567
+ // Finally, calculate mip level.
568
+ flevel = .5f * __log2f(lenMajorSqr); // May be inf/NaN, but clamp fixes it.
569
+ }
570
+
571
+ // Bias the mip level and clamp.
572
+ if (p.mipLevelBias)
573
+ flevel += p.mipLevelBias[pidx];
574
+ flevel = fminf(fmaxf(flevel, 0.f), (float)p.mipLevelMax);
575
+
576
+ // Calculate levels depending on filter mode.
577
+ level0 = __float2int_rd(flevel);
578
+
579
+ // Leave everything else at zero if flevel == 0 (magnification) or when in linear-mipmap-nearest mode.
580
+ if (FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_LINEAR && flevel > 0.f)
581
+ {
582
+ level1 = min(level0 + 1, p.mipLevelMax);
583
+ flevel -= level0; // Fractional part. Zero if clamped on last level.
584
+ }
585
+ }
586
+
587
+ //------------------------------------------------------------------------
588
+ // Texel fetch and accumulator helpers that understand cube map corners.
589
+
590
+ template<class T>
591
+ static __device__ __forceinline__ void fetchQuad(T& a00, T& a10, T& a01, T& a11, const float* pIn, int4 tc, bool corner)
592
+ {
593
+ // For invalid cube map uv, tc will be all negative, and all texel values will be zero.
594
+ if (corner)
595
+ {
596
+ T avg = zero_value<T>();
597
+ if (tc.x >= 0) avg += (a00 = *((const T*)&pIn[tc.x]));
598
+ if (tc.y >= 0) avg += (a10 = *((const T*)&pIn[tc.y]));
599
+ if (tc.z >= 0) avg += (a01 = *((const T*)&pIn[tc.z]));
600
+ if (tc.w >= 0) avg += (a11 = *((const T*)&pIn[tc.w]));
601
+ avg *= 0.33333333f;
602
+ if (tc.x < 0) a00 = avg;
603
+ if (tc.y < 0) a10 = avg;
604
+ if (tc.z < 0) a01 = avg;
605
+ if (tc.w < 0) a11 = avg;
606
+ }
607
+ else
608
+ {
609
+ a00 = (tc.x >= 0) ? *((const T*)&pIn[tc.x]) : zero_value<T>();
610
+ a10 = (tc.y >= 0) ? *((const T*)&pIn[tc.y]) : zero_value<T>();
611
+ a01 = (tc.z >= 0) ? *((const T*)&pIn[tc.z]) : zero_value<T>();
612
+ a11 = (tc.w >= 0) ? *((const T*)&pIn[tc.w]) : zero_value<T>();
613
+ }
614
+ }
615
+
616
+ static __device__ __forceinline__ void accumQuad(float4 c, float* pOut, int level, int4 tc, bool corner, CA_TEMP_PARAM)
617
+ {
618
+ // For invalid cube map uv, tc will be all negative, and no accumulation will take place.
619
+ if (corner)
620
+ {
621
+ float cb;
622
+ if (tc.x < 0) cb = c.x;
623
+ if (tc.y < 0) cb = c.y;
624
+ if (tc.z < 0) cb = c.z;
625
+ if (tc.w < 0) cb = c.w;
626
+ cb *= 0.33333333f;
627
+ if (tc.x >= 0) caAtomicAddTexture(pOut, level, tc.x, c.x + cb);
628
+ if (tc.y >= 0) caAtomicAddTexture(pOut, level, tc.y, c.y + cb);
629
+ if (tc.z >= 0) caAtomicAddTexture(pOut, level, tc.z, c.z + cb);
630
+ if (tc.w >= 0) caAtomicAddTexture(pOut, level, tc.w, c.w + cb);
631
+ }
632
+ else
633
+ {
634
+ if (tc.x >= 0) caAtomicAddTexture(pOut, level, tc.x, c.x);
635
+ if (tc.y >= 0) caAtomicAddTexture(pOut, level, tc.y, c.y);
636
+ if (tc.z >= 0) caAtomicAddTexture(pOut, level, tc.z, c.z);
637
+ if (tc.w >= 0) caAtomicAddTexture(pOut, level, tc.w, c.w);
638
+ }
639
+ }
640
+
641
+ //------------------------------------------------------------------------
642
+ // Mip builder kernel.
643
+
644
+ template<class T, int C>
645
+ static __forceinline__ __device__ void MipBuildKernelTemplate(const TextureKernelParams p)
646
+ {
647
+ // Sizes.
648
+ int2 sz_in = mipLevelSize(p, p.mipLevelOut - 1);
649
+ int2 sz_out = mipLevelSize(p, p.mipLevelOut);
650
+
651
+ // Calculate pixel position.
652
+ int px = blockIdx.x * blockDim.x + threadIdx.x;
653
+ int py = blockIdx.y * blockDim.y + threadIdx.y;
654
+ int pz = blockIdx.z;
655
+ if (px >= sz_out.x || py >= sz_out.y)
656
+ return;
657
+
658
+ // Pixel indices.
659
+ int pidx_in0 = p.channels * (((px + sz_in.x * py) << 1) + (pz * sz_in.x * sz_in.y));
660
+ int pidx_in1 = pidx_in0 + p.channels * sz_in.x; // Next pixel down.
661
+ int pidx_out = p.channels * (px + sz_out.x * (py + sz_out.y * pz));
662
+
663
+ // Input and output pointers.
664
+ const float* pin = p.tex[p.mipLevelOut - 1];
665
+ float* pout = (float*)p.tex[p.mipLevelOut];
666
+
667
+ // Special case: Input texture height or width is 1.
668
+ if (sz_in.x == 1 || sz_in.y == 1)
669
+ {
670
+ if (sz_in.y == 1)
671
+ pidx_in1 = pidx_in0 + p.channels; // Next pixel on the right.
672
+
673
+ for (int i=0; i < p.channels; i += C)
674
+ {
675
+ T v0 = *((const T*)&pin[pidx_in0 + i]);
676
+ T v1 = *((const T*)&pin[pidx_in1 + i]);
677
+ T avg = .5f * (v0 + v1);
678
+ #if TEX_DEBUG_MIP_RETAIN_VARIANCE
679
+ avg = (avg - .5f) * 1.41421356f + .5f;
680
+ #endif
681
+ *((T*)&pout[pidx_out + i]) = avg;
682
+ }
683
+
684
+ return;
685
+ }
686
+
687
+ for (int i=0; i < p.channels; i += C)
688
+ {
689
+ T v0 = *((const T*)&pin[pidx_in0 + i]);
690
+ T v1 = *((const T*)&pin[pidx_in0 + i + p.channels]);
691
+ T v2 = *((const T*)&pin[pidx_in1 + i]);
692
+ T v3 = *((const T*)&pin[pidx_in1 + i + p.channels]);
693
+ T avg = .25f * (v0 + v1 + v2 + v3);
694
+ #if TEX_DEBUG_MIP_RETAIN_VARIANCE
695
+ avg = (avg - .5f) * 2.f + .5f;
696
+ #endif
697
+ *((T*)&pout[pidx_out + i]) = avg;
698
+ }
699
+ }
700
+
701
+ // Template specializations.
702
+ __global__ void MipBuildKernel1(const TextureKernelParams p) { MipBuildKernelTemplate<float, 1>(p); }
703
+ __global__ void MipBuildKernel2(const TextureKernelParams p) { MipBuildKernelTemplate<float2, 2>(p); }
704
+ __global__ void MipBuildKernel4(const TextureKernelParams p) { MipBuildKernelTemplate<float4, 4>(p); }
705
+
706
+ //------------------------------------------------------------------------
707
+ // Forward kernel.
708
+
709
+ template <class T, int C, bool CUBE_MODE, bool BIAS_ONLY, int FILTER_MODE>
710
+ static __forceinline__ __device__ void TextureFwdKernelTemplate(const TextureKernelParams p)
711
+ {
712
+ // Calculate pixel position.
713
+ int px = blockIdx.x * blockDim.x + threadIdx.x;
714
+ int py = blockIdx.y * blockDim.y + threadIdx.y;
715
+ int pz = blockIdx.z;
716
+ int tz = (p.texDepth == 1) ? 0 : pz;
717
+ if (px >= p.imgWidth || py >= p.imgHeight || pz >= p.n)
718
+ return;
719
+
720
+ // Pixel index.
721
+ int pidx = px + p.imgWidth * (py + p.imgHeight * pz);
722
+
723
+ // Output ptr.
724
+ float* pOut = p.out + pidx * p.channels;
725
+
726
+ // Get UV.
727
+ float3 uv;
728
+ if (CUBE_MODE)
729
+ uv = ((const float3*)p.uv)[pidx];
730
+ else
731
+ uv = make_float3(((const float2*)p.uv)[pidx], 0.f);
732
+
733
+ // Nearest mode.
734
+ if (FILTER_MODE == TEX_MODE_NEAREST)
735
+ {
736
+ int tc = indexTextureNearest<CUBE_MODE>(p, uv, tz);
737
+ tc *= p.channels;
738
+ const float* pIn = p.tex[0];
739
+
740
+ // Copy if valid tc, otherwise output zero.
741
+ for (int i=0; i < p.channels; i += C)
742
+ *((T*)&pOut[i]) = (tc >= 0) ? *((const T*)&pIn[tc + i]) : zero_value<T>();
743
+
744
+ return; // Exit.
745
+ }
746
+
747
+ // Calculate mip level. In 'linear' mode these will all stay zero.
748
+ float flevel = 0.f; // Fractional level.
749
+ int level0 = 0; // Discrete level 0.
750
+ int level1 = 0; // Discrete level 1.
751
+ calculateMipLevel<CUBE_MODE, BIAS_ONLY, FILTER_MODE>(level0, level1, flevel, p, pidx, uv, 0, 0);
752
+
753
+ // Get texel indices and pointer for level 0.
754
+ int4 tc0 = make_int4(0, 0, 0, 0);
755
+ float2 uv0 = indexTextureLinear<CUBE_MODE>(p, uv, tz, tc0, level0);
756
+ const float* pIn0 = p.tex[level0];
757
+ bool corner0 = CUBE_MODE && ((tc0.x | tc0.y | tc0.z | tc0.w) < 0);
758
+ tc0 *= p.channels;
759
+
760
+ // Bilinear fetch.
761
+ if (FILTER_MODE == TEX_MODE_LINEAR || FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_NEAREST)
762
+ {
763
+ // Interpolate.
764
+ for (int i=0; i < p.channels; i += C, tc0 += C)
765
+ {
766
+ T a00, a10, a01, a11;
767
+ fetchQuad<T>(a00, a10, a01, a11, pIn0, tc0, corner0);
768
+ *((T*)&pOut[i]) = bilerp(a00, a10, a01, a11, uv0);
769
+ }
770
+ return; // Exit.
771
+ }
772
+
773
+ // Get texel indices and pointer for level 1.
774
+ int4 tc1 = make_int4(0, 0, 0, 0);
775
+ float2 uv1 = indexTextureLinear<CUBE_MODE>(p, uv, tz, tc1, level1);
776
+ const float* pIn1 = p.tex[level1];
777
+ bool corner1 = CUBE_MODE && ((tc1.x | tc1.y | tc1.z | tc1.w) < 0);
778
+ tc1 *= p.channels;
779
+
780
+ // Trilinear fetch.
781
+ for (int i=0; i < p.channels; i += C, tc0 += C, tc1 += C)
782
+ {
783
+ // First level.
784
+ T a00, a10, a01, a11;
785
+ fetchQuad<T>(a00, a10, a01, a11, pIn0, tc0, corner0);
786
+ T a = bilerp(a00, a10, a01, a11, uv0);
787
+
788
+ // Second level unless in magnification mode.
789
+ if (flevel > 0.f)
790
+ {
791
+ T b00, b10, b01, b11;
792
+ fetchQuad<T>(b00, b10, b01, b11, pIn1, tc1, corner1);
793
+ T b = bilerp(b00, b10, b01, b11, uv1);
794
+ a = lerp(a, b, flevel); // Interpolate between levels.
795
+ }
796
+
797
+ // Write.
798
+ *((T*)&pOut[i]) = a;
799
+ }
800
+ }
801
+
802
+ // Template specializations.
803
+ __global__ void TextureFwdKernelNearest1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, false, TEX_MODE_NEAREST>(p); }
804
+ __global__ void TextureFwdKernelNearest2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, false, TEX_MODE_NEAREST>(p); }
805
+ __global__ void TextureFwdKernelNearest4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, false, TEX_MODE_NEAREST>(p); }
806
+ __global__ void TextureFwdKernelLinear1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, false, TEX_MODE_LINEAR>(p); }
807
+ __global__ void TextureFwdKernelLinear2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, false, TEX_MODE_LINEAR>(p); }
808
+ __global__ void TextureFwdKernelLinear4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, false, TEX_MODE_LINEAR>(p); }
809
+ __global__ void TextureFwdKernelLinearMipmapNearest1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
810
+ __global__ void TextureFwdKernelLinearMipmapNearest2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
811
+ __global__ void TextureFwdKernelLinearMipmapNearest4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
812
+ __global__ void TextureFwdKernelLinearMipmapLinear1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
813
+ __global__ void TextureFwdKernelLinearMipmapLinear2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
814
+ __global__ void TextureFwdKernelLinearMipmapLinear4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
815
+ __global__ void TextureFwdKernelCubeNearest1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, false, TEX_MODE_NEAREST>(p); }
816
+ __global__ void TextureFwdKernelCubeNearest2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, false, TEX_MODE_NEAREST>(p); }
817
+ __global__ void TextureFwdKernelCubeNearest4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, false, TEX_MODE_NEAREST>(p); }
818
+ __global__ void TextureFwdKernelCubeLinear1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, false, TEX_MODE_LINEAR>(p); }
819
+ __global__ void TextureFwdKernelCubeLinear2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, false, TEX_MODE_LINEAR>(p); }
820
+ __global__ void TextureFwdKernelCubeLinear4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, false, TEX_MODE_LINEAR>(p); }
821
+ __global__ void TextureFwdKernelCubeLinearMipmapNearest1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
822
+ __global__ void TextureFwdKernelCubeLinearMipmapNearest2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
823
+ __global__ void TextureFwdKernelCubeLinearMipmapNearest4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
824
+ __global__ void TextureFwdKernelCubeLinearMipmapLinear1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
825
+ __global__ void TextureFwdKernelCubeLinearMipmapLinear2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
826
+ __global__ void TextureFwdKernelCubeLinearMipmapLinear4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
827
+ __global__ void TextureFwdKernelLinearMipmapNearestBO1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
828
+ __global__ void TextureFwdKernelLinearMipmapNearestBO2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
829
+ __global__ void TextureFwdKernelLinearMipmapNearestBO4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
830
+ __global__ void TextureFwdKernelLinearMipmapLinearBO1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, false, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
831
+ __global__ void TextureFwdKernelLinearMipmapLinearBO2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, false, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
832
+ __global__ void TextureFwdKernelLinearMipmapLinearBO4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, false, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
833
+ __global__ void TextureFwdKernelCubeLinearMipmapNearestBO1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
834
+ __global__ void TextureFwdKernelCubeLinearMipmapNearestBO2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
835
+ __global__ void TextureFwdKernelCubeLinearMipmapNearestBO4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
836
+ __global__ void TextureFwdKernelCubeLinearMipmapLinearBO1 (const TextureKernelParams p) { TextureFwdKernelTemplate<float, 1, true, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
837
+ __global__ void TextureFwdKernelCubeLinearMipmapLinearBO2 (const TextureKernelParams p) { TextureFwdKernelTemplate<float2, 2, true, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
838
+ __global__ void TextureFwdKernelCubeLinearMipmapLinearBO4 (const TextureKernelParams p) { TextureFwdKernelTemplate<float4, 4, true, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
839
+
840
+ //------------------------------------------------------------------------
841
+ // Gradient mip puller kernel.
842
+
843
+ template<class T, int C>
844
+ static __forceinline__ __device__ void MipGradKernelTemplate(const TextureKernelParams p)
845
+ {
846
+ // Calculate pixel position.
847
+ int px = blockIdx.x * blockDim.x + threadIdx.x;
848
+ int py = blockIdx.y * blockDim.y + threadIdx.y;
849
+ int pz = blockIdx.z;
850
+ if (px >= p.texWidth || py >= p.texHeight)
851
+ return;
852
+
853
+ // Number of wide elements.
854
+ int c = p.channels;
855
+ if (C == 2) c >>= 1;
856
+ if (C == 4) c >>= 2;
857
+
858
+ // Dynamically allocated shared memory for holding a texel.
859
+ extern __shared__ float s_texelAccum[];
860
+ int sharedOfs = threadIdx.x + threadIdx.y * blockDim.x;
861
+ int sharedStride = blockDim.x * blockDim.y;
862
+ # define TEXEL_ACCUM(_i) (s_texelAccum + (sharedOfs + (_i) * sharedStride))
863
+
864
+ // Clear the texel.
865
+ for (int i=0; i < p.channels; i++)
866
+ *TEXEL_ACCUM(i) = 0.f;
867
+
868
+ // Track texel position and accumulation weight over the mip stack.
869
+ int x = px;
870
+ int y = py;
871
+ float w = 1.f;
872
+
873
+ // Pull gradients from all levels.
874
+ int2 sz = mipLevelSize(p, 0); // Previous level size.
875
+ for (int level=1; level <= p.mipLevelMax; level++)
876
+ {
877
+ // Weight decay depends on previous level size.
878
+ if (sz.x > 1) w *= .5f;
879
+ if (sz.y > 1) w *= .5f;
880
+
881
+ // Current level size and coordinates.
882
+ sz = mipLevelSize(p, level);
883
+ x >>= 1;
884
+ y >>= 1;
885
+
886
+ T* pIn = (T*)(p.gradTex[level] + (x + sz.x * (y + sz.y * pz)) * p.channels);
887
+ for (int i=0; i < c; i++)
888
+ accum_from_mem(TEXEL_ACCUM(i * C), sharedStride, pIn[i], w);
889
+ }
890
+
891
+ // Add to main texture gradients.
892
+ T* pOut = (T*)(p.gradTex[0] + (px + p.texWidth * (py + p.texHeight * pz)) * p.channels);
893
+ for (int i=0; i < c; i++)
894
+ accum_to_mem(pOut[i], TEXEL_ACCUM(i * C), sharedStride);
895
+ }
896
+
897
+ // Template specializations.
898
+ __global__ void MipGradKernel1(const TextureKernelParams p) { MipGradKernelTemplate<float, 1>(p); }
899
+ __global__ void MipGradKernel2(const TextureKernelParams p) { MipGradKernelTemplate<float2, 2>(p); }
900
+ __global__ void MipGradKernel4(const TextureKernelParams p) { MipGradKernelTemplate<float4, 4>(p); }
901
+
902
+ //------------------------------------------------------------------------
903
+ // Gradient kernel.
904
+
905
+ template <bool CUBE_MODE, bool BIAS_ONLY, int FILTER_MODE>
906
+ static __forceinline__ __device__ void TextureGradKernelTemplate(const TextureKernelParams p)
907
+ {
908
+ // Temporary space for coalesced atomics.
909
+ CA_DECLARE_TEMP(TEX_GRAD_MAX_KERNEL_BLOCK_WIDTH * TEX_GRAD_MAX_KERNEL_BLOCK_HEIGHT);
910
+
911
+ // Calculate pixel position.
912
+ int px = blockIdx.x * blockDim.x + threadIdx.x;
913
+ int py = blockIdx.y * blockDim.y + threadIdx.y;
914
+ int pz = blockIdx.z;
915
+ int tz = (p.texDepth == 1) ? 0 : pz;
916
+ if (px >= p.imgWidth || py >= p.imgHeight || pz >= p.n)
917
+ return;
918
+
919
+ // Pixel index.
920
+ int pidx = px + p.imgWidth * (py + p.imgHeight * pz);
921
+
922
+ // Early exit if output gradients are zero.
923
+ const float* pDy = p.dy + pidx * p.channels;
924
+ unsigned int dmax = 0u;
925
+ if ((p.channels & 3) == 0)
926
+ {
927
+ for (int i=0; i < p.channels; i += 4)
928
+ {
929
+ uint4 dy = *((const uint4*)&pDy[i]);
930
+ dmax |= (dy.x | dy.y | dy.z | dy.w);
931
+ }
932
+ }
933
+ else
934
+ {
935
+ for (int i=0; i < p.channels; i++)
936
+ dmax |= __float_as_uint(pDy[i]);
937
+ }
938
+
939
+ // Store zeros and exit.
940
+ if (__uint_as_float(dmax) == 0.f)
941
+ {
942
+ if (CUBE_MODE)
943
+ {
944
+ if (FILTER_MODE != TEX_MODE_NEAREST)
945
+ ((float3*)p.gradUV)[pidx] = make_float3(0.f, 0.f, 0.f);
946
+ if (FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_LINEAR)
947
+ {
948
+ if (p.gradUVDA)
949
+ {
950
+ ((float2*)p.gradUVDA)[3 * pidx + 0] = make_float2(0.f, 0.f);
951
+ ((float2*)p.gradUVDA)[3 * pidx + 1] = make_float2(0.f, 0.f);
952
+ ((float2*)p.gradUVDA)[3 * pidx + 2] = make_float2(0.f, 0.f);
953
+ }
954
+ if (p.gradMipLevelBias)
955
+ p.gradMipLevelBias[pidx] = 0.f;
956
+ }
957
+ }
958
+ else
959
+ {
960
+ if (FILTER_MODE != TEX_MODE_NEAREST)
961
+ ((float2*)p.gradUV)[pidx] = make_float2(0.f, 0.f);
962
+ if (FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_LINEAR)
963
+ {
964
+ if (p.gradUVDA)
965
+ ((float4*)p.gradUVDA)[pidx] = make_float4(0.f, 0.f, 0.f, 0.f);
966
+ if (p.gradMipLevelBias)
967
+ p.gradMipLevelBias[pidx] = 0.f;
968
+ }
969
+ }
970
+ return;
971
+ }
972
+
973
+ // Get UV.
974
+ float3 uv;
975
+ if (CUBE_MODE)
976
+ uv = ((const float3*)p.uv)[pidx];
977
+ else
978
+ uv = make_float3(((const float2*)p.uv)[pidx], 0.f);
979
+
980
+ // Nearest mode - texture gradients only.
981
+ if (FILTER_MODE == TEX_MODE_NEAREST)
982
+ {
983
+ int tc = indexTextureNearest<CUBE_MODE>(p, uv, tz);
984
+ if (tc < 0)
985
+ return; // Outside texture.
986
+
987
+ tc *= p.channels;
988
+ float* pOut = p.gradTex[0];
989
+
990
+ // Accumulate texture gradients.
991
+ for (int i=0; i < p.channels; i++)
992
+ caAtomicAddTexture(pOut, 0, tc + i, pDy[i]);
993
+
994
+ return; // Exit.
995
+ }
996
+
997
+ // Calculate mip level. In 'linear' mode these will all stay zero.
998
+ float4 dw = make_float4(0.f, 0.f, 0.f, 0.f);
999
+ float3 dfdv = make_float3(0.f, 0.f, 0.f);
1000
+ float flevel = 0.f; // Fractional level.
1001
+ int level0 = 0; // Discrete level 0.
1002
+ int level1 = 0; // Discrete level 1.
1003
+ calculateMipLevel<CUBE_MODE, BIAS_ONLY, FILTER_MODE>(level0, level1, flevel, p, pidx, uv, &dw, &dfdv);
1004
+
1005
+ // UV gradient accumulators.
1006
+ float gu = 0.f;
1007
+ float gv = 0.f;
1008
+
1009
+ // Get texel indices and pointers for level 0.
1010
+ int4 tc0 = make_int4(0, 0, 0, 0);
1011
+ float2 uv0 = indexTextureLinear<CUBE_MODE>(p, uv, tz, tc0, level0);
1012
+ const float* pIn0 = p.tex[level0];
1013
+ float* pOut0 = p.gradTex[level0];
1014
+ bool corner0 = CUBE_MODE && ((tc0.x | tc0.y | tc0.z | tc0.w) < 0);
1015
+ tc0 *= p.channels;
1016
+
1017
+ // Texel weights.
1018
+ float uv011 = uv0.x * uv0.y;
1019
+ float uv010 = uv0.x - uv011;
1020
+ float uv001 = uv0.y - uv011;
1021
+ float uv000 = 1.f - uv0.x - uv001;
1022
+ float4 tw0 = make_float4(uv000, uv010, uv001, uv011);
1023
+
1024
+ // Attribute weights.
1025
+ int2 sz0 = mipLevelSize(p, level0);
1026
+ float sclu0 = (float)sz0.x;
1027
+ float sclv0 = (float)sz0.y;
1028
+
1029
+ // Bilinear mode - texture and uv gradients.
1030
+ if (FILTER_MODE == TEX_MODE_LINEAR || FILTER_MODE == TEX_MODE_LINEAR_MIPMAP_NEAREST)
1031
+ {
1032
+ for (int i=0; i < p.channels; i++, tc0 += 1)
1033
+ {
1034
+ float dy = pDy[i];
1035
+ accumQuad(tw0 * dy, pOut0, level0, tc0, corner0, CA_TEMP);
1036
+
1037
+ float a00, a10, a01, a11;
1038
+ fetchQuad<float>(a00, a10, a01, a11, pIn0, tc0, corner0);
1039
+ float ad = (a11 + a00 - a10 - a01);
1040
+ gu += dy * ((a10 - a00) + uv0.y * ad) * sclu0;
1041
+ gv += dy * ((a01 - a00) + uv0.x * ad) * sclv0;
1042
+ }
1043
+
1044
+ // Store UV gradients and exit.
1045
+ if (CUBE_MODE)
1046
+ ((float3*)p.gradUV)[pidx] = indexCubeMapGrad(uv, gu, gv);
1047
+ else
1048
+ ((float2*)p.gradUV)[pidx] = make_float2(gu, gv);
1049
+
1050
+ return;
1051
+ }
1052
+
1053
+ // Accumulate fractional mip level gradient.
1054
+ float df = 0; // dL/df.
1055
+
1056
+ // Get texel indices and pointers for level 1.
1057
+ int4 tc1 = make_int4(0, 0, 0, 0);
1058
+ float2 uv1 = indexTextureLinear<CUBE_MODE>(p, uv, tz, tc1, level1);
1059
+ const float* pIn1 = p.tex[level1];
1060
+ float* pOut1 = p.gradTex[level1];
1061
+ bool corner1 = CUBE_MODE && ((tc1.x | tc1.y | tc1.z | tc1.w) < 0);
1062
+ tc1 *= p.channels;
1063
+
1064
+ // Texel weights.
1065
+ float uv111 = uv1.x * uv1.y;
1066
+ float uv110 = uv1.x - uv111;
1067
+ float uv101 = uv1.y - uv111;
1068
+ float uv100 = 1.f - uv1.x - uv101;
1069
+ float4 tw1 = make_float4(uv100, uv110, uv101, uv111);
1070
+
1071
+ // Attribute weights.
1072
+ int2 sz1 = mipLevelSize(p, level1);
1073
+ float sclu1 = (float)sz1.x;
1074
+ float sclv1 = (float)sz1.y;
1075
+
1076
+ // Trilinear mode.
1077
+ for (int i=0; i < p.channels; i++, tc0 += 1, tc1 += 1)
1078
+ {
1079
+ float dy = pDy[i];
1080
+ float dy0 = (1.f - flevel) * dy;
1081
+ accumQuad(tw0 * dy0, pOut0, level0, tc0, corner0, CA_TEMP);
1082
+
1083
+ // UV gradients for first level.
1084
+ float a00, a10, a01, a11;
1085
+ fetchQuad<float>(a00, a10, a01, a11, pIn0, tc0, corner0);
1086
+ float ad = (a11 + a00 - a10 - a01);
1087
+ gu += dy0 * ((a10 - a00) + uv0.y * ad) * sclu0;
1088
+ gv += dy0 * ((a01 - a00) + uv0.x * ad) * sclv0;
1089
+
1090
+ // Second level unless in magnification mode.
1091
+ if (flevel > 0.f)
1092
+ {
1093
+ // Texture gradients for second level.
1094
+ float dy1 = flevel * dy;
1095
+ accumQuad(tw1 * dy1, pOut1, level1, tc1, corner1, CA_TEMP);
1096
+
1097
+ // UV gradients for second level.
1098
+ float b00, b10, b01, b11;
1099
+ fetchQuad<float>(b00, b10, b01, b11, pIn1, tc1, corner1);
1100
+ float bd = (b11 + b00 - b10 - b01);
1101
+ gu += dy1 * ((b10 - b00) + uv1.y * bd) * sclu1;
1102
+ gv += dy1 * ((b01 - b00) + uv1.x * bd) * sclv1;
1103
+
1104
+ // Mip level gradient.
1105
+ float a = bilerp(a00, a10, a01, a11, uv0);
1106
+ float b = bilerp(b00, b10, b01, b11, uv1);
1107
+ df += (b-a) * dy;
1108
+ }
1109
+ }
1110
+
1111
+ // Store UV gradients.
1112
+ if (CUBE_MODE)
1113
+ ((float3*)p.gradUV)[pidx] = indexCubeMapGrad(uv, gu, gv) + (dfdv * df);
1114
+ else
1115
+ ((float2*)p.gradUV)[pidx] = make_float2(gu, gv);
1116
+
1117
+ // Store mip level bias gradient.
1118
+ if (p.gradMipLevelBias)
1119
+ p.gradMipLevelBias[pidx] = df;
1120
+
1121
+ // Store UV pixel differential gradients.
1122
+ if (!BIAS_ONLY)
1123
+ {
1124
+ // Final gradients.
1125
+ dw *= df; // dL/(d{s,t}/d{X,Y}) = df/(d{s,t}/d{X,Y}) * dL/df.
1126
+
1127
+ // Store them.
1128
+ if (CUBE_MODE)
1129
+ {
1130
+ // Remap from dL/(d{s,t}/d{X,Y}) to dL/(d{x,y,z}/d{X,Y}).
1131
+ float3 g0, g1;
1132
+ indexCubeMapGrad4(uv, dw, g0, g1);
1133
+ ((float2*)p.gradUVDA)[3 * pidx + 0] = make_float2(g0.x, g1.x);
1134
+ ((float2*)p.gradUVDA)[3 * pidx + 1] = make_float2(g0.y, g1.y);
1135
+ ((float2*)p.gradUVDA)[3 * pidx + 2] = make_float2(g0.z, g1.z);
1136
+ }
1137
+ else
1138
+ ((float4*)p.gradUVDA)[pidx] = dw;
1139
+ }
1140
+ }
1141
+
1142
+ // Template specializations.
1143
+ __global__ void TextureGradKernelNearest (const TextureKernelParams p) { TextureGradKernelTemplate<false, false, TEX_MODE_NEAREST>(p); }
1144
+ __global__ void TextureGradKernelLinear (const TextureKernelParams p) { TextureGradKernelTemplate<false, false, TEX_MODE_LINEAR>(p); }
1145
+ __global__ void TextureGradKernelLinearMipmapNearest (const TextureKernelParams p) { TextureGradKernelTemplate<false, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
1146
+ __global__ void TextureGradKernelLinearMipmapLinear (const TextureKernelParams p) { TextureGradKernelTemplate<false, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
1147
+ __global__ void TextureGradKernelCubeNearest (const TextureKernelParams p) { TextureGradKernelTemplate<true, false, TEX_MODE_NEAREST>(p); }
1148
+ __global__ void TextureGradKernelCubeLinear (const TextureKernelParams p) { TextureGradKernelTemplate<true, false, TEX_MODE_LINEAR>(p); }
1149
+ __global__ void TextureGradKernelCubeLinearMipmapNearest (const TextureKernelParams p) { TextureGradKernelTemplate<true, false, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
1150
+ __global__ void TextureGradKernelCubeLinearMipmapLinear (const TextureKernelParams p) { TextureGradKernelTemplate<true, false, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
1151
+ __global__ void TextureGradKernelLinearMipmapNearestBO (const TextureKernelParams p) { TextureGradKernelTemplate<false, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
1152
+ __global__ void TextureGradKernelLinearMipmapLinearBO (const TextureKernelParams p) { TextureGradKernelTemplate<false, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
1153
+ __global__ void TextureGradKernelCubeLinearMipmapNearestBO (const TextureKernelParams p) { TextureGradKernelTemplate<true, true, TEX_MODE_LINEAR_MIPMAP_NEAREST>(p); }
1154
+ __global__ void TextureGradKernelCubeLinearMipmapLinearBO (const TextureKernelParams p) { TextureGradKernelTemplate<true, true, TEX_MODE_LINEAR_MIPMAP_LINEAR>(p); }
1155
+
1156
+ //------------------------------------------------------------------------
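As a cross-check on the bilinear weights and UV derivatives accumulated above (the uv000..uv011 weights and the shared term ad = a11 + a00 - a10 - a01), here is a minimal NumPy sketch, not part of the source, that restates them for a single texel quad and verifies the analytic derivatives against finite differences; all names and values are illustrative.

import numpy as np

def bilerp_and_grads(a00, a10, a01, a11, u, v):
    # Texel weights, written exactly as in the gradient kernel above.
    w11 = u * v
    w10 = u - w11           # == u * (1 - v)
    w01 = v - w11           # == v * (1 - u)
    w00 = 1.0 - u - w01     # == (1 - u) * (1 - v)
    value = a00 * w00 + a10 * w10 + a01 * w01 + a11 * w11
    # Analytic derivatives via the shared term 'ad'.
    ad = a11 + a00 - a10 - a01
    d_du = (a10 - a00) + v * ad
    d_dv = (a01 - a00) + u * ad
    return value, d_du, d_dv

# Finite-difference check with arbitrary texel values.
rng = np.random.default_rng(0)
a00, a10, a01, a11 = rng.standard_normal(4)
u, v, eps = 0.3, 0.7, 1e-5
val, d_du, d_dv = bilerp_and_grads(a00, a10, a01, a11, u, v)
fd_du = (bilerp_and_grads(a00, a10, a01, a11, u + eps, v)[0] - val) / eps
fd_dv = (bilerp_and_grads(a00, a10, a01, a11, u, v + eps)[0] - val) / eps
assert abs(d_du - fd_du) < 1e-3 and abs(d_dv - fd_dv) < 1e-3

The kernel additionally scales these derivatives by the mip level size (sclu0, sclv0) because the stored UVs are in normalized [0, 1] units rather than texels.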
extensions/nvdiffrast/nvdiffrast/lib/setgpu.lib ADDED
Binary file (7.25 kB).
 
extensions/nvdiffrast/nvdiffrast/tensorflow/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ # and proprietary rights in and to this software, related documentation
5
+ # and any modifications thereto. Any use, reproduction, disclosure or
6
+ # distribution of this software and related documentation without an express
7
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ from .ops import rasterize, interpolate, texture, antialias
10
+ from .plugin_loader import set_cache_dir
11
+
12
+ __all__ = ["rasterize", "interpolate", "texture", "antialias", "set_cache_dir"]
extensions/nvdiffrast/nvdiffrast/tensorflow/ops.py ADDED
@@ -0,0 +1,303 @@
1
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ # and proprietary rights in and to this software, related documentation
5
+ # and any modifications thereto. Any use, reproduction, disclosure or
6
+ # distribution of this software and related documentation without an express
7
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ import tensorflow as tf
10
+ import numpy as np
11
+ import os
12
+ from . import plugin_loader
13
+
14
+ #----------------------------------------------------------------------------
15
+ # Helpers.
16
+ #----------------------------------------------------------------------------
17
+
18
+ # OpenGL-related linker options depending on platform.
19
+ def _get_gl_opts():
20
+ libs = {
21
+ 'posix': ['GL', 'EGL'],
22
+ 'nt': ['gdi32', 'opengl32', 'user32', 'setgpu'],
23
+ }
24
+ return ['-l' + x for x in libs[os.name]]
25
+
26
+ # Load the cpp plugin.
27
+ def _get_plugin():
28
+ fn = os.path.join(os.path.dirname(__file__), 'tf_all.cu')
29
+ return plugin_loader.get_plugin(fn, extra_nvcc_options=_get_gl_opts() + ['-DNVDR_TENSORFLOW'])
30
+
31
+ # Convert parameter to a numpy array if possible.
32
+ def _get_constant(x, dtype):
33
+ try:
34
+ return np.asarray(x, dtype=dtype)
35
+ except (TypeError, ValueError):
36
+ return None
37
+
38
+ # Tests for construction-time constantness instead of a tf.constant node, because
39
+ # the latter can be overridden in Session.run() feed_dict at evaluation time.
40
+ def _is_constant(x, dtype):
41
+ if isinstance(x, np.ndarray):
42
+ return np.can_cast(x.dtype, dtype, 'unsafe')
43
+ else:
44
+ return _get_constant(x, dtype) is not None
45
+
46
+ #----------------------------------------------------------------------------
47
+ # Rasterize.
48
+ #----------------------------------------------------------------------------
49
+
50
+ def rasterize(pos, tri, resolution, ranges=None, tri_const=False, output_db=True, grad_db=True):
51
+ assert tri_const is True or tri_const is False
52
+ assert output_db is True or output_db is False
53
+
54
+ # Known constant resolution?
55
+ resolution_c = _get_constant(resolution, np.int32)
56
+
57
+ # Known constant triangles?
58
+ tri_const = tri_const or _is_constant(tri, np.int32)
59
+
60
+ # Convert all inputs to tensors / base types.
61
+ tri_const = 1 if tri_const else 0
62
+ tri = tf.convert_to_tensor(tri, dtype=tf.int32)
63
+ pos = tf.convert_to_tensor(pos, dtype=tf.float32)
64
+ resolution = tf.convert_to_tensor(resolution, dtype=tf.int32)
65
+ if ranges is None:
66
+ ranges = tf.convert_to_tensor(np.zeros(shape=[0, 2], dtype=np.int32)) # Empty tensor.
67
+ else:
68
+ ranges = tf.convert_to_tensor(ranges, dtype=tf.int32) # Convert input to tensor.
69
+
70
+ # Infer as much about the output shape as possible.
71
+ out_shape = [None, None, None, 4]
72
+ if pos.shape.rank == 3: # Instanced mode.
73
+ out_shape[0] = pos.shape[0].value
74
+ elif pos.shape.rank == 2: # Range mode.
75
+ if ranges.shape.rank not in [None, 0]:
76
+ out_shape[0] = ranges.shape[0].value
77
+ if resolution_c is not None:
78
+ assert resolution_c.shape == (2,)
79
+ out_shape[1], out_shape[2] = resolution_c
80
+
81
+ # Output pixel differentials.
82
+ @tf.custom_gradient
83
+ def func_db(pos):
84
+ out, out_db = _get_plugin().rasterize_fwd(pos, tri, resolution, ranges, 1, tri_const)
85
+ out.set_shape(out_shape)
86
+ out_db.set_shape(out_shape)
87
+ def grad(dy, ddb):
88
+ if grad_db:
89
+ return _get_plugin().rasterize_grad_db(pos, tri, out, dy, ddb)
90
+ else:
91
+ return _get_plugin().rasterize_grad(pos, tri, out, dy)
92
+ return (out, out_db), grad
93
+
94
+ # Do not output pixel differentials.
95
+ @tf.custom_gradient
96
+ def func(pos):
97
+ out, out_db = _get_plugin().rasterize_fwd(pos, tri, resolution, ranges, 0, tri_const)
98
+ out.set_shape(out_shape)
99
+ out_db.set_shape(out_shape[:-1] + [0]) # Zero channels in out_db.
100
+ def grad(dy, _):
101
+ return _get_plugin().rasterize_grad(pos, tri, out, dy)
102
+ return (out, out_db), grad
103
+
104
+ # Choose stub.
105
+ if output_db:
106
+ return func_db(pos)
107
+ else:
108
+ return func(pos)
109
+
110
+ #----------------------------------------------------------------------------
111
+ # Interpolate.
112
+ #----------------------------------------------------------------------------
113
+
114
+ def interpolate(attr, rast, tri, rast_db=None, diff_attrs=None):
115
+ # Sanitize the list of pixel differential attributes.
116
+ if diff_attrs is None:
117
+ diff_attrs = []
118
+ elif diff_attrs != 'all':
119
+ diff_attrs = _get_constant(diff_attrs, np.int32)
120
+ assert (diff_attrs is not None) and len(diff_attrs.shape) == 1
121
+ diff_attrs = diff_attrs.tolist()
122
+
123
+ # Convert all inputs to tensors.
124
+ attr = tf.convert_to_tensor(attr, dtype=tf.float32)
125
+ rast = tf.convert_to_tensor(rast, dtype=tf.float32)
126
+ tri = tf.convert_to_tensor(tri, dtype=tf.int32)
127
+ if diff_attrs:
128
+ rast_db = tf.convert_to_tensor(rast_db, dtype=tf.float32)
129
+
130
+ # Infer output shape.
131
+ out_shape = [None, None, None, None]
132
+ if rast.shape.rank is not None:
133
+ out_shape = [rast.shape[0].value, rast.shape[1].value, rast.shape[2].value, None]
134
+ if attr.shape.rank in [2, 3]:
135
+ out_shape[3] = attr.shape[-1].value
136
+
137
+ # Output pixel differentials for at least some attributes.
138
+ @tf.custom_gradient
139
+ def func_da(attr, rast, rast_db):
140
+ diff_attrs_all = int(diff_attrs == 'all')
141
+ diff_attrs_list = [] if diff_attrs_all else diff_attrs
142
+ out, out_da = _get_plugin().interpolate_fwd_da(attr, rast, tri, rast_db, diff_attrs_all, diff_attrs_list)
143
+
144
+ # Infer number of channels in out_da.
145
+ if not diff_attrs_all:
146
+ da_channels = 2 * len(diff_attrs)
147
+ if (attr.shape.rank in [2, 3]) and (attr.shape[-1].value is not None):
148
+ da_channels = 2 * attr.shape[-1].value
149
+ else:
150
+ da_channels = None
151
+
152
+ # Set output shapes.
153
+ out.set_shape(out_shape)
154
+ out_da.set_shape([out_shape[0], out_shape[1], out_shape[2], da_channels])
155
+
156
+ def grad(dy, dda):
157
+ return _get_plugin().interpolate_grad_da(attr, rast, tri, dy, rast_db, dda, diff_attrs_all, diff_attrs_list)
158
+ return (out, out_da), grad
159
+
160
+ # No pixel differentials for any attribute.
161
+ @tf.custom_gradient
162
+ def func(attr, rast):
163
+ out, out_da = _get_plugin().interpolate_fwd(attr, rast, tri)
164
+ out.set_shape(out_shape)
165
+ out_da.set_shape(out_shape[:-1] + [0]) # Zero channels in out_da.
166
+ def grad(dy, _):
167
+ return _get_plugin().interpolate_grad(attr, rast, tri, dy)
168
+ return (out, out_da), grad
169
+
170
+ # Choose stub.
171
+ if diff_attrs:
172
+ return func_da(attr, rast, rast_db)
173
+ else:
174
+ return func(attr, rast)
175
+
176
+ #----------------------------------------------------------------------------
177
+ # Texture.
178
+ #----------------------------------------------------------------------------
179
+
180
+ def texture(tex, uv, uv_da=None, filter_mode='auto', boundary_mode='wrap', tex_const=False, max_mip_level=None):
181
+ assert tex_const is True or tex_const is False
182
+
183
+ # Default filter mode.
184
+ if filter_mode == 'auto':
185
+ filter_mode = 'linear-mipmap-linear' if (uv_da is not None) else 'linear'
186
+
187
+ # Known constant texture?
188
+ tex_const = tex_const or _is_constant(tex, np.float32)
189
+
190
+ # Sanitize inputs.
191
+ tex_const = 1 if tex_const else 0
192
+ if max_mip_level is None:
193
+ max_mip_level = -1
194
+ else:
195
+ max_mip_level = int(max_mip_level)
196
+ assert max_mip_level >= 0
197
+
198
+ # Convert inputs to tensors.
199
+ tex = tf.convert_to_tensor(tex, dtype=tf.float32)
200
+ uv = tf.convert_to_tensor(uv, dtype=tf.float32)
201
+ if 'mipmap' in filter_mode:
202
+ uv_da = tf.convert_to_tensor(uv_da, dtype=tf.float32)
203
+
204
+ # Infer output shape.
205
+ out_shape = [None, None, None, None]
206
+ if uv.shape.rank is not None:
207
+ assert uv.shape.rank == 4
208
+ out_shape = [uv.shape[0].value, uv.shape[1].value, uv.shape[2].value, None]
209
+ if tex.shape.rank is not None:
210
+ assert tex.shape.rank == (5 if boundary_mode == 'cube' else 4)
211
+ out_shape[-1] = tex.shape[-1].value
212
+
213
+ # If mipping disabled via max level=0, we may as well use simpler filtering internally.
214
+ if max_mip_level == 0 and filter_mode in ['linear-mipmap-nearest', 'linear-mipmap-linear']:
215
+ filter_mode = 'linear'
216
+
217
+ # Convert filter mode to internal enumeration.
218
+ filter_mode_dict = {'nearest': 0, 'linear': 1, 'linear-mipmap-nearest': 2, 'linear-mipmap-linear': 3}
219
+ filter_mode_enum = filter_mode_dict[filter_mode]
220
+
221
+ # Convert boundary mode to internal enumeration.
222
+ boundary_mode_dict = {'cube': 0, 'wrap': 1, 'clamp': 2, 'zero': 3}
223
+ boundary_mode_enum = boundary_mode_dict[boundary_mode]
224
+
225
+ # Linear-mipmap-linear: Mipmaps enabled, all gradients active.
226
+ @tf.custom_gradient
227
+ def func_linear_mipmap_linear(tex, uv, uv_da):
228
+ out, mip = _get_plugin().texture_fwd_mip(tex, uv, uv_da, filter_mode_enum, boundary_mode_enum, tex_const, max_mip_level)
229
+ out.set_shape(out_shape)
230
+ def grad(dy):
231
+ return _get_plugin().texture_grad_linear_mipmap_linear(tex, uv, dy, uv_da, mip, filter_mode_enum, boundary_mode_enum, max_mip_level)
232
+ return out, grad
233
+
234
+ # Linear-mipmap-nearest: Mipmaps enabled, no gradients to uv_da.
235
+ @tf.custom_gradient
236
+ def func_linear_mipmap_nearest(tex, uv):
237
+ out, mip = _get_plugin().texture_fwd_mip(tex, uv, uv_da, filter_mode_enum, boundary_mode_enum, tex_const, max_mip_level)
238
+ out.set_shape(out_shape)
239
+ def grad(dy):
240
+ return _get_plugin().texture_grad_linear_mipmap_nearest(tex, uv, dy, uv_da, mip, filter_mode_enum, boundary_mode_enum, max_mip_level)
241
+ return out, grad
242
+
243
+ # Linear: Mipmaps disabled, no uv_da, no gradients to uv_da.
244
+ @tf.custom_gradient
245
+ def func_linear(tex, uv):
246
+ out = _get_plugin().texture_fwd(tex, uv, filter_mode_enum, boundary_mode_enum)
247
+ out.set_shape(out_shape)
248
+ def grad(dy):
249
+ return _get_plugin().texture_grad_linear(tex, uv, dy, filter_mode_enum, boundary_mode_enum)
250
+ return out, grad
251
+
252
+ # Nearest: Mipmaps disabled, no uv_da, no gradients to uv_da or uv.
253
+ @tf.custom_gradient
254
+ def func_nearest(tex):
255
+ out = _get_plugin().texture_fwd(tex, uv, filter_mode_enum, boundary_mode_enum)
256
+ out.set_shape(out_shape)
257
+ def grad(dy):
258
+ return _get_plugin().texture_grad_nearest(tex, uv, dy, filter_mode_enum, boundary_mode_enum)
259
+ return out, grad
260
+
261
+ # Choose stub.
262
+ if filter_mode == 'linear-mipmap-linear':
263
+ return func_linear_mipmap_linear(tex, uv, uv_da)
264
+ elif filter_mode == 'linear-mipmap-nearest':
265
+ return func_linear_mipmap_nearest(tex, uv)
266
+ elif filter_mode == 'linear':
267
+ return func_linear(tex, uv)
268
+ elif filter_mode == 'nearest':
269
+ return func_nearest(tex)
270
+
271
+ #----------------------------------------------------------------------------
272
+ # Antialias.
273
+ #----------------------------------------------------------------------------
274
+
275
+ def antialias(color, rast, pos, tri, tri_const=False, pos_gradient_boost=1.0):
276
+ assert tri_const is True or tri_const is False
277
+
278
+ # Known constant triangles?
279
+ tri_const = tri_const or _is_constant(tri, np.int32)
280
+
281
+ # Convert inputs to tensors.
282
+ color = tf.convert_to_tensor(color, dtype=tf.float32)
283
+ rast = tf.convert_to_tensor(rast, dtype=tf.float32)
284
+ pos = tf.convert_to_tensor(pos, dtype=tf.float32)
285
+ tri = tf.convert_to_tensor(tri, dtype=tf.int32)
286
+
287
+ # Sanitize inputs.
288
+ tri_const = 1 if tri_const else 0
289
+
290
+ @tf.custom_gradient
291
+ def func(color, pos):
292
+ color_out, work_buffer = _get_plugin().antialias_fwd(color, rast, pos, tri, tri_const)
293
+ color_out.set_shape(color.shape)
294
+ def grad(dy):
295
+ grad_color, grad_pos = _get_plugin().antialias_grad(color, rast, pos, tri, dy, work_buffer)
296
+ if pos_gradient_boost != 1.0:
297
+ grad_pos = grad_pos * pos_gradient_boost
298
+ return grad_color, grad_pos
299
+ return color_out, grad
300
+
301
+ return func(color, pos)
302
+
303
+ #----------------------------------------------------------------------------
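For reference, a hypothetical TF1-style (graph-mode) usage sketch of the four ops defined above, assuming the package is importable as nvdiffrast.tensorflow; the placeholder shapes and variable names are illustrative assumptions, not taken from this repository.

import tensorflow as tf
import nvdiffrast.tensorflow as dr   # assumed import path

# Illustrative inputs: one instance, clip-space vertices, a 256x256 RGB texture.
pos = tf.placeholder(tf.float32, [1, None, 4])     # [minibatch, num_vertices, 4]
tri = tf.placeholder(tf.int32,   [None, 3])        # triangle indices
uv  = tf.placeholder(tf.float32, [None, 2])        # per-vertex texture coordinates
tex = tf.placeholder(tf.float32, [1, 256, 256, 3])

rast, rast_db = dr.rasterize(pos, tri, resolution=[256, 256])
uv_pix, uv_da = dr.interpolate(tf.expand_dims(uv, 0), rast, tri,
                               rast_db=rast_db, diff_attrs='all')
color = dr.texture(tex, uv_pix, uv_da)             # 'auto' -> linear-mipmap-linear
color = dr.antialias(color, rast, pos, tri)

The resulting color tensor behaves like any other graph tensor, so it can be evaluated in a Session.run() call or differentiated with tf.gradients.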
extensions/nvdiffrast/nvdiffrast/tensorflow/plugin_loader.py ADDED
@@ -0,0 +1,219 @@
1
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ # and proprietary rights in and to this software, related documentation
5
+ # and any modifications thereto. Any use, reproduction, disclosure or
6
+ # distribution of this software and related documentation without an express
7
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ import glob
10
+ import os
11
+ import re
12
+ import uuid
13
+ import hashlib
14
+ import tempfile
15
+ import shutil
16
+ import tensorflow as tf
17
+ from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module
18
+
19
+ #----------------------------------------------------------------------------
20
+ # Global options.
21
+
22
+ _nvdiffrast_cache_dir = None
23
+
24
+ def set_cache_dir(path: str) -> None:
25
+ '''Set CUDA kernel compilation temp dir.
26
+
27
+ If `set_cache_dir` is not called, the cache directory will default to
28
+ one of the below:
29
+
30
+ - Value of NVDIFFRAST_CACHE_DIR env var, if set
31
+ - $HOME/.cache/nvdiffrast if HOME env var is set
32
+ - $USERPROFILE/.cache/nvdiffrast if USERPROFILE env var is set
+ - A directory under tempfile.gettempdir() otherwise.
33
+
34
+ Args:
35
+ path: Where to save CUDA kernel build temporaries
36
+ '''
37
+ global _nvdiffrast_cache_dir
38
+ _nvdiffrast_cache_dir = path
39
+
40
+ def make_cache_dir_path(*paths: str) -> str:
41
+ if _nvdiffrast_cache_dir is not None:
42
+ return os.path.join(_nvdiffrast_cache_dir, *paths)
43
+ if 'NVDIFFRAST_CACHE_DIR' in os.environ:
44
+ return os.path.join(os.environ['NVDIFFRAST_CACHE_DIR'], *paths)
45
+ if 'HOME' in os.environ:
46
+ return os.path.join(os.environ['HOME'], '.cache', 'nvdiffrast', *paths)
47
+ if 'USERPROFILE' in os.environ:
48
+ return os.path.join(os.environ['USERPROFILE'], '.cache', 'nvdiffrast', *paths)
49
+ return os.path.join(tempfile.gettempdir(), '.cache', 'nvdiffrast', *paths)
50
+
51
+ cuda_cache_version_tag = 'v1'
52
+ do_not_hash_included_headers = False # Speed up compilation by assuming that headers included by the CUDA code never change. Unsafe!
53
+ verbose = True # Print status messages to stdout.
54
+
55
+ #----------------------------------------------------------------------------
56
+ # Internal helper funcs.
57
+
58
+ def _find_compiler_bindir():
59
+ hostx64_paths = sorted(glob.glob('C:/Program Files/Microsoft Visual Studio/*/Enterprise/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True)
60
+ if hostx64_paths != []:
61
+ return hostx64_paths[0]
62
+ hostx64_paths = sorted(glob.glob('C:/Program Files (x86)/Microsoft Visual Studio/*/Enterprise/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True)
63
+ if hostx64_paths != []:
64
+ return hostx64_paths[0]
65
+ hostx64_paths = sorted(glob.glob('C:/Program Files/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True)
66
+ if hostx64_paths != []:
67
+ return hostx64_paths[0]
68
+ hostx64_paths = sorted(glob.glob('C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True)
69
+ if hostx64_paths != []:
70
+ return hostx64_paths[0]
71
+ hostx64_paths = sorted(glob.glob('C:/Program Files/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True)
72
+ if hostx64_paths != []:
73
+ return hostx64_paths[0]
74
+ hostx64_paths = sorted(glob.glob('C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True)
75
+ if hostx64_paths != []:
76
+ return hostx64_paths[0]
77
+ hostx64_paths = sorted(glob.glob('C:/Program Files/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True)
78
+ if hostx64_paths != []:
79
+ return hostx64_paths[0]
80
+ hostx64_paths = sorted(glob.glob('C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64'), reverse=True)
81
+ if hostx64_paths != []:
82
+ return hostx64_paths[0]
83
+ vc_bin_dir = 'C:/Program Files (x86)/Microsoft Visual Studio 14.0/vc/bin'
84
+ if os.path.isdir(vc_bin_dir):
85
+ return vc_bin_dir
86
+ return None
87
+
88
+ def _get_compute_cap(device):
89
+ caps_str = device.physical_device_desc
90
+ m = re.search('compute capability: (\\d+).(\\d+)', caps_str)
91
+ major = m.group(1)
92
+ minor = m.group(2)
93
+ return (major, minor)
94
+
95
+ def _get_cuda_gpu_arch_string():
96
+ gpus = [x for x in device_lib.list_local_devices() if x.device_type == 'GPU']
97
+ if len(gpus) == 0:
98
+ raise RuntimeError('No GPU devices found')
99
+ (major, minor) = _get_compute_cap(gpus[0])
100
+ return 'sm_%s%s' % (major, minor)
101
+
102
+ def _run_cmd(cmd):
103
+ with os.popen(cmd) as pipe:
104
+ output = pipe.read()
105
+ status = pipe.close()
106
+ if status is not None:
107
+ raise RuntimeError('NVCC returned an error. See below for full command line and output log:\n\n%s\n\n%s' % (cmd, output))
108
+
109
+ def _prepare_nvcc_cli(opts):
110
+ cmd = 'nvcc ' + opts.strip()
111
+ cmd += ' --disable-warnings'
112
+ cmd += ' --include-path "%s"' % tf.sysconfig.get_include()
113
+ cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'protobuf_archive', 'src')
114
+ cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'com_google_absl')
115
+ cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'eigen_archive')
116
+
117
+ compiler_bindir = _find_compiler_bindir()
118
+ if compiler_bindir is None:
119
+ # Require that _find_compiler_bindir succeeds on Windows. Allow
120
+ # nvcc to use whatever is the default on Linux.
121
+ if os.name == 'nt':
122
+ raise RuntimeError('Could not find MSVC/GCC/CLANG installation on this computer. Check compiler_bindir_search_path list in "%s".' % __file__)
123
+ else:
124
+ cmd += ' --compiler-bindir "%s"' % compiler_bindir
125
+ cmd += ' 2>&1'
126
+ return cmd
127
+
128
+ #----------------------------------------------------------------------------
129
+ # Main entry point.
130
+
131
+ _plugin_cache = dict()
132
+
133
+ def get_plugin(cuda_file, extra_nvcc_options=[]):
134
+ cuda_file_base = os.path.basename(cuda_file)
135
+ cuda_file_name, cuda_file_ext = os.path.splitext(cuda_file_base)
136
+
137
+ # Already in cache?
138
+ if cuda_file in _plugin_cache:
139
+ return _plugin_cache[cuda_file]
140
+
141
+ # Setup plugin.
142
+ if verbose:
143
+ print('Setting up TensorFlow plugin "%s": ' % cuda_file_base, end='', flush=True)
144
+ try:
145
+ # Hash CUDA source.
146
+ md5 = hashlib.md5()
147
+ with open(cuda_file, 'rb') as f:
148
+ md5.update(f.read())
149
+ md5.update(b'\n')
150
+
151
+ # Hash headers included by the CUDA code by running it through the preprocessor.
152
+ if not do_not_hash_included_headers:
153
+ if verbose:
154
+ print('Preprocessing... ', end='', flush=True)
155
+ with tempfile.TemporaryDirectory() as tmp_dir:
156
+ tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + cuda_file_ext)
157
+ _run_cmd(_prepare_nvcc_cli('"%s" --preprocess -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir)))
158
+ with open(tmp_file, 'rb') as f:
159
+ bad_file_str = ('"' + cuda_file.replace('\\', '/') + '"').encode('utf-8') # __FILE__ in error check macros
160
+ good_file_str = ('"' + cuda_file_base + '"').encode('utf-8')
161
+ for ln in f:
162
+ if not ln.startswith(b'# ') and not ln.startswith(b'#line '): # ignore line number pragmas
163
+ ln = ln.replace(bad_file_str, good_file_str)
164
+ md5.update(ln)
165
+ md5.update(b'\n')
166
+
167
+ # Select compiler options.
168
+ compile_opts = ''
169
+ if os.name == 'nt':
170
+ compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.lib')
171
+ compile_opts += ' --library-path="%s"' % (os.path.dirname(__file__) + r"\..\lib") # Find libraries during compilation.
172
+ elif os.name == 'posix':
173
+ compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.so')
174
+ compile_opts += ' --compiler-options \'-fPIC -D_GLIBCXX_USE_CXX11_ABI=0\''
175
+ else:
176
+ assert False # not Windows or Linux, w00t?
177
+ compile_opts += ' --gpu-architecture=%s' % _get_cuda_gpu_arch_string()
178
+ compile_opts += ' --use_fast_math'
179
+ for opt in extra_nvcc_options:
180
+ compile_opts += ' ' + opt
181
+ nvcc_cmd = _prepare_nvcc_cli(compile_opts)
182
+
183
+ # Hash build configuration.
184
+ md5.update(('nvcc_cmd: ' + nvcc_cmd).encode('utf-8') + b'\n')
185
+ md5.update(('tf.VERSION: ' + tf.VERSION).encode('utf-8') + b'\n')
186
+ md5.update(('cuda_cache_version_tag: ' + cuda_cache_version_tag).encode('utf-8') + b'\n')
187
+
188
+ # Compile if not already compiled.
189
+ bin_file_ext = '.dll' if os.name == 'nt' else '.so'
190
+ cuda_cache_path = make_cache_dir_path()
191
+ bin_file = os.path.join(make_cache_dir_path(), cuda_file_name + '_' + md5.hexdigest() + bin_file_ext)
192
+ if not os.path.isfile(bin_file):
193
+ if verbose:
194
+ print('Compiling... ', end='', flush=True)
195
+ with tempfile.TemporaryDirectory() as tmp_dir:
196
+ tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + bin_file_ext)
197
+ _run_cmd(nvcc_cmd + ' "%s" --shared -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir))
198
+ os.makedirs(cuda_cache_path, exist_ok=True)
199
+ intermediate_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + uuid.uuid4().hex + '_tmp' + bin_file_ext)
200
+ shutil.copyfile(tmp_file, intermediate_file)
201
+ os.rename(intermediate_file, bin_file) # atomic
202
+
203
+ # Load.
204
+ if verbose:
205
+ print('Loading... ', end='', flush=True)
206
+ plugin = tf.load_op_library(bin_file)
207
+
208
+ # Add to cache.
209
+ _plugin_cache[cuda_file] = plugin
210
+ if verbose:
211
+ print('Done.', flush=True)
212
+ return plugin
213
+
214
+ except:
215
+ if verbose:
216
+ print('Failed!', flush=True)
217
+ raise
218
+
219
+ #----------------------------------------------------------------------------
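A minimal usage sketch, not from the repository: pointing the kernel build cache at a writable location before the first op call triggers an nvcc compilation. The path is a hypothetical example; setting the NVDIFFRAST_CACHE_DIR environment variable before that first compilation has the same effect.

import nvdiffrast.tensorflow as dr   # assumed import path

dr.set_cache_dir('/tmp/nvdiffrast_cache')   # hypothetical, writable location
# Alternatively, before the first compilation:
#   os.environ['NVDIFFRAST_CACHE_DIR'] = '/tmp/nvdiffrast_cache'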
extensions/nvdiffrast/nvdiffrast/tensorflow/tf_all.cu ADDED
@@ -0,0 +1,36 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ // TF-specific helpers.
10
+
11
+ #define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { cudaError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == cudaSuccess, errors::Internal("Cuda error: ", cudaGetErrorName(err), "[", #CUDA_CALL, ";]")); } while (0)
12
+ #define OP_CHECK_GL_ERROR(CTX, GL_CALL) do { GL_CALL; GLenum err = glGetError(); OP_REQUIRES(CTX, err == GL_NO_ERROR, errors::Internal("OpenGL error: ", getGLErrorString(err), "[", #GL_CALL, ";]")); } while (0)
13
+
14
+ // Cuda kernels and CPP all together. What an absolute compilation unit.
15
+
16
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
17
+ #include "../common/framework.h"
18
+ #include "../common/glutil.cpp"
19
+
20
+ #include "../common/common.h"
21
+ #include "../common/common.cpp"
22
+
23
+ #include "../common/rasterize.h"
24
+ #include "../common/rasterize_gl.cpp"
25
+ #include "../common/rasterize.cu"
26
+ #include "tf_rasterize.cu"
27
+
28
+ #include "../common/interpolate.cu"
29
+ #include "tf_interpolate.cu"
30
+
31
+ #include "../common/texture.cpp"
32
+ #include "../common/texture.cu"
33
+ #include "tf_texture.cu"
34
+
35
+ #include "../common/antialias.cu"
36
+ #include "tf_antialias.cu"
extensions/nvdiffrast/nvdiffrast/tensorflow/tf_antialias.cu ADDED
@@ -0,0 +1,278 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ //------------------------------------------------------------------------
10
+ // Forward TensorFlow op.
11
+
12
+ struct AntialiasFwdOp : public OpKernel
13
+ {
14
+ AntialiasKernelParams m_attribs;
15
+
16
+ AntialiasFwdOp(OpKernelConstruction* ctx): OpKernel(ctx)
17
+ {
18
+ memset(&m_attribs, 0, sizeof(m_attribs));
19
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("tri_const", &m_attribs.tri_const));
20
+ }
21
+
22
+ void Compute(OpKernelContext* ctx)
23
+ {
24
+ AntialiasKernelParams& p = m_attribs;
25
+ cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
26
+
27
+ // Get input.
28
+ const Tensor& color = ctx->input(0);
29
+ const Tensor& rasterOut = ctx->input(1);
30
+ const Tensor& pos = ctx->input(2);
31
+ const Tensor& tri = ctx->input(3);
32
+
33
+ // Instance rendering mode?
34
+ p.instance_mode = pos.dims() > 2;
35
+
36
+ // Extract input dimensions.
37
+ if (p.instance_mode)
38
+ p.numVertices = (pos.dims() > 1) ? pos.dim_size(1) : 0;
39
+ else
40
+ p.numVertices = (pos.dims() > 0) ? pos.dim_size(0) : 0;
41
+ p.numTriangles = (tri.dims() > 0) ? tri.dim_size(0) : 0;
42
+ p.n = (color.dims() > 0) ? color.dim_size(0) : 0;
43
+ p.height = (color.dims() > 1) ? color.dim_size(1) : 0;
44
+ p.width = (color.dims() > 2) ? color.dim_size(2) : 0;
45
+ p.channels = (color.dims() > 3) ? color.dim_size(3) : 0;
46
+
47
+ // Sanity checks.
48
+ OP_REQUIRES(ctx, color.dims() == 4 && color.dim_size(0) > 0 && color.dim_size(1) > 0 && color.dim_size(2) > 0 && color.dim_size(3) > 0, errors::InvalidArgument("color must have shape[>0, >0, >0, >0]"));
49
+ OP_REQUIRES(ctx, rasterOut.dims() == 4 && rasterOut.dim_size(0) > 0 && rasterOut.dim_size(1) > 0 && rasterOut.dim_size(2) > 0 && rasterOut.dim_size(3) == 4, errors::InvalidArgument("raster_out must have shape[>0, >0, >0, 4]"));
50
+ OP_REQUIRES(ctx, tri.dims() == 2 && tri.dim_size(0) > 0 && tri.dim_size(1) == 3, errors::InvalidArgument("tri must have shape [>0, 3]"));
51
+ OP_REQUIRES(ctx, color.dim_size(1) == rasterOut.dim_size(1) && color.dim_size(2) == rasterOut.dim_size(2), errors::InvalidArgument("color and raster_out inputs must have same spatial dimensions"));
52
+ if (p.instance_mode)
53
+ {
54
+ OP_REQUIRES(ctx, pos.dims() == 3 && pos.dim_size(0) > 0 && pos.dim_size(1) > 0 && pos.dim_size(2) == 4, errors::InvalidArgument("pos must have shape [>0, >0, 4] or [>0, 4]"));
55
+ OP_REQUIRES(ctx, rasterOut.dim_size(0) == p.n && pos.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs color, raster_out, pos"));
56
+ }
57
+ else
58
+ {
59
+ OP_REQUIRES(ctx, pos.dims() == 2 && pos.dim_size(0) > 0 && pos.dim_size(1) == 4, errors::InvalidArgument("pos must have shape [>0, >0, 4] or [>0, 4]"));
60
+ OP_REQUIRES(ctx, rasterOut.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs color, raster_out"));
61
+ }
62
+
63
+ // Get input pointers.
64
+ p.color = color.flat<float>().data();
65
+ p.rasterOut = rasterOut.flat<float>().data();
66
+ p.tri = tri.flat<int>().data();
67
+ p.pos = pos.flat<float>().data();
68
+
69
+ // Misc parameters.
70
+ p.xh = .5f * (float)p.width;
71
+ p.yh = .5f * (float)p.height;
72
+
73
+ // Allocate output tensor.
74
+ Tensor* outputTensor = NULL;
75
+ TensorShape outputShape;
76
+ outputShape.AddDim(p.n);
77
+ outputShape.AddDim(p.height);
78
+ outputShape.AddDim(p.width);
79
+ outputShape.AddDim(p.channels);
80
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, outputShape, &outputTensor));
81
+ p.output = outputTensor->flat<float>().data();
82
+
83
+ // Allocate work buffer. One extra int4 for storing counters.
84
+ Tensor* workTensor = NULL;
85
+ TensorShape workShape;
86
+ workShape.AddDim(p.n * p.width * p.height * 8 + 4); // 8 int for a maximum of two work items per pixel.
87
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(1, workShape, &workTensor));
88
+ p.workBuffer = (int4*)(workTensor->flat<int>().data());
89
+
90
+ // Clear the work counters.
91
+ OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(p.workBuffer, 0, sizeof(int4), stream));
92
+
93
+ // Verify that buffers are aligned to allow float2/float4 operations.
94
+ OP_REQUIRES(ctx, !((uintptr_t)p.pos & 15), errors::Internal("pos input tensor not aligned to float4"));
95
+ OP_REQUIRES(ctx, !((uintptr_t)p.rasterOut & 7), errors::Internal("raster_out input tensor not aligned to float2"));
96
+ OP_REQUIRES(ctx, !((uintptr_t)p.workBuffer & 15), errors::Internal("work_buffer internal tensor not aligned to int4"));
97
+
98
+ // Kernel parameters.
99
+ void* args[] = {&p};
100
+
101
+ // (Re-)calculate opposite vertex hash.
102
+ if (!p.evHash || !p.tri_const)
103
+ {
104
+ if (p.allocTriangles < p.numTriangles)
105
+ {
106
+ p.allocTriangles = max(p.allocTriangles, 64);
107
+ while (p.allocTriangles < p.numTriangles)
108
+ p.allocTriangles <<= 1; // Must be power of two.
109
+
110
+ // (Re-)allocate memory for the hash.
111
+ OP_CHECK_CUDA_ERROR(ctx, cudaFree(p.evHash));
112
+ OP_CHECK_CUDA_ERROR(ctx, cudaMalloc(&p.evHash, p.allocTriangles * AA_HASH_ELEMENTS_PER_TRIANGLE(p.allocTriangles) * sizeof(uint4)));
113
+ LOG(INFO) << "Increasing topology hash size to accommodate " << p.allocTriangles << " triangles";
114
+ }
115
+
116
+ // Clear the hash and launch the mesh kernel to populate it.
117
+ OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(p.evHash, 0, p.allocTriangles * AA_HASH_ELEMENTS_PER_TRIANGLE(p.allocTriangles) * sizeof(uint4), stream));
118
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel((void*)AntialiasFwdMeshKernel, (p.numTriangles - 1) / AA_MESH_KERNEL_THREADS_PER_BLOCK + 1, AA_MESH_KERNEL_THREADS_PER_BLOCK, args, 0, stream));
119
+ }
120
+
121
+ // Copy input to output as a baseline.
122
+ OP_CHECK_CUDA_ERROR(ctx, cudaMemcpyAsync(p.output, p.color, p.n * p.height * p.width * p.channels * sizeof(float), cudaMemcpyDeviceToDevice, stream));
123
+
124
+ // Choose launch parameters for the discontinuity finder kernel and launch.
125
+ dim3 blockSize(AA_DISCONTINUITY_KERNEL_BLOCK_WIDTH, AA_DISCONTINUITY_KERNEL_BLOCK_HEIGHT, 1);
126
+ dim3 gridSize = getLaunchGridSize(blockSize, p.width, p.height, p.n);
127
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel((void*)AntialiasFwdDiscontinuityKernel, gridSize, blockSize, args, 0, stream));
128
+
129
+ // Determine optimum block size for the persistent analysis kernel.
130
+ int device = 0;
131
+ int numCTA = 0;
132
+ int numSM = 0;
133
+ OP_CHECK_CUDA_ERROR(ctx, cudaGetDevice(&device));
134
+ OP_CHECK_CUDA_ERROR(ctx, cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numCTA, (void*)AntialiasFwdAnalysisKernel, AA_ANALYSIS_KERNEL_THREADS_PER_BLOCK, 0));
135
+ OP_CHECK_CUDA_ERROR(ctx, cudaDeviceGetAttribute(&numSM, cudaDevAttrMultiProcessorCount, device));
136
+
137
+ // Launch analysis kernel.
138
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel((void*)AntialiasFwdAnalysisKernel, numCTA * numSM, AA_ANALYSIS_KERNEL_THREADS_PER_BLOCK, args, 0, stream));
139
+ }
140
+ };
141
+
142
+ REGISTER_OP("AntialiasFwd")
143
+ .Input ("color: float")
144
+ .Input ("raster_out: float")
145
+ .Input ("pos: float")
146
+ .Input ("tri: int32")
147
+ .Output ("output: float")
148
+ .Output ("work_buffer: int32")
149
+ .Attr ("tri_const: int");
150
+
151
+ REGISTER_KERNEL_BUILDER(Name("AntialiasFwd").Device(DEVICE_GPU), AntialiasFwdOp);
152
+
153
+ //------------------------------------------------------------------------
154
+ // Gradient TensorFlow op.
155
+
156
+ struct AntialiasGradOp : public OpKernel
157
+ {
158
+ AntialiasKernelParams m_attribs;
159
+
160
+ AntialiasGradOp(OpKernelConstruction* ctx): OpKernel(ctx)
161
+ {
162
+ memset(&m_attribs, 0, sizeof(m_attribs));
163
+ }
164
+
165
+ void Compute(OpKernelContext* ctx)
166
+ {
167
+ AntialiasKernelParams& p = m_attribs;
168
+ cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
169
+
170
+ // Get input.
171
+ const Tensor& color = ctx->input(0);
172
+ const Tensor& rasterOut = ctx->input(1);
173
+ const Tensor& pos = ctx->input(2);
174
+ const Tensor& tri = ctx->input(3);
175
+ const Tensor& dy = ctx->input(4);
176
+ const Tensor& workBuffer = ctx->input(5);
177
+
178
+ // Instance rendering mode?
179
+ p.instance_mode = pos.dims() > 2;
180
+
181
+ // Extract input dimensions.
182
+ if (p.instance_mode)
183
+ p.numVertices = (pos.dims() > 1) ? pos.dim_size(1) : 0;
184
+ else
185
+ p.numVertices = (pos.dims() > 0) ? pos.dim_size(0) : 0;
186
+ p.numTriangles = (tri.dims() > 0) ? tri.dim_size(0) : 0;
187
+ p.n = (color.dims() > 0) ? color.dim_size(0) : 0;
188
+ p.height = (color.dims() > 1) ? color.dim_size(1) : 0;
189
+ p.width = (color.dims() > 2) ? color.dim_size(2) : 0;
190
+ p.channels = (color.dims() > 3) ? color.dim_size(3) : 0;
191
+
192
+ // Sanity checks.
193
+ OP_REQUIRES(ctx, dy.dims() == 4 && dy.dim_size(0) > 0 && dy.dim_size(1) > 0 && dy.dim_size(2) > 0 && dy.dim_size(3) > 0, errors::InvalidArgument("dy must have shape[>0, >0, >0, >0]"));
194
+ OP_REQUIRES(ctx, color.dims() == 4 && color.dim_size(0) > 0 && color.dim_size(1) > 0 && color.dim_size(2) > 0 && color.dim_size(3) > 0, errors::InvalidArgument("color must have shape[>0, >0, >0, >0]"));
195
+ OP_REQUIRES(ctx, rasterOut.dims() == 4 && rasterOut.dim_size(0) > 0 && rasterOut.dim_size(1) > 0 && rasterOut.dim_size(2) > 0 && rasterOut.dim_size(3) == 4, errors::InvalidArgument("raster_out must have shape[>0, >0, >0, 4]"));
196
+ OP_REQUIRES(ctx, tri.dims() == 2 && tri.dim_size(0) > 0 && tri.dim_size(1) == 3, errors::InvalidArgument("tri must have shape [>0, 3]"));
197
+ OP_REQUIRES(ctx, color.dim_size(1) == rasterOut.dim_size(1) && color.dim_size(2) == rasterOut.dim_size(2), errors::InvalidArgument("color and raster_out inputs must have same spatial dimensions"));
198
+ OP_REQUIRES(ctx, color.dim_size(1) == dy.dim_size(1) && color.dim_size(2) == dy.dim_size(2) && color.dim_size(3) == dy.dim_size(3), errors::InvalidArgument("color and dy inputs must have same dimensions"));
199
+ if (p.instance_mode)
200
+ {
201
+ OP_REQUIRES(ctx, pos.dims() == 3 && pos.dim_size(0) > 0 && pos.dim_size(1) > 0 && pos.dim_size(2) == 4, errors::InvalidArgument("pos must have shape [>0, >0, 4] or [>0, 4]"));
202
+ OP_REQUIRES(ctx, rasterOut.dim_size(0) == p.n && pos.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs color, raster_out, pos"));
203
+ OP_REQUIRES(ctx, dy.dim_size(0) == p.n && rasterOut.dim_size(0) == p.n && pos.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs dy, color, raster_out, pos"));
204
+ }
205
+ else
206
+ {
207
+ OP_REQUIRES(ctx, pos.dims() == 2 && pos.dim_size(0) > 0 && pos.dim_size(1) == 4, errors::InvalidArgument("pos must have shape [>0, >0, 4] or [>0, 4]"));
208
+ OP_REQUIRES(ctx, rasterOut.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs color, raster_out"));
209
+ OP_REQUIRES(ctx, dy.dim_size(0) == p.n && rasterOut.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs dy, color, raster_out"));
210
+ }
211
+
212
+ // Get input pointers.
213
+ p.dy = dy.flat<float>().data();
214
+ p.color = color.flat<float>().data();
215
+ p.rasterOut = rasterOut.flat<float>().data();
216
+ p.tri = tri.flat<int>().data();
217
+ p.pos = pos.flat<float>().data();
218
+ p.workBuffer = (int4*)(workBuffer.flat<int>().data());
219
+
220
+ // Misc parameters.
221
+ p.xh = .5f * (float)p.width;
222
+ p.yh = .5f * (float)p.height;
223
+
224
+ // Allocate color gradient output tensor.
225
+ Tensor* gradColor = NULL;
226
+ TensorShape gradColorShape;
227
+ gradColorShape.AddDim(p.n);
228
+ gradColorShape.AddDim(p.height);
229
+ gradColorShape.AddDim(p.width);
230
+ gradColorShape.AddDim(p.channels);
231
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, gradColorShape, &gradColor));
232
+ p.gradColor = gradColor->flat<float>().data();
233
+
234
+ // Allocate position gradient output tensor.
235
+ Tensor* gradPos = NULL;
236
+ TensorShape gradPosShape;
237
+ if (p.instance_mode)
238
+ gradPosShape.AddDim(p.n);
239
+ gradPosShape.AddDim(p.numVertices);
240
+ gradPosShape.AddDim(4);
241
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(1, gradPosShape, &gradPos));
242
+ p.gradPos = gradPos->flat<float>().data();
243
+
244
+ // Reset the gradient kernel work counter, seed grad_color with dy, and zero grad_pos.
245
+ OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(&p.workBuffer[0].y, 0, sizeof(int), stream)); // Gradient kernel work counter.
246
+ OP_CHECK_CUDA_ERROR(ctx, cudaMemcpyAsync(p.gradColor, p.dy, p.n * p.height * p.width * p.channels * sizeof(float), cudaMemcpyDeviceToDevice, stream));
247
+ OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(p.gradPos, 0, (p.instance_mode ? p.n : 1) * p.numVertices * 4 * sizeof(float), stream));
248
+
249
+ // Verify that buffers are aligned to allow float2/float4 operations.
250
+ OP_REQUIRES(ctx, !((uintptr_t)p.pos & 15), errors::Internal("pos input tensor not aligned to float4"));
251
+ OP_REQUIRES(ctx, !((uintptr_t)p.workBuffer & 15), errors::Internal("work_buffer internal tensor not aligned to int4"));
252
+
253
+ // Launch the gradient kernel.
254
+ void* args[] = {&p};
255
+
256
+ int device = 0;
257
+ int numCTA = 0;
258
+ int numSM = 0;
259
+ OP_CHECK_CUDA_ERROR(ctx, cudaGetDevice(&device));
260
+ OP_CHECK_CUDA_ERROR(ctx, cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numCTA, (void*)AntialiasGradKernel, AA_GRAD_KERNEL_THREADS_PER_BLOCK, 0));
261
+ OP_CHECK_CUDA_ERROR(ctx, cudaDeviceGetAttribute(&numSM, cudaDevAttrMultiProcessorCount, device));
262
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel((void*)AntialiasGradKernel, numCTA * numSM, AA_GRAD_KERNEL_THREADS_PER_BLOCK, args, 0, stream));
263
+ }
264
+ };
265
+
266
+ REGISTER_OP("AntialiasGrad")
267
+ .Input ("color: float")
268
+ .Input ("raster_out: float")
269
+ .Input ("pos: float")
270
+ .Input ("tri: int32")
271
+ .Input ("dy: float")
272
+ .Input ("work_buffer: int32")
273
+ .Output ("grad_color: float")
274
+ .Output ("grad_pos: float");
275
+
276
+ REGISTER_KERNEL_BUILDER(Name("AntialiasGrad").Device(DEVICE_GPU), AntialiasGradOp);
277
+
278
+ //------------------------------------------------------------------------
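The forward op above grows the opposite-vertex hash geometrically: capacity starts at 64 triangles and doubles until it covers the mesh, so the table size stays a power of two. A small Python restatement of that growth policy, illustrative only and not part of the source:

def next_hash_capacity(current_capacity, num_triangles):
    # Mirrors the reallocation loop in AntialiasFwdOp::Compute above.
    capacity = max(current_capacity, 64)
    while capacity < num_triangles:
        capacity <<= 1          # must remain a power of two
    return capacity

assert next_hash_capacity(0, 1000) == 1024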
extensions/nvdiffrast/nvdiffrast/tensorflow/tf_interpolate.cu ADDED
@@ -0,0 +1,301 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ //------------------------------------------------------------------------
10
+ // Common op attribute parser.
11
+
12
+ static __host__ void interpolateParseOpAttributes(OpKernelConstruction* ctx, InterpolateKernelParams& p, bool enableDA)
13
+ {
14
+ if (enableDA)
15
+ {
16
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("diff_attrs_all", &p.diff_attrs_all));
17
+ if (!p.diff_attrs_all)
18
+ {
19
+ std::vector<int> diff_attrs_vec;
20
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("diff_attrs", &diff_attrs_vec));
21
+ OP_REQUIRES(ctx, diff_attrs_vec.size() > 0, errors::InvalidArgument("differentiation enabled with empty diff_attrs list"));
22
+ OP_REQUIRES(ctx, diff_attrs_vec.size() <= IP_MAX_DIFF_ATTRS, errors::InvalidArgument("too many entries in diff_attrs list (increase IP_MAX_DIFF_ATTRS)"));
23
+ p.numDiffAttr = diff_attrs_vec.size();
24
+ memcpy(p.diffAttrs, &diff_attrs_vec[0], diff_attrs_vec.size()*sizeof(int));
25
+ }
26
+ }
27
+ }
28
+
29
+ //------------------------------------------------------------------------
30
+ // Forward TensorFlow op.
31
+
32
+ template <bool ENABLE_DA>
33
+ struct InterpolateFwdOp : public OpKernel
34
+ {
35
+ InterpolateKernelParams m_attribs;
36
+
37
+ InterpolateFwdOp(OpKernelConstruction* ctx): OpKernel(ctx)
38
+ {
39
+ memset(&m_attribs, 0, sizeof(m_attribs));
40
+ interpolateParseOpAttributes(ctx, m_attribs, ENABLE_DA);
41
+ }
42
+
43
+ void Compute(OpKernelContext* ctx)
44
+ {
45
+ InterpolateKernelParams& p = m_attribs;
46
+ cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
47
+
48
+ // Get input.
49
+ const Tensor& attr = ctx->input(0);
50
+ const Tensor& rast = ctx->input(1);
51
+ const Tensor& tri = ctx->input(2);
52
+ const Tensor& rast_db = ctx->input(ENABLE_DA ? 3 : 2);
53
+
54
+ // Instance rendering mode?
55
+ p.instance_mode = attr.dims() > 2;
56
+
57
+ // Extract input dimensions.
58
+ if (p.instance_mode)
59
+ {
60
+ p.numVertices = (attr.dims() > 1) ? attr.dim_size(1) : 0;
61
+ p.numAttr = (attr.dims() > 2) ? attr.dim_size(2) : 0;
62
+ }
63
+ else
64
+ {
65
+ p.numVertices = (attr.dims() > 0) ? attr.dim_size(0) : 0;
66
+ p.numAttr = (attr.dims() > 1) ? attr.dim_size(1) : 0;
67
+ }
68
+ p.numTriangles = (tri.dims() > 0) ? tri.dim_size(0) : 0;
69
+ p.height = (rast.dims() > 1) ? rast.dim_size(1) : 0;
70
+ p.width = (rast.dims() > 2) ? rast.dim_size(2) : 0;
71
+ p.depth = (rast.dims() > 0) ? rast.dim_size(0) : 0;
72
+
73
+ // Sanity checks.
74
+ OP_REQUIRES(ctx, rast.dims() == 4 && rast.dim_size(0) > 0 && rast.dim_size(1) > 0 && rast.dim_size(2) > 0 && rast.dim_size(3) == 4, errors::InvalidArgument("rast must have shape[>0, >0, >0, 4]"));
75
+ OP_REQUIRES(ctx, tri.dims() == 2 && tri.dim_size(0) > 0 && tri.dim_size(1) == 3, errors::InvalidArgument("tri must have shape [>0, 3]"));
76
+ OP_REQUIRES(ctx, (attr.dims() == 2 || attr.dims() == 3) && attr.dim_size(0) > 0 && attr.dim_size(1) > 0 && (attr.dims() == 2 || attr.dim_size(2) > 0), errors::InvalidArgument("attr must have shape [>0, >0, >0] or [>0, >0]"));
77
+ if (p.instance_mode)
78
+ OP_REQUIRES(ctx, attr.dim_size(0) == p.depth || attr.dim_size(0) == 1, errors::InvalidArgument("minibatch size mismatch between inputs rast, attr"));
79
+ if (ENABLE_DA)
80
+ {
81
+ OP_REQUIRES(ctx, rast_db.dims() == 4 && rast_db.dim_size(0) > 0 && rast_db.dim_size(1) > 0 && rast_db.dim_size(2) > 0 && rast_db.dim_size(3) == 4, errors::InvalidArgument("rast_db must have shape[>0, >0, >0, 4]"));
82
+ OP_REQUIRES(ctx, rast_db.dim_size(1) == rast.dim_size(1) && rast_db.dim_size(2) == rast.dim_size(2), errors::InvalidArgument("spatial size mismatch between inputs rast and rast_db"));
83
+ OP_REQUIRES(ctx, rast_db.dim_size(0) == p.depth, errors::InvalidArgument("minibatch size mismatch between inputs rast, rast_db"));
84
+ }
85
+
86
+ // All diff attrs mode.
87
+ if (p.diff_attrs_all)
88
+ p.numDiffAttr = p.numAttr;
89
+
90
+ // Get input pointers.
91
+ p.attr = attr.flat<float>().data();
92
+ p.rast = rast.flat<float>().data();
93
+ p.tri = tri.flat<int>().data();
94
+ p.attrBC = (p.instance_mode && attr.dim_size(0) == 1) ? 1 : 0;
95
+ p.rastDB = ENABLE_DA ? rast_db.flat<float>().data() : 0;
96
+
97
+ // Allocate main output tensor.
98
+ Tensor* out_tensor = NULL;
99
+ TensorShape out_shape;
100
+ out_shape.AddDim(p.depth);
101
+ out_shape.AddDim(p.height);
102
+ out_shape.AddDim(p.width);
103
+ out_shape.AddDim(p.numAttr);
104
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, out_shape, &out_tensor));
105
+ p.out = out_tensor->flat<float>().data();
106
+
107
+ // Allocate pixel differential output tensor.
108
+ Tensor* out_da_tensor = NULL;
109
+ out_shape.set_dim(3, p.numDiffAttr * 2);
110
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(1, out_shape, &out_da_tensor));
111
+ p.outDA = ENABLE_DA ? out_da_tensor->flat<float>().data() : 0;
112
+
113
+ // Verify that buffers are aligned to allow float2/float4 operations.
114
+ OP_REQUIRES(ctx, !((uintptr_t)p.rast & 15), errors::Internal("rast input tensor not aligned to float4"));
115
+ OP_REQUIRES(ctx, !((uintptr_t)p.rastDB & 15), errors::Internal("rast_db input tensor not aligned to float4"));
116
+ if (ENABLE_DA)
117
+ OP_REQUIRES(ctx, !((uintptr_t)p.outDA & 7), errors::Internal("out_da output tensor not aligned to float2"));
118
+
119
+ // Choose launch parameters.
120
+ dim3 blockSize = getLaunchBlockSize(IP_FWD_MAX_KERNEL_BLOCK_WIDTH, IP_FWD_MAX_KERNEL_BLOCK_HEIGHT, p.width, p.height);
121
+ dim3 gridSize = getLaunchGridSize(blockSize, p.width, p.height, p.depth);
122
+
123
+ // Launch CUDA kernel.
124
+ void* args[] = {&p};
125
+ void* func = ENABLE_DA ? (void*)InterpolateFwdKernelDa : (void*)InterpolateFwdKernel;
126
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(func, gridSize, blockSize, args, 0, stream));
127
+ }
128
+ };
129
+
130
+ REGISTER_OP("InterpolateFwd")
131
+ .Input ("attr: float")
132
+ .Input ("rast: float")
133
+ .Input ("tri: int32")
134
+ .Output ("out: float")
135
+ .Output ("out_da: float");
136
+
137
+ REGISTER_OP("InterpolateFwdDa")
138
+ .Input ("attr: float")
139
+ .Input ("rast: float")
140
+ .Input ("tri: int32")
141
+ .Input ("rast_db: float")
142
+ .Output ("out: float")
143
+ .Output ("out_da: float")
144
+ .Attr ("diff_attrs_all: int")
145
+ .Attr ("diff_attrs: list(int)");
146
+
147
+ REGISTER_KERNEL_BUILDER(Name("InterpolateFwd") .Device(DEVICE_GPU), InterpolateFwdOp<false>);
148
+ REGISTER_KERNEL_BUILDER(Name("InterpolateFwdDa").Device(DEVICE_GPU), InterpolateFwdOp<true>);
149
+
150
+ //------------------------------------------------------------------------
151
+ // Gradient TensorFlow op.
152
+
153
+ template <bool ENABLE_DA>
154
+ struct InterpolateGradOp : public OpKernel
155
+ {
156
+ InterpolateKernelParams m_attribs;
157
+
158
+ InterpolateGradOp(OpKernelConstruction* ctx): OpKernel(ctx)
159
+ {
160
+ memset(&m_attribs, 0, sizeof(m_attribs));
161
+ interpolateParseOpAttributes(ctx, m_attribs, ENABLE_DA);
162
+ }
163
+
164
+ void Compute(OpKernelContext* ctx)
165
+ {
166
+ InterpolateKernelParams& p = m_attribs;
167
+ cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
168
+
169
+ // Get input.
170
+ const Tensor& attr = ctx->input(0);
171
+ const Tensor& rast = ctx->input(1);
172
+ const Tensor& tri = ctx->input(2);
173
+ const Tensor& dy = ctx->input(3);
174
+ const Tensor& rast_db = ctx->input(ENABLE_DA ? 4 : 3);
175
+ const Tensor& dda = ctx->input(ENABLE_DA ? 5 : 3);
176
+
177
+ // Instance rendering mode?
178
+ p.instance_mode = attr.dims() > 2;
179
+
180
+ // Extract input dimensions.
181
+ if (p.instance_mode)
182
+ {
183
+ p.numVertices = (attr.dims() > 1) ? attr.dim_size(1) : 0;
184
+ p.numAttr = (attr.dims() > 2) ? attr.dim_size(2) : 0;
185
+ }
186
+ else
187
+ {
188
+ p.numVertices = (attr.dims() > 0) ? attr.dim_size(0) : 0;
189
+ p.numAttr = (attr.dims() > 1) ? attr.dim_size(1) : 0;
190
+ }
191
+ p.numTriangles = (tri.dims() > 0) ? tri.dim_size(0) : 0;
192
+ p.depth = (rast.dims() > 0) ? rast.dim_size(0) : 0;
193
+ p.height = (rast.dims() > 1) ? rast.dim_size(1) : 0;
194
+ p.width = (rast.dims() > 2) ? rast.dim_size(2) : 0;
195
+ int attr_depth = p.instance_mode ? (attr.dims() > 1 ? attr.dim_size(0) : 0) : 1;
196
+
197
+ // Sanity checks.
198
+ OP_REQUIRES(ctx, rast.dims() == 4 && rast.dim_size(0) > 0 && rast.dim_size(1) > 0 && rast.dim_size(2) > 0 && rast.dim_size(3) == 4, errors::InvalidArgument("rast must have shape[>0, >0, >0, 4]"));
199
+ OP_REQUIRES(ctx, tri.dims() == 2 && tri.dim_size(0) > 0 && tri.dim_size(1) == 3, errors::InvalidArgument("tri must have shape [>0, 3]"));
200
+ OP_REQUIRES(ctx, (attr.dims() == 2 || attr.dims() == 3) && attr.dim_size(0) > 0 && attr.dim_size(1) > 0 && (attr.dims() == 2 || attr.dim_size(2) > 0), errors::InvalidArgument("attr must have shape [>0, >0, >0] or [>0, >0]"));
201
+ OP_REQUIRES(ctx, dy.dims() == 4 && dy.dim_size(0) > 0 && dy.dim_size(1) == p.height && dy.dim_size(2) == p.width && dy.dim_size(3) > 0, errors::InvalidArgument("dy must have shape [>0, height, width, >0]"));
202
+ OP_REQUIRES(ctx, dy.dim_size(3) == p.numAttr, errors::InvalidArgument("argument count mismatch between inputs dy, attr"));
203
+ OP_REQUIRES(ctx, (attr_depth == p.depth || attr_depth == 1) && dy.dim_size(0) == p.depth, errors::InvalidArgument("minibatch size mismatch between inputs rast, dy, attr"));
204
+ if (ENABLE_DA)
205
+ {
206
+ OP_REQUIRES(ctx, dda.dims() == 4 && dda.dim_size(0) > 0 && dda.dim_size(1) == p.height && dda.dim_size(2) == p.width, errors::InvalidArgument("dda must have shape [>0, height, width, ?]"));
207
+ OP_REQUIRES(ctx, dda.dim_size(0) == p.depth, errors::InvalidArgument("minibatch size mismatch between rast, dda"));
208
+ }
209
+
210
+ // All diff attrs mode.
211
+ if (p.diff_attrs_all)
212
+ p.numDiffAttr = p.numAttr;
213
+
214
+ // Get input pointers.
215
+ p.attr = attr.flat<float>().data();
216
+ p.rast = rast.flat<float>().data();
217
+ p.tri = tri.flat<int>().data();
218
+ p.dy = dy.flat<float>().data();
219
+ p.rastDB = ENABLE_DA ? rast_db.flat<float>().data() : 0;
220
+ p.dda = ENABLE_DA ? dda.flat<float>().data() : 0;
221
+ p.attrBC = (p.instance_mode && attr_depth < p.depth) ? 1 : 0;
222
+
223
+ // Allocate attribute gradient output tensor.
224
+ Tensor* grad_attr_tensor = NULL;
225
+ TensorShape grad_attr_shape;
226
+ if (p.instance_mode)
227
+ grad_attr_shape.AddDim(attr_depth);
228
+ grad_attr_shape.AddDim(p.numVertices);
229
+ grad_attr_shape.AddDim(p.numAttr);
230
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, grad_attr_shape, &grad_attr_tensor));
231
+ p.gradAttr = grad_attr_tensor->flat<float>().data();
232
+
233
+ // Allocate bary gradient output tensor.
234
+ Tensor* grad_rast_tensor = NULL;
235
+ TensorShape grad_rast_shape;
236
+ grad_rast_shape.AddDim(p.depth);
237
+ grad_rast_shape.AddDim(p.height);
238
+ grad_rast_shape.AddDim(p.width);
239
+ grad_rast_shape.AddDim(4);
240
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(1, grad_rast_shape, &grad_rast_tensor));
241
+ p.gradRaster = grad_rast_tensor->flat<float>().data();
242
+
243
+ // Allocate bary pixel diff gradient output tensor.
244
+ if (ENABLE_DA)
245
+ {
246
+ Tensor* grad_rast_db_tensor = NULL;
247
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(2, grad_rast_shape, &grad_rast_db_tensor));
248
+ p.gradRasterDB = grad_rast_db_tensor->flat<float>().data();
249
+ }
250
+
251
+ // Clear attribute gradients.
252
+ cudaMemsetAsync(p.gradAttr, 0, attr_depth * p.numVertices * p.numAttr * sizeof(float), stream);
253
+
254
+ // Verify that buffers are aligned to allow float2/float4 operations.
255
+ OP_REQUIRES(ctx, !((uintptr_t)p.rast & 15), errors::Internal("rast input tensor not aligned to float4"));
256
+ OP_REQUIRES(ctx, !((uintptr_t)p.gradRaster & 15), errors::Internal("grad_rast output tensor not aligned to float4"));
257
+ if (ENABLE_DA)
258
+ {
259
+ OP_REQUIRES(ctx, !((uintptr_t)p.dda & 7), errors::Internal("dda input tensor not aligned to float2"));
260
+ OP_REQUIRES(ctx, !((uintptr_t)p.rastDB & 15), errors::Internal("rast_db input tensor not aligned to float4"));
261
+ OP_REQUIRES(ctx, !((uintptr_t)p.gradRasterDB & 15), errors::Internal("grad_rast_db output tensor not aligned to float4"));
262
+ }
263
+
264
+ // Choose launch parameters.
265
+ dim3 blockSize = getLaunchBlockSize(IP_GRAD_MAX_KERNEL_BLOCK_WIDTH, IP_GRAD_MAX_KERNEL_BLOCK_HEIGHT, p.width, p.height);
266
+ dim3 gridSize = getLaunchGridSize(blockSize, p.width, p.height, p.depth);
267
+
268
+ // Launch CUDA kernel.
269
+ void* args[] = {&p};
270
+ void* func = ENABLE_DA ? (void*)InterpolateGradKernelDa : (void*)InterpolateGradKernel;
271
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(func, gridSize, blockSize, args, 0, stream));
272
+ }
273
+ };
274
+
275
+ REGISTER_OP("InterpolateGrad")
276
+ .Input ("attr: float")
277
+ .Input ("rast: float")
278
+ .Input ("tri: int32")
279
+ .Input ("dy: float")
280
+ .Output ("grad_attr: float")
281
+ .Output ("grad_rast: float")
282
+ ;
283
+
284
+ REGISTER_OP("InterpolateGradDa")
285
+ .Input ("attr: float")
286
+ .Input ("rast: float")
287
+ .Input ("tri: int32")
288
+ .Input ("dy: float")
289
+ .Input ("rast_db: float")
290
+ .Input ("dda: float")
291
+ .Output ("grad_attr: float")
292
+ .Output ("grad_rast: float")
293
+ .Output ("grad_rast_db: float")
294
+ .Attr ("diff_attrs_all: int")
295
+ .Attr ("diff_attrs: list(int)");
296
297
+
298
+ REGISTER_KERNEL_BUILDER(Name("InterpolateGrad") .Device(DEVICE_GPU), InterpolateGradOp<false>);
299
+ REGISTER_KERNEL_BUILDER(Name("InterpolateGradDa").Device(DEVICE_GPU), InterpolateGradOp<true>);
300
+
301
+ //------------------------------------------------------------------------
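The interpolation ops registered above are normally reached through the Python wrapper in nvdiffrast/tensorflow/ops.py rather than called directly. A minimal usage sketch, assuming that wrapper exposes interpolate() and that shapes follow the checks in the op; all names and values here are illustrative:

    import numpy as np
    import tensorflow as tf
    import nvdiffrast.tensorflow as dr

    attr    = tf.constant(np.random.rand(1, 3, 4), dtype=tf.float32)  # [minibatch, num_vertices, num_attrs]
    tri     = tf.constant([[0, 1, 2]], dtype=tf.int32)                # [num_triangles, 3]
    rast    = tf.zeros([1, 64, 64, 4], dtype=tf.float32)              # rasterizer output (u, v, z/w, triangle_id)
    rast_db = tf.zeros([1, 64, 64, 4], dtype=tf.float32)              # image-space barycentric derivatives

    # diff_attrs='all' corresponds to the diff_attrs_all attribute handled above.
    out, out_da = dr.interpolate(attr, rast, tri, rast_db=rast_db, diff_attrs='all')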
extensions/nvdiffrast/nvdiffrast/tensorflow/tf_rasterize.cu ADDED
@@ -0,0 +1,242 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ //------------------------------------------------------------------------
10
+ // Forward TensorFlow op.
11
+
12
+ struct RasterizeFwdOp : public OpKernel
13
+ {
14
+ RasterizeGLState m_glState; // OpenGL-related persistent state.
15
+ int m_tri_const; // 1 if triangle array is known to be constant.
16
+
17
+ RasterizeFwdOp(OpKernelConstruction* ctx):
18
+ OpKernel(ctx)
19
+ {
20
+ memset(&m_glState, 0, sizeof(RasterizeGLState));
21
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("enable_db", &m_glState.enableDB));
22
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("tri_const", &m_tri_const));
23
+ }
24
+
25
+ void Compute(OpKernelContext* ctx)
26
+ {
27
+ cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
28
+
29
+ // Check that input shapes are correct.
30
+ const Tensor& pos = ctx->input(0);
31
+ const Tensor& tri = ctx->input(1);
32
+ const Tensor& resolution = ctx->input(2);
33
+ const Tensor& ranges = ctx->input(3);
34
+
35
+ // Determine number of outputs
36
+ int num_outputs = m_glState.enableDB ? 2 : 1;
37
+
38
+ // Determine instance mode and check input dimensions.
39
+ bool instance_mode = pos.dims() > 2;
40
+ if (instance_mode)
41
+ {
42
+ OP_REQUIRES(ctx, pos.dims() == 3 && pos.dim_size(0) > 0 && pos.dim_size(1) > 0 && pos.dim_size(2) == 4, errors::InvalidArgument("instance mode - pos must have shape [>0, >0, 4]"));
43
+ OP_REQUIRES(ctx, tri.dims() == 2 && tri.dim_size(0) > 0 && tri.dim_size(1) == 3, errors::InvalidArgument("tri must have shape [>0, 3]"));
44
+ OP_REQUIRES(ctx, resolution.dims() == 1 && resolution.dim_size(0) == 2, errors::InvalidArgument("resolution must have shape [2]"));
45
+ }
46
+ else
47
+ {
48
+ OP_REQUIRES(ctx, pos.dims() == 2 && pos.dim_size(0) > 0 && pos.dim_size(1) == 4, errors::InvalidArgument("range mode - pos must have shape [>0, 4]"));
49
+ OP_REQUIRES(ctx, tri.dims() == 2 && tri.dim_size(0) > 0 && tri.dim_size(1) == 3, errors::InvalidArgument("tri must have shape [>0, 3]"));
50
+ OP_REQUIRES(ctx, resolution.dims() == 1 && resolution.dim_size(0) == 2, errors::InvalidArgument("resolution must have shape [2]"));
51
+ OP_REQUIRES(ctx, ranges.dims() == 2 && ranges.dim_size(0) > 0 && ranges.dim_size(1) == 2, errors::InvalidArgument("range mode - ranges must have shape [>0, 2]"));
52
+ }
53
+
54
+ // Get output shape.
55
+ const int32_t* res_in = resolution.flat<int32_t>().data(); // This is in CPU memory.
56
+ int height = res_in[0];
57
+ int width = res_in[1];
58
+ int depth = instance_mode ? pos.dim_size(0) : ranges.dim_size(0);
59
+ OP_REQUIRES(ctx, height > 0 && width > 0, errors::InvalidArgument("resolution must be [>0, >0]"));
60
+
61
+ // Get position and triangle buffer sizes in int32/float32.
62
+ int posCount = 4 * pos.dim_size(0) * (instance_mode ? pos.dim_size(1) : 1);
63
+ int triCount = 3 * tri.dim_size(0);
64
+
65
+ // Init context and GL?
66
+ bool initCtx = !m_glState.glFBO;
67
+ if (initCtx)
68
+ {
69
+ const DeviceBase::GpuDeviceInfo* g = ctx->device()->tensorflow_gpu_device_info();
70
+ int cudaDeviceIdx = g ? g->gpu_id : -1;
71
+ rasterizeInitGLContext(ctx, m_glState, cudaDeviceIdx); // In common/rasterize.cpp
72
+ }
73
+ else
74
+ setGLContext(m_glState.glctx); // (Re-)Activate GL context.
75
+
76
+ // Resize all buffers.
77
+ bool changes = false;
78
+ rasterizeResizeBuffers(ctx, m_glState, changes, posCount, triCount, width, height, depth); // In common/rasterize_gl.cpp
79
+ if (changes)
80
+ {
81
+ #ifdef _WIN32
82
+ // Workaround for occasional blank first frame on Windows.
83
+ releaseGLContext();
84
+ setGLContext(m_glState.glctx);
85
+ #endif
86
+ }
87
+
88
+ // Copy input data to GL and render.
89
+ const float* posPtr = pos.flat<float>().data();
90
+ const int32_t* rangesPtr = instance_mode ? 0 : ranges.flat<int32_t>().data(); // This is in CPU memory.
91
+ const int32_t* triPtr = (initCtx || !m_tri_const) ? tri.flat<int32_t>().data() : NULL; // Copy triangles only if needed.
92
+ int vtxPerInstance = instance_mode ? pos.dim_size(1) : 0;
93
+ rasterizeRender(ctx, m_glState, stream, posPtr, posCount, vtxPerInstance, triPtr, triCount, rangesPtr, width, height, depth, -1);
94
+
95
+ // Allocate output tensors.
96
+ TensorShape output_shape;
97
+ output_shape.AddDim(depth);
98
+ output_shape.AddDim(height);
99
+ output_shape.AddDim(width);
100
+ output_shape.AddDim(4);
101
+ float* outputPtr[2];
102
+ for (int i=0; i < 2; i++)
103
+ {
104
+ if (i >= num_outputs)
105
+ output_shape.set_dim(3, 0); // Zero channels for unwanted out_db tensor.
106
+ Tensor* output_tensor = NULL;
107
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(i, output_shape, &output_tensor));
108
+ if (i < num_outputs)
109
+ outputPtr[i] = output_tensor->flat<float>().data();
110
+ }
111
+
112
+ // Copy rasterized results into CUDA buffers.
113
+ rasterizeCopyResults(ctx, m_glState, stream, outputPtr, width, height, depth);
114
+
115
+ // Done. Release GL context.
116
+ releaseGLContext();
117
+ }
118
+ };
119
+
120
+ REGISTER_OP("RasterizeFwd")
121
+ .Input ("pos: float")
122
+ .Input ("tri: int32")
123
+ .Input ("resolution: int32")
124
+ .Input ("ranges: int32")
125
+ .Output ("out: float")
126
+ .Output ("out_db: float")
127
+ .Attr ("enable_db: int")
128
+ .Attr ("tri_const: int");
129
+
130
+ REGISTER_KERNEL_BUILDER(Name("RasterizeFwd").Device(DEVICE_GPU).HostMemory("resolution").HostMemory("ranges"), RasterizeFwdOp);
131
+
132
+ //------------------------------------------------------------------------
133
+ // Gradient TensorFlow op.
134
+
135
+ template <bool ENABLE_DB>
136
+ struct RasterizeGradOp : public OpKernel
137
+ {
138
+ RasterizeGradParams m_attribs;
139
+
140
+ RasterizeGradOp(OpKernelConstruction* ctx): OpKernel(ctx)
141
+ {
142
+ memset(&m_attribs, 0, sizeof(m_attribs));
143
+ }
144
+
145
+ void Compute(OpKernelContext* ctx)
146
+ {
147
+ RasterizeGradParams& p = m_attribs;
148
+ cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
149
+
150
+ // Input tensors.
151
+ const Tensor& pos = ctx->input(0);
152
+ const Tensor& tri = ctx->input(1);
153
+ const Tensor& out = ctx->input(2);
154
+ const Tensor& dy = ctx->input(3);
155
+ const Tensor& ddb = ctx->input(ENABLE_DB ? 4 : 3);
156
+
157
+ // Determine instance mode.
158
+ p.instance_mode = (pos.dims() > 2) ? 1 : 0;
159
+
160
+ // Shape is taken from the rasterizer output tensor.
161
+ OP_REQUIRES(ctx, out.dims() == 4, errors::InvalidArgument("out must be rank-4"));
162
+ p.depth = out.dim_size(0);
163
+ p.height = out.dim_size(1);
164
+ p.width = out.dim_size(2);
165
+ OP_REQUIRES(ctx, p.depth > 0 && p.height > 0 && p.width > 0, errors::InvalidArgument("resolution must be [>0, >0, >0]"));
166
+
167
+ // Check other shapes.
168
+ if (p.instance_mode)
169
+ OP_REQUIRES(ctx, pos.dims() == 3 && pos.dim_size(0) == p.depth && pos.dim_size(1) > 0 && pos.dim_size(2) == 4, errors::InvalidArgument("pos must have shape [depth, >0, 4]"));
170
+ else
171
+ OP_REQUIRES(ctx, pos.dims() == 2 && pos.dim_size(0) > 0 && pos.dim_size(1) == 4, errors::InvalidArgument("pos must have shape [>0, 4]"));
172
+ OP_REQUIRES(ctx, tri.dims() == 2 && tri.dim_size(0) > 0 && tri.dim_size(1) == 3, errors::InvalidArgument("tri must have shape [>0, 3]"));
173
+ OP_REQUIRES(ctx, out.dims() == 4 && out.dim_size(0) == p.depth && out.dim_size(1) == p.height && out.dim_size(2) == p.width && out.dim_size(3) == 4, errors::InvalidArgument("out must have shape [depth, height, width, 4]"));
174
+ OP_REQUIRES(ctx, dy.dims() == 4 && dy.dim_size(0) == p.depth && dy.dim_size(1) == p.height && dy.dim_size(2) == p.width && dy.dim_size(3) == 4, errors::InvalidArgument("dy must have shape [depth, height, width, 4]"));
175
+ if (ENABLE_DB)
176
+ OP_REQUIRES(ctx, ddb.dims() == 4 && ddb.dim_size(0) == p.depth && ddb.dim_size(1) == p.height && ddb.dim_size(2) == p.width && ddb.dim_size(3) == 4, errors::InvalidArgument("ddb must have shape [depth, height, width, 4]"));
177
+
178
+ // Populate parameters.
179
+ p.numTriangles = tri.dim_size(0);
180
+ p.numVertices = p.instance_mode ? pos.dim_size(1) : pos.dim_size(0);
181
+ p.pos = pos.flat<float>().data();
182
+ p.tri = tri.flat<int>().data();
183
+ p.out = out.flat<float>().data();
184
+ p.dy = dy.flat<float>().data();
185
+ p.ddb = ENABLE_DB ? ddb.flat<float>().data() : 0;
186
+
187
+ // Set up pixel position to clip space x, y transform.
188
+ p.xs = 2.f / (float)p.width;
189
+ p.xo = 1.f / (float)p.width - 1.f;
190
+ p.ys = 2.f / (float)p.height;
191
+ p.yo = 1.f / (float)p.height - 1.f;
192
+
193
+ // Allocate output tensor for position gradients.
194
+ Tensor* grad_tensor = NULL;
195
+ TensorShape grad_shape;
196
+ if (p.instance_mode)
197
+ grad_shape.AddDim(p.depth);
198
+ grad_shape.AddDim(p.numVertices);
199
+ grad_shape.AddDim(4);
200
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, grad_shape, &grad_tensor));
201
+ p.grad = grad_tensor->flat<float>().data();
202
+
203
+ // Clear the output buffers.
204
+ size_t gradBytes = (p.instance_mode ? p.depth : 1) * p.numVertices * 4 * sizeof(float);
205
+ cudaMemsetAsync(p.grad, 0, gradBytes, stream);
206
+
207
+ // Verify that buffers are aligned to allow float2/float4 operations.
208
+ OP_REQUIRES(ctx, !((uintptr_t)p.pos & 15), errors::Internal("pos input tensor not aligned to float4"));
209
+ OP_REQUIRES(ctx, !((uintptr_t)p.dy & 7), errors::Internal("dy input tensor not aligned to float2"));
210
+ if (ENABLE_DB)
211
+ OP_REQUIRES(ctx, !((uintptr_t)p.ddb & 15), errors::Internal("ddb input tensor not aligned to float4"));
212
+
213
+ // Choose launch parameters.
214
+ dim3 blockSize = getLaunchBlockSize(RAST_GRAD_MAX_KERNEL_BLOCK_WIDTH, RAST_GRAD_MAX_KERNEL_BLOCK_HEIGHT, p.width, p.height);
215
+ dim3 gridSize = getLaunchGridSize(blockSize, p.width, p.height, p.depth);
216
+
217
+ // Launch CUDA kernel.
218
+ void* args[] = {&p};
219
+ void* func = ENABLE_DB ? (void*)RasterizeGradKernelDb : (void*)RasterizeGradKernel;
220
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(func, gridSize, blockSize, args, 0, stream));
221
+ }
222
+ };
223
+
224
+ REGISTER_OP("RasterizeGrad")
225
+ .Input ("pos: float")
226
+ .Input ("tri: int32")
227
+ .Input ("out: float")
228
+ .Input ("dy: float")
229
+ .Output ("grad: float");
230
+
231
+ REGISTER_OP("RasterizeGradDb")
232
+ .Input ("pos: float")
233
+ .Input ("tri: int32")
234
+ .Input ("out: float")
235
+ .Input ("dy: float")
236
+ .Input ("ddb: float")
237
+ .Output ("grad: float");
238
+
239
+ REGISTER_KERNEL_BUILDER(Name("RasterizeGrad") .Device(DEVICE_GPU), RasterizeGradOp<false>);
240
+ REGISTER_KERNEL_BUILDER(Name("RasterizeGradDb").Device(DEVICE_GPU), RasterizeGradOp<true>);
241
+
242
+ //------------------------------------------------------------------------
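As with the interpolation ops, RasterizeFwd and the gradient ops above are wrapped by nvdiffrast/tensorflow/ops.py. A minimal sketch, assuming that wrapper exposes rasterize() with an output_db switch; values are illustrative only:

    import tensorflow as tf
    import nvdiffrast.tensorflow as dr

    # Instanced mode: pos has shape [minibatch, num_vertices, 4].
    pos = tf.constant([[[-0.8, -0.8, 0.0, 1.0],
                        [ 0.8, -0.8, 0.0, 1.0],
                        [ 0.0,  0.8, 0.0, 1.0]]], dtype=tf.float32)
    tri = tf.constant([[0, 1, 2]], dtype=tf.int32)

    # out holds (u, v, z/w, triangle_id); out_db holds barycentric screen-space
    # derivatives and has zero channels when the op runs with enable_db = 0.
    out, out_db = dr.rasterize(pos, tri, resolution=[256, 256], output_db=True)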
extensions/nvdiffrast/nvdiffrast/tensorflow/tf_texture.cu ADDED
@@ -0,0 +1,525 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ //------------------------------------------------------------------------
10
+ // Common op attribute parser.
11
+
12
+ static __host__ void parseOpAttributes(OpKernelConstruction* ctx, TextureKernelParams& p)
13
+ {
14
+ // Mip and filter modes.
15
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("filter_mode", &p.filterMode));
16
+ OP_REQUIRES(ctx, p.filterMode >= 0 && p.filterMode < TEX_MODE_COUNT, errors::InvalidArgument("filter_mode unsupported"));
17
+ p.enableMip = (p.filterMode == TEX_MODE_LINEAR_MIPMAP_NEAREST || p.filterMode == TEX_MODE_LINEAR_MIPMAP_LINEAR);
18
+
19
+ // Mip level clamp.
20
+ if (p.enableMip)
21
+ {
22
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("max_mip_level", &p.mipLevelLimit));
23
+ OP_REQUIRES(ctx, p.mipLevelLimit >= -1, errors::InvalidArgument("invalid max_mip_level"));
24
+ ctx->GetAttr("tex_const", &p.texConst); // Only available in forward op.
25
+ }
26
+
27
+ // Boundary mode.
28
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("boundary_mode", &p.boundaryMode));
29
+ OP_REQUIRES(ctx, p.boundaryMode >= 0 && p.boundaryMode < TEX_BOUNDARY_MODE_COUNT, errors::InvalidArgument("boundary_mode unsupported"));
30
+ }
31
+
32
+ //------------------------------------------------------------------------
33
+ // Forward TensorFlow op.
34
+
35
+ struct TextureFwdOp : public OpKernel
36
+ {
37
+ TextureKernelParams m_attribs;
38
+ PersistentTensor m_persistentMipTensor; // Used if texture is constant and mips are enabled.
39
+ bool m_persistentMipTensorInitialized;
40
+
41
+ TextureFwdOp(OpKernelConstruction* ctx): OpKernel(ctx)
42
+ {
43
+ memset(&m_attribs, 0, sizeof(m_attribs));
44
+ m_persistentMipTensorInitialized = false;
45
+ parseOpAttributes(ctx, m_attribs);
46
+ }
47
+
48
+ void Compute(OpKernelContext* ctx)
49
+ {
50
+ TextureKernelParams& p = m_attribs;
51
+ cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
52
+ bool cube_mode = (p.boundaryMode == TEX_BOUNDARY_MODE_CUBE);
53
+
54
+ // Get input.
55
+ const Tensor& tex = ctx->input(0);
56
+ const Tensor& uv = ctx->input(1);
57
+ const Tensor& uv_da = ctx->input(p.enableMip ? 2 : 1);
58
+
59
+ // Extract input dimensions.
60
+ p.n = (uv.dims() > 0) ? uv.dim_size(0) : 0;
61
+ p.imgHeight = (uv.dims() > 1) ? uv.dim_size(1) : 0;
62
+ p.imgWidth = (uv.dims() > 2) ? uv.dim_size(2) : 0;
63
+ p.texDepth = (tex.dims() > 0) ? tex.dim_size(0) : 0;
64
+ if (!cube_mode)
65
+ {
66
+ p.texHeight = (tex.dims() > 1) ? tex.dim_size(1) : 0;
67
+ p.texWidth = (tex.dims() > 2) ? tex.dim_size(2) : 0;
68
+ p.channels = (tex.dims() > 3) ? tex.dim_size(3) : 0;
69
+ }
70
+ else
71
+ {
72
+ p.texHeight = (tex.dims() > 2) ? tex.dim_size(2) : 0;
73
+ p.texWidth = (tex.dims() > 3) ? tex.dim_size(3) : 0;
74
+ p.channels = (tex.dims() > 4) ? tex.dim_size(4) : 0;
75
+ }
76
+
77
+ // Sanity checks.
78
+ if (!cube_mode)
79
+ {
80
+ OP_REQUIRES(ctx, tex.dims() == 4 && tex.dim_size(0) > 0 && tex.dim_size(1) > 0 && tex.dim_size(2) > 0 && tex.dim_size(3) > 0, errors::InvalidArgument("tex must have shape[>0, >0, >0, >0]"));
81
+ OP_REQUIRES(ctx, uv.dims() == 4 && uv.dim_size(0) > 0 && uv.dim_size(1) > 0 && uv.dim_size(2) > 0 && uv.dim_size(3) == 2, errors::InvalidArgument("uv must have shape [>0, >0, >0, 2]"));
82
+ }
83
+ else
84
+ {
85
+ OP_REQUIRES(ctx, tex.dims() == 5 && tex.dim_size(0) > 0 && tex.dim_size(1) == 6 && tex.dim_size(2) > 0 && tex.dim_size(3) > 0 && tex.dim_size(4) > 0, errors::InvalidArgument("tex must have shape[>0, 6, >0, >0, >0] in cube map mode"));
86
+ OP_REQUIRES(ctx, uv.dims() == 4 && uv.dim_size(0) > 0 && uv.dim_size(1) > 0 && uv.dim_size(2) > 0 && uv.dim_size(3) == 3, errors::InvalidArgument("uv must have shape [>0, >0, >0, 3] in cube map mode"));
87
+ OP_REQUIRES(ctx, tex.dim_size(2) == tex.dim_size(3), errors::InvalidArgument("texture shape must be square in cube map mode"));
88
+ }
89
+ OP_REQUIRES(ctx, tex.dim_size(0) == 1 || tex.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs tex, uv"));
90
+ OP_REQUIRES(ctx, p.texWidth <= (1 << TEX_MAX_MIP_LEVEL) && p.texHeight <= (1 << TEX_MAX_MIP_LEVEL), errors::InvalidArgument("texture size too large"));
91
+ if (p.enableMip)
92
+ {
93
+ if (!cube_mode)
94
+ OP_REQUIRES(ctx, uv_da.dims() == 4 && uv_da.dim_size(0) == p.n && uv_da.dim_size(1) == p.imgHeight && uv_da.dim_size(2) == p.imgWidth && uv_da.dim_size(3) == 4, errors::InvalidArgument("uv_da must have shape [minibatch_size, height, width, 4]"));
95
+ else
96
+ OP_REQUIRES(ctx, uv_da.dims() == 4 && uv_da.dim_size(0) == p.n && uv_da.dim_size(1) == p.imgHeight && uv_da.dim_size(2) == p.imgWidth && uv_da.dim_size(3) == 6, errors::InvalidArgument("uv_da must have shape [minibatch_size, height, width, 6] in cube map mode"));
97
+ }
98
+
99
+ // Get input pointers.
100
+ p.tex[0] = tex.flat<float>().data();
101
+ p.uv = uv.flat<float>().data();
102
+ p.uvDA = p.enableMip ? uv_da.flat<float>().data() : 0;
103
+
104
+ // Allocate output tensor.
105
+ Tensor* out_tensor = NULL;
106
+ TensorShape out_shape;
107
+ out_shape.AddDim(p.n);
108
+ out_shape.AddDim(p.imgHeight);
109
+ out_shape.AddDim(p.imgWidth);
110
+ out_shape.AddDim(p.channels);
111
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, out_shape, &out_tensor));
112
+ p.out = out_tensor->flat<float>().data();
113
+
114
+ // Choose kernel variants based on channel count.
115
+ void* args[] = {&p};
116
+ int channel_div_idx = 0;
117
+ if (!(p.channels & 3))
118
+ channel_div_idx = 2; // Channel count divisible by 4.
119
+ else if (!(p.channels & 1))
120
+ channel_div_idx = 1; // Channel count divisible by 2.
121
+
122
+ // Mip-related setup.
123
+ float* pmip = 0;
124
+ if (p.enableMip)
125
+ {
126
+ // Generate mip offsets.
127
+ int mipOffsets[TEX_MAX_MIP_LEVEL];
128
+ int mipTotal = calculateMipInfo(ctx, p, mipOffsets);
129
+
130
+ // Mip output tensor.
131
+ Tensor* mip_tensor = NULL;
132
+ TensorShape mip_shape;
133
+ mip_shape.AddDim(mipTotal);
134
+
135
+ // If texture is constant, calculate mip stack only once.
136
+ bool computeMip = true;
137
+ if (p.texConst)
138
+ {
139
+ // First execution?
140
+ if (!m_persistentMipTensorInitialized)
141
+ {
142
+ // Allocate a persistent mip tensor.
143
+ OP_REQUIRES_OK(ctx, ctx->allocate_persistent(DT_FLOAT, mip_shape, &m_persistentMipTensor, &mip_tensor));
144
+ m_persistentMipTensorInitialized = true;
145
+ }
146
+ else
147
+ {
148
+ // Reuse the persistent tensor, do not recompute mip levels.
149
+ mip_tensor = m_persistentMipTensor.AccessTensor(ctx);
150
+ computeMip = false;
151
+ }
152
+
153
+ // Set as output tensor as well.
154
+ ctx->set_output(1, *mip_tensor);
155
+ }
156
+ else
157
+ {
158
+ // Allocate an output tensor as usual.
159
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(1, mip_shape, &mip_tensor));
160
+ }
161
+
162
+ pmip = mip_tensor->flat<float>().data(); // Pointer to data.
163
+ for (int i=1; i <= p.mipLevelMax; i++)
164
+ p.tex[i] = pmip + mipOffsets[i]; // Pointers to mip levels.
165
+
166
+ // Build mip levels if needed.
167
+ if (computeMip)
168
+ {
169
+ for (int i=1; i <= p.mipLevelMax; i++)
170
+ {
171
+ int2 ms = mipLevelSize(p, i);
172
+ int3 sz = make_int3(ms.x, ms.y, p.texDepth);
173
+ dim3 blockSize = getLaunchBlockSize(TEX_FWD_MAX_MIP_KERNEL_BLOCK_WIDTH, TEX_FWD_MAX_MIP_KERNEL_BLOCK_HEIGHT, sz.x, sz.y);
174
+ dim3 gridSize = getLaunchGridSize(blockSize, sz.x, sz.y, sz.z * (cube_mode ? 6 : 1));
175
+ p.mipLevelOut = i;
176
+
177
+ void* build_func_tbl[3] = { (void*)MipBuildKernel1, (void*)MipBuildKernel2, (void*)MipBuildKernel4 };
178
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(build_func_tbl[channel_div_idx], gridSize, blockSize, args, 0, stream));
179
+ }
180
+ }
181
+ }
182
+
183
+ // Verify that buffers are aligned to allow float2/float4 operations. Unused pointers are zero so always aligned.
184
+ if (!cube_mode)
185
+ OP_REQUIRES(ctx, !((uintptr_t)p.uv & 7), errors::Internal("uv input tensor not aligned to float2"));
186
+ if ((p.channels & 3) == 0)
187
+ {
188
+ OP_REQUIRES(ctx, !((uintptr_t)p.tex[0] & 15), errors::Internal("tex input tensor not aligned to float4"));
189
+ OP_REQUIRES(ctx, !((uintptr_t)p.out & 15), errors::Internal("out output tensor not aligned to float4"));
190
+ OP_REQUIRES(ctx, !((uintptr_t)pmip & 15), errors::Internal("mip output tensor not aligned to float4"));
191
+ }
192
+ if ((p.channels & 1) == 0)
193
+ {
194
+ OP_REQUIRES(ctx, !((uintptr_t)p.tex[0] & 7), errors::Internal("tex input tensor not aligned to float2"));
195
+ OP_REQUIRES(ctx, !((uintptr_t)p.out & 7), errors::Internal("out output tensor not aligned to float2"));
196
+ OP_REQUIRES(ctx, !((uintptr_t)pmip & 7), errors::Internal("mip output tensor not aligned to float2"));
197
+ }
198
+ if (!cube_mode)
199
+ OP_REQUIRES(ctx, !((uintptr_t)p.uvDA & 15), errors::Internal("uv_da input tensor not aligned to float4"));
200
+ else
201
+ OP_REQUIRES(ctx, !((uintptr_t)p.uvDA & 7), errors::Internal("uv_da input tensor not aligned to float2"));
202
+
203
+ // Choose launch parameters for texture lookup kernel.
204
+ dim3 blockSize = getLaunchBlockSize(TEX_FWD_MAX_KERNEL_BLOCK_WIDTH, TEX_FWD_MAX_KERNEL_BLOCK_HEIGHT, p.imgWidth, p.imgHeight);
205
+ dim3 gridSize = getLaunchGridSize(blockSize, p.imgWidth, p.imgHeight, p.n);
206
+
207
+ // Choose kernel based on filter mode, cube mode, and datatype.
208
+ void* func_tbl[TEX_MODE_COUNT * 3 * 2] = {
209
+ (void*)TextureFwdKernelNearest1,
210
+ (void*)TextureFwdKernelNearest2,
211
+ (void*)TextureFwdKernelNearest4,
212
+ (void*)TextureFwdKernelLinear1,
213
+ (void*)TextureFwdKernelLinear2,
214
+ (void*)TextureFwdKernelLinear4,
215
+ (void*)TextureFwdKernelLinearMipmapNearest1,
216
+ (void*)TextureFwdKernelLinearMipmapNearest2,
217
+ (void*)TextureFwdKernelLinearMipmapNearest4,
218
+ (void*)TextureFwdKernelLinearMipmapLinear1,
219
+ (void*)TextureFwdKernelLinearMipmapLinear2,
220
+ (void*)TextureFwdKernelLinearMipmapLinear4,
221
+ (void*)TextureFwdKernelCubeNearest1,
222
+ (void*)TextureFwdKernelCubeNearest2,
223
+ (void*)TextureFwdKernelCubeNearest4,
224
+ (void*)TextureFwdKernelCubeLinear1,
225
+ (void*)TextureFwdKernelCubeLinear2,
226
+ (void*)TextureFwdKernelCubeLinear4,
227
+ (void*)TextureFwdKernelCubeLinearMipmapNearest1,
228
+ (void*)TextureFwdKernelCubeLinearMipmapNearest2,
229
+ (void*)TextureFwdKernelCubeLinearMipmapNearest4,
230
+ (void*)TextureFwdKernelCubeLinearMipmapLinear1,
231
+ (void*)TextureFwdKernelCubeLinearMipmapLinear2,
232
+ (void*)TextureFwdKernelCubeLinearMipmapLinear4,
233
+ };
234
+
235
+ // Function index.
236
+ int func_idx = p.filterMode;
237
+ if (cube_mode)
238
+ func_idx += TEX_MODE_COUNT;
239
+ func_idx = func_idx * 3 + channel_div_idx;
240
+
241
+ // Launch kernel.
242
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(func_tbl[func_idx], gridSize, blockSize, args, 0, stream));
243
+ }
244
+ };
245
+
246
+ REGISTER_OP("TextureFwd")
247
+ .Input ("tex: float")
248
+ .Input ("uv: float")
249
+ .Output ("out: float")
250
+ .Attr ("filter_mode: int")
251
+ .Attr ("boundary_mode: int");
252
+
253
+ REGISTER_OP("TextureFwdMip")
254
+ .Input ("tex: float")
255
+ .Input ("uv: float")
256
+ .Input ("uv_da: float")
257
+ .Output ("out: float")
258
+ .Output ("mip: float")
259
+ .Attr ("filter_mode: int")
260
+ .Attr ("boundary_mode: int")
261
+ .Attr ("tex_const: int")
262
+ .Attr ("max_mip_level: int");
263
+
264
+ REGISTER_KERNEL_BUILDER(Name("TextureFwd") .Device(DEVICE_GPU), TextureFwdOp);
265
+ REGISTER_KERNEL_BUILDER(Name("TextureFwdMip").Device(DEVICE_GPU), TextureFwdOp);
266
+
267
+ //------------------------------------------------------------------------
268
+ // Gradient TensorFlow op.
269
+
270
+ struct TextureGradOp : public OpKernel
271
+ {
272
+ TextureKernelParams m_attribs;
273
+
274
+ TextureGradOp(OpKernelConstruction* ctx): OpKernel(ctx)
275
+ {
276
+ memset(&m_attribs, 0, sizeof(m_attribs));
277
+ parseOpAttributes(ctx, m_attribs);
278
+ }
279
+
280
+ void Compute(OpKernelContext* ctx)
281
+ {
282
+ TextureKernelParams& p = m_attribs;
283
+ cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
284
+ bool cube_mode = (p.boundaryMode == TEX_BOUNDARY_MODE_CUBE);
285
+
286
+ // Get input.
287
+ const Tensor& tex = ctx->input(0);
288
+ const Tensor& uv = ctx->input(1);
289
+ const Tensor& dy = ctx->input(2);
290
+ const Tensor& uv_da = ctx->input(p.enableMip ? 3 : 2);
291
+ const Tensor& mip = ctx->input(p.enableMip ? 4 : 2);
292
+
293
+ // Extract input dimensions.
294
+ p.n = (uv.dims() > 0) ? uv.dim_size(0) : 0;
295
+ p.imgHeight = (uv.dims() > 1) ? uv.dim_size(1) : 0;
296
+ p.imgWidth = (uv.dims() > 2) ? uv.dim_size(2) : 0;
297
+ p.texDepth = (tex.dims() > 0) ? tex.dim_size(0) : 0;
298
+ if (!cube_mode)
299
+ {
300
+ p.texHeight = (tex.dims() > 1) ? tex.dim_size(1) : 0;
301
+ p.texWidth = (tex.dims() > 2) ? tex.dim_size(2) : 0;
302
+ p.channels = (tex.dims() > 3) ? tex.dim_size(3) : 0;
303
+ }
304
+ else
305
+ {
306
+ p.texHeight = (tex.dims() > 2) ? tex.dim_size(2) : 0;
307
+ p.texWidth = (tex.dims() > 3) ? tex.dim_size(3) : 0;
308
+ p.channels = (tex.dims() > 4) ? tex.dim_size(4) : 0;
309
+ }
310
+
311
+ // Sanity checks.
312
+ if (!cube_mode)
313
+ {
314
+ OP_REQUIRES(ctx, tex.dims() == 4 && tex.dim_size(0) > 0 && tex.dim_size(1) > 0 && tex.dim_size(2) > 0 && tex.dim_size(3) > 0, errors::InvalidArgument("tex must have shape[>0, >0, >0, >0]"));
315
+ OP_REQUIRES(ctx, uv.dims() == 4 && uv.dim_size(0) > 0 && uv.dim_size(1) > 0 && uv.dim_size(2) > 0 && uv.dim_size(3) == 2, errors::InvalidArgument("uv must have shape [>0, >0, >0, 2]"));
316
+ }
317
+ else
318
+ {
319
+ OP_REQUIRES(ctx, tex.dims() == 5 && tex.dim_size(0) > 0 && tex.dim_size(1) == 6 && tex.dim_size(2) > 0 && tex.dim_size(3) > 0 && tex.dim_size(4) > 0, errors::InvalidArgument("tex must have shape[>0, 6, >0, >0, >0] in cube map mode"));
320
+ OP_REQUIRES(ctx, uv.dims() == 4 && uv.dim_size(0) > 0 && uv.dim_size(1) > 0 && uv.dim_size(2) > 0 && uv.dim_size(3) == 3, errors::InvalidArgument("uv must have shape [>0, >0, >0, 3] in cube map mode"));
321
+ OP_REQUIRES(ctx, tex.dim_size(2) == tex.dim_size(3), errors::InvalidArgument("texture shape must be square in cube map mode"));
322
+ }
323
+ OP_REQUIRES(ctx, tex.dim_size(0) == 1 || tex.dim_size(0) == p.n, errors::InvalidArgument("minibatch size mismatch between inputs tex, uv"));
324
+ OP_REQUIRES(ctx, dy.dims() == 4 && dy.dim_size(0) == p.n && dy.dim_size(1) == p.imgHeight && dy.dim_size(2) == p.imgWidth && dy.dim_size(3) == p.channels, errors::InvalidArgument("dy must have shape [minibatch_size, height, width, channels]"));
325
+ if (p.enableMip)
326
+ {
327
+ if (!cube_mode)
328
+ OP_REQUIRES(ctx, uv_da.dims() == 4 && uv_da.dim_size(0) == p.n && uv_da.dim_size(1) == p.imgHeight && uv_da.dim_size(2) == p.imgWidth && uv_da.dim_size(3) == 4, errors::InvalidArgument("uv_da must have shape [minibatch_size, height, width, 4]"));
329
+ else
330
+ OP_REQUIRES(ctx, uv_da.dims() == 4 && uv_da.dim_size(0) == p.n && uv_da.dim_size(1) == p.imgHeight && uv_da.dim_size(2) == p.imgWidth && uv_da.dim_size(3) == 6, errors::InvalidArgument("uv_da must have shape [minibatch_size, height, width, 6] in cube map mode"));
331
+ }
332
+
333
+ // Get input pointers.
334
+ p.tex[0] = tex.flat<float>().data();
335
+ p.uv = uv.flat<float>().data();
336
+ p.dy = dy.flat<float>().data();
337
+ p.uvDA = p.enableMip ? uv_da.flat<float>().data() : 0;
338
+ float* pmip = p.enableMip ? (float*)mip.flat<float>().data() : 0;
339
+
340
+ // Allocate output tensor for tex gradient.
341
+ Tensor* grad_tex_tensor = NULL;
342
+ TensorShape grad_tex_shape;
343
+ grad_tex_shape.AddDim(p.texDepth);
344
+ if (cube_mode)
345
+ grad_tex_shape.AddDim(6);
346
+ grad_tex_shape.AddDim(p.texHeight);
347
+ grad_tex_shape.AddDim(p.texWidth);
348
+ grad_tex_shape.AddDim(p.channels);
349
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, grad_tex_shape, &grad_tex_tensor));
350
+ p.gradTex[0] = grad_tex_tensor->flat<float>().data();
351
+
352
+ // Allocate output tensor for uv gradient.
353
+ if (p.filterMode != TEX_MODE_NEAREST)
354
+ {
355
+ TensorShape grad_uv_shape;
356
+ Tensor* grad_uv_tensor = NULL;
357
+ grad_uv_shape.AddDim(p.n);
358
+ grad_uv_shape.AddDim(p.imgHeight);
359
+ grad_uv_shape.AddDim(p.imgWidth);
360
+ grad_uv_shape.AddDim(uv.dim_size(3));
361
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(1, grad_uv_shape, &grad_uv_tensor));
362
+ p.gradUV = grad_uv_tensor->flat<float>().data();
363
+
364
+ // Allocate output tensor for uv_da gradient.
365
+ if (p.filterMode == TEX_MODE_LINEAR_MIPMAP_LINEAR)
366
+ {
367
+ Tensor* grad_uv_da_tensor = NULL;
368
+ grad_uv_shape.set_dim(3, uv_da.dim_size(3));
369
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(2, grad_uv_shape, &grad_uv_da_tensor));
370
+ p.gradUVDA = grad_uv_da_tensor->flat<float>().data();
371
+ }
372
+ }
373
+
374
+ // Choose kernel variants based on channel count.
375
+ int channel_div_idx = 0;
376
+ if (!(p.channels & 3))
377
+ channel_div_idx = 2; // Channel count divisible by 4.
378
+ else if (!(p.channels & 1))
379
+ channel_div_idx = 1; // Channel count divisible by 2.
380
+
381
+ // Mip-related setup.
382
+ Tensor grad_mip_tensor;
383
+ float* pgradMip = 0;
384
+ if (p.enableMip)
385
+ {
386
+ // Generate mip offsets.
387
+ int mipOffsets[TEX_MAX_MIP_LEVEL];
388
+ int mipTotal = calculateMipInfo(ctx, p, mipOffsets);
389
+
390
+ // Get space for temporary mip gradients.
391
+ TensorShape grad_mip_shape;
392
+ grad_mip_shape.AddDim(mipTotal);
393
+ ctx->allocate_temp(DT_FLOAT, grad_mip_shape, &grad_mip_tensor);
394
+ pgradMip = grad_mip_tensor.flat<float>().data();
395
+ for (int i=1; i <= p.mipLevelMax; i++)
396
+ {
397
+ p.tex[i] = pmip + mipOffsets[i]; // Pointers to mip levels.
398
+ p.gradTex[i] = pgradMip + mipOffsets[i]; // Pointers to mip gradients.
399
+ }
400
+
401
+ // Clear mip gradients.
402
+ OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(pgradMip, 0, mipTotal * sizeof(float), stream));
403
+ }
404
+
405
+ // Initialize texture gradients to zero.
406
+ int texBytes = p.texHeight * p.texWidth * p.texDepth * p.channels * sizeof(float);
407
+ if (cube_mode)
408
+ texBytes *= 6;
409
+ OP_CHECK_CUDA_ERROR(ctx, cudaMemsetAsync(p.gradTex[0], 0, texBytes, stream));
410
+
411
+ // Verify that buffers are aligned to allow float2/float4 operations. Unused pointers are zero so always aligned.
412
+ if (!cube_mode)
413
+ {
414
+ OP_REQUIRES(ctx, !((uintptr_t)p.uv & 7), errors::Internal("uv input tensor not aligned to float2"));
415
+ OP_REQUIRES(ctx, !((uintptr_t)p.gradUV & 7), errors::Internal("grad_uv output tensor not aligned to float2"));
416
+ OP_REQUIRES(ctx, !((uintptr_t)p.uvDA & 15), errors::Internal("uv_da input tensor not aligned to float4"));
417
+ OP_REQUIRES(ctx, !((uintptr_t)p.gradUVDA & 15), errors::Internal("grad_uv_da output tensor not aligned to float4"));
418
+ }
419
+ else
420
+ {
421
+ OP_REQUIRES(ctx, !((uintptr_t)p.uvDA & 7), errors::Internal("uv_da input tensor not aligned to float2"));
422
+ OP_REQUIRES(ctx, !((uintptr_t)p.gradUVDA & 7), errors::Internal("grad_uv_da output tensor not aligned to float2"));
423
+ }
424
+ if ((p.channels & 3) == 0)
425
+ {
426
+ OP_REQUIRES(ctx, !((uintptr_t)p.tex[0] & 15), errors::Internal("tex input tensor not aligned to float4"));
427
+ OP_REQUIRES(ctx, !((uintptr_t)p.gradTex[0] & 15), errors::Internal("grad_tex output tensor not aligned to float4"));
428
+ OP_REQUIRES(ctx, !((uintptr_t)p.dy & 15), errors::Internal("dy input tensor not aligned to float4"));
429
+ OP_REQUIRES(ctx, !((uintptr_t)pmip & 15), errors::Internal("mip input tensor not aligned to float4"));
430
+ OP_REQUIRES(ctx, !((uintptr_t)pgradMip & 15), errors::Internal("internal mip gradient tensor not aligned to float4"));
431
+ }
432
+ if ((p.channels & 1) == 0)
433
+ {
434
+ OP_REQUIRES(ctx, !((uintptr_t)p.tex[0] & 7), errors::Internal("tex input tensor not aligned to float2"));
435
+ OP_REQUIRES(ctx, !((uintptr_t)p.gradTex[0] & 7), errors::Internal("grad_tex output tensor not aligned to float2"));
436
+ OP_REQUIRES(ctx, !((uintptr_t)p.dy & 7), errors::Internal("dy input tensor not aligned to float2"));
437
+ OP_REQUIRES(ctx, !((uintptr_t)pmip & 7), errors::Internal("mip input tensor not aligned to float2"));
438
+ OP_REQUIRES(ctx, !((uintptr_t)pgradMip & 7), errors::Internal("internal mip gradient tensor not aligned to float2"));
439
+ }
440
+
441
+ // Choose launch parameters for main gradient kernel.
442
+ void* args[] = {&p};
443
+ dim3 blockSize = getLaunchBlockSize(TEX_GRAD_MAX_KERNEL_BLOCK_WIDTH, TEX_GRAD_MAX_KERNEL_BLOCK_HEIGHT, p.imgWidth, p.imgHeight);
444
+ dim3 gridSize = getLaunchGridSize(blockSize, p.imgWidth, p.imgHeight, p.n);
445
+
446
+ void* func_tbl[TEX_MODE_COUNT * 2] = {
447
+ (void*)TextureGradKernelNearest,
448
+ (void*)TextureGradKernelLinear,
449
+ (void*)TextureGradKernelLinearMipmapNearest,
450
+ (void*)TextureGradKernelLinearMipmapLinear,
451
+ (void*)TextureGradKernelCubeNearest,
452
+ (void*)TextureGradKernelCubeLinear,
453
+ (void*)TextureGradKernelCubeLinearMipmapNearest,
454
+ (void*)TextureGradKernelCubeLinearMipmapLinear,
455
+ };
456
+
457
+ // Function index.
458
+ int func_idx = p.filterMode;
459
+ if (cube_mode)
460
+ func_idx += TEX_MODE_COUNT;
461
+
462
+ // Launch main gradient kernel.
463
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(func_tbl[func_idx], gridSize, blockSize, args, 0, stream));
464
+
465
+ // Launch kernel to pull gradients from mip levels.
466
+ if (p.enableMip)
467
+ {
468
+ dim3 blockSize = getLaunchBlockSize(TEX_GRAD_MAX_MIP_KERNEL_BLOCK_WIDTH, TEX_GRAD_MAX_MIP_KERNEL_BLOCK_HEIGHT, p.texWidth, p.texHeight);
469
+ dim3 gridSize = getLaunchGridSize(blockSize, p.texWidth, p.texHeight, p.texDepth * (cube_mode ? 6 : 1));
470
+ int sharedBytes = blockSize.x * blockSize.y * p.channels * sizeof(float);
471
+
472
+ void* mip_grad_func_tbl[3] = { (void*)MipGradKernel1, (void*)MipGradKernel2, (void*)MipGradKernel4 };
473
+ OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel(mip_grad_func_tbl[channel_div_idx], gridSize, blockSize, args, sharedBytes, stream));
474
+ }
475
+ }
476
+ };
477
+
478
+ REGISTER_OP("TextureGradNearest")
479
+ .Input ("tex: float")
480
+ .Input ("uv: float")
481
+ .Input ("dy: float")
482
+ .Output ("grad_tex: float")
483
+ .Attr ("filter_mode: int")
484
+ .Attr ("boundary_mode: int");
485
+
486
+ REGISTER_OP("TextureGradLinear")
487
+ .Input ("tex: float")
488
+ .Input ("uv: float")
489
+ .Input ("dy: float")
490
+ .Output ("grad_tex: float")
491
+ .Output ("grad_uv: float")
492
+ .Attr ("filter_mode: int")
493
+ .Attr ("boundary_mode: int");
494
+
495
+ REGISTER_OP("TextureGradLinearMipmapNearest")
496
+ .Input ("tex: float")
497
+ .Input ("uv: float")
498
+ .Input ("dy: float")
499
+ .Input ("uv_da: float")
500
+ .Input ("mip: float")
501
+ .Output ("grad_tex: float")
502
+ .Output ("grad_uv: float")
503
+ .Attr ("filter_mode: int")
504
+ .Attr ("boundary_mode: int")
505
+ .Attr ("max_mip_level: int");
506
+
507
+ REGISTER_OP("TextureGradLinearMipmapLinear")
508
+ .Input ("tex: float")
509
+ .Input ("uv: float")
510
+ .Input ("dy: float")
511
+ .Input ("uv_da: float")
512
+ .Input ("mip: float")
513
+ .Output ("grad_tex: float")
514
+ .Output ("grad_uv: float")
515
+ .Output ("grad_uv_da: float")
516
+ .Attr ("filter_mode: int")
517
+ .Attr ("boundary_mode: int")
518
+ .Attr ("max_mip_level: int");
519
+
520
+ REGISTER_KERNEL_BUILDER(Name("TextureGradNearest") .Device(DEVICE_GPU), TextureGradOp);
521
+ REGISTER_KERNEL_BUILDER(Name("TextureGradLinear") .Device(DEVICE_GPU), TextureGradOp);
522
+ REGISTER_KERNEL_BUILDER(Name("TextureGradLinearMipmapNearest").Device(DEVICE_GPU), TextureGradOp);
523
+ REGISTER_KERNEL_BUILDER(Name("TextureGradLinearMipmapLinear") .Device(DEVICE_GPU), TextureGradOp);
524
+
525
+ //------------------------------------------------------------------------
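The texture ops are likewise reached through the Python wrapper. A minimal sketch, assuming ops.py exposes texture() and maps string filter/boundary modes to the integer attributes parsed above; shapes and values are illustrative:

    import numpy as np
    import tensorflow as tf
    import nvdiffrast.tensorflow as dr

    tex   = tf.constant(np.random.rand(1, 64, 64, 4), dtype=tf.float32)    # [minibatch, tex_height, tex_width, channels]
    uv    = tf.constant(np.random.rand(1, 128, 128, 2), dtype=tf.float32)  # [minibatch, height, width, 2]
    uv_da = tf.zeros([1, 128, 128, 4], dtype=tf.float32)                   # uv pixel differentials, required for mipmapping

    color = dr.texture(tex, uv, uv_da, filter_mode='linear-mipmap-linear', boundary_mode='wrap')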
extensions/nvdiffrast/nvdiffrast/torch/__init__.py ADDED
@@ -0,0 +1,10 @@
1
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ # and proprietary rights in and to this software, related documentation
5
+ # and any modifications thereto. Any use, reproduction, disclosure or
6
+ # distribution of this software and related documentation without an express
7
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ from .ops import RasterizeCudaContext, RasterizeGLContext, get_log_level, set_log_level, rasterize, DepthPeeler, interpolate, texture, texture_construct_mip, antialias, antialias_construct_topology_hash
10
+ __all__ = ["RasterizeCudaContext", "RasterizeGLContext", "get_log_level", "set_log_level", "rasterize", "DepthPeeler", "interpolate", "texture", "texture_construct_mip", "antialias", "antialias_construct_topology_hash"]
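A short sketch of the re-exported torch API (assumes a CUDA-capable GPU):

    import nvdiffrast.torch as dr

    dr.set_log_level(1)                  # warnings and above (the default)
    glctx = dr.RasterizeCudaContext()    # or dr.RasterizeGLContext() for the OpenGL path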
extensions/nvdiffrast/nvdiffrast/torch/ops.py ADDED
@@ -0,0 +1,734 @@
1
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ # and proprietary rights in and to this software, related documentation
5
+ # and any modifications thereto. Any use, reproduction, disclosure or
6
+ # distribution of this software and related documentation without an express
7
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ import importlib
10
+ import logging
11
+ import numpy as np
12
+ import os
13
+ import torch
14
+ import torch.utils.cpp_extension
15
+ from . import _C
16
+
17
+ #----------------------------------------------------------------------------
18
+ # C++/Cuda plugin compiler/loader.
19
+
20
+ _cached_plugin = {}
21
+ def _get_plugin(gl=False):
22
+ assert isinstance(gl, bool)
23
+
24
+ # Modified to use the precompiled torch CUDA extension (_C)
25
+ if not gl:
26
+ return _C
27
+
28
+ # Return cached plugin if already loaded.
29
+ if _cached_plugin.get(gl, None) is not None:
30
+ return _cached_plugin[gl]
31
+
32
+ # Make sure we can find the necessary compiler and library binaries.
33
+ if os.name == 'nt':
34
+ lib_dir = os.path.dirname(__file__) + r"\..\lib"
35
+ def find_cl_path():
36
+ import glob
37
+ def get_sort_key(x):
38
+ # Primary criterion is VS version, secondary is edition, third is internal MSVC version.
39
+ x = x.split('\\')[3:]
40
+ x[1] = {'BuildTools': '~0', 'Community': '~1', 'Pro': '~2', 'Professional': '~3', 'Enterprise': '~4'}.get(x[1], x[1])
41
+ return x
42
+ vs_relative_path = r"\Microsoft Visual Studio\*\*\VC\Tools\MSVC\*\bin\Hostx64\x64"
43
+ paths = glob.glob(r"C:\Program Files" + vs_relative_path)
44
+ paths += glob.glob(r"C:\Program Files (x86)" + vs_relative_path)
45
+ if paths:
46
+ return sorted(paths, key=get_sort_key)[-1]
47
+
48
+ # If cl.exe is not on path, try to find it.
49
+ if os.system("where cl.exe >nul 2>nul") != 0:
50
+ cl_path = find_cl_path()
51
+ if cl_path is None:
52
+ raise RuntimeError("Could not locate a supported Microsoft Visual C++ installation")
53
+ os.environ['PATH'] += ';' + cl_path
54
+
55
+ # Compiler options.
56
+ common_opts = ['-DNVDR_TORCH']
57
+ cc_opts = []
58
+ if os.name == 'nt':
59
+ cc_opts += ['/wd4067', '/wd4624'] # Disable warnings in torch headers.
60
+
61
+ # Linker options for the GL-interfacing plugin.
62
+ ldflags = []
63
+ if gl:
64
+ if os.name == 'posix':
65
+ ldflags = ['-lGL', '-lEGL']
66
+ elif os.name == 'nt':
67
+ libs = ['gdi32', 'opengl32', 'user32', 'setgpu']
68
+ ldflags = ['/LIBPATH:' + lib_dir] + ['/DEFAULTLIB:' + x for x in libs]
69
+
70
+ # List of source files.
71
+ if gl:
72
+ source_files = [
73
+ '../common/common.cpp',
74
+ '../common/glutil.cpp',
75
+ '../common/rasterize_gl.cpp',
76
+ 'torch_bindings_gl.cpp',
77
+ 'torch_rasterize_gl.cpp',
78
+ ]
79
+ else:
80
+ source_files = [
81
+ '../common/cudaraster/impl/Buffer.cpp',
82
+ '../common/cudaraster/impl/CudaRaster.cpp',
83
+ '../common/cudaraster/impl/RasterImpl.cu',
84
+ '../common/cudaraster/impl/RasterImpl.cpp',
85
+ '../common/common.cpp',
86
+ '../common/rasterize.cu',
87
+ '../common/interpolate.cu',
88
+ '../common/texture.cu',
89
+ '../common/texture.cpp',
90
+ '../common/antialias.cu',
91
+ 'torch_bindings.cpp',
92
+ 'torch_rasterize.cpp',
93
+ 'torch_interpolate.cpp',
94
+ 'torch_texture.cpp',
95
+ 'torch_antialias.cpp',
96
+ ]
97
+
98
+ # Some containers set this to contain old architectures that won't compile. We only need the one installed in the machine.
99
+ os.environ['TORCH_CUDA_ARCH_LIST'] = ''
100
+
101
+ # On Linux, show a warning if GLEW is being forcibly loaded when compiling the GL plugin.
102
+ if gl and (os.name == 'posix') and ('libGLEW' in os.environ.get('LD_PRELOAD', '')):
103
+ logging.getLogger('nvdiffrast').warning("Warning: libGLEW is being loaded via LD_PRELOAD, and will probably conflict with the OpenGL plugin")
104
+
105
+ # Try to detect if a stray lock file is left in cache directory and show a warning. This sometimes happens on Windows if the build is interrupted at just the right moment.
106
+ plugin_name = 'nvdiffrast_plugin' + ('_gl' if gl else '')
107
+ try:
108
+ lock_fn = os.path.join(torch.utils.cpp_extension._get_build_directory(plugin_name, False), 'lock')
109
+ if os.path.exists(lock_fn):
110
+ logging.getLogger('nvdiffrast').warning("Lock file exists in build directory: '%s'" % lock_fn)
111
+ except:
112
+ pass
113
+
114
+ # Speed up compilation on Windows.
115
+ if os.name == 'nt':
116
+ # Skip telemetry sending step in vcvarsall.bat
117
+ os.environ['VSCMD_SKIP_SENDTELEMETRY'] = '1'
118
+
119
+ # Opportunistically patch distutils to cache MSVC environments.
120
+ try:
121
+ import distutils._msvccompiler
122
+ import functools
123
+ if not hasattr(distutils._msvccompiler._get_vc_env, '__wrapped__'):
124
+ distutils._msvccompiler._get_vc_env = functools.lru_cache()(distutils._msvccompiler._get_vc_env)
125
+ except:
126
+ pass
127
+
128
+ # Compile and load.
129
+ source_paths = [os.path.join(os.path.dirname(__file__), fn) for fn in source_files]
130
+ torch.utils.cpp_extension.load(name=plugin_name, sources=source_paths, extra_cflags=common_opts+cc_opts, extra_cuda_cflags=common_opts+['-lineinfo'], extra_ldflags=ldflags, with_cuda=True, verbose=False)
131
+
132
+ # Import, cache, and return the compiled module.
133
+ _cached_plugin[gl] = importlib.import_module(plugin_name)
134
+ return _cached_plugin[gl]
135
+
136
+ #----------------------------------------------------------------------------
137
+ # Log level.
138
+ #----------------------------------------------------------------------------
139
+
140
+ def get_log_level():
141
+ '''Get current log level.
142
+
143
+ Returns:
144
+ Current log level in nvdiffrast. See `set_log_level()` for possible values.
145
+ '''
146
+ return _get_plugin().get_log_level()
147
+
148
+ def set_log_level(level):
149
+ '''Set log level.
150
+
151
+ Log levels follow the convention on the C++ side of Torch:
152
+ 0 = Info,
153
+ 1 = Warning,
154
+ 2 = Error,
155
+ 3 = Fatal.
156
+ The default log level is 1.
157
+
158
+ Args:
159
+ level: New log level as integer. Internal nvdiffrast messages of this
160
+ severity or higher will be printed, while messages of lower
161
+ severity will be silent.
162
+ '''
163
+ _get_plugin().set_log_level(level)
164
+
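For example (sketch), to temporarily include info-level messages and then restore the default:

    import nvdiffrast.torch as dr

    dr.set_log_level(0)                  # 0 = Info, most verbose
    assert dr.get_log_level() == 0
    dr.set_log_level(1)                  # back to the default (warnings and above)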
165
+ #----------------------------------------------------------------------------
166
+ # CudaRaster state wrapper.
167
+ #----------------------------------------------------------------------------
168
+
169
+ class RasterizeCudaContext:
170
+ def __init__(self, device=None):
171
+ '''Create a new Cuda rasterizer context.
172
+
173
+ The context is deleted and internal storage is released when the object is
174
+ destroyed.
175
+
176
+ Args:
177
+ device (Optional): Cuda device on which the context is created. Type can be
178
+ `torch.device`, string (e.g., `'cuda:1'`), or int. If not
179
+ specified, context will be created on currently active Cuda
180
+ device.
181
+ Returns:
182
+ The newly created Cuda rasterizer context.
183
+ '''
184
+ if device is None:
185
+ cuda_device_idx = torch.cuda.current_device()
186
+ else:
187
+ with torch.cuda.device(device):
188
+ cuda_device_idx = torch.cuda.current_device()
189
+ self.cpp_wrapper = _get_plugin().RasterizeCRStateWrapper(cuda_device_idx)
190
+ self.output_db = True
191
+ self.active_depth_peeler = None
192
+
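A minimal construction sketch (the device string is illustrative):

    import nvdiffrast.torch as dr

    glctx = dr.RasterizeCudaContext(device='cuda:0')   # context created on the given device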
193
+ #----------------------------------------------------------------------------
194
+ # GL state wrapper.
195
+ #----------------------------------------------------------------------------
196
+
197
+ class RasterizeGLContext:
198
+ def __init__(self, output_db=True, mode='automatic', device=None):
199
+ '''Create a new OpenGL rasterizer context.
200
+
201
+ Creating an OpenGL context is a slow operation so you should usually reuse the same
202
+ context in all calls to `rasterize()` on the same CPU thread. The OpenGL context
203
+ is deleted when the object is destroyed.
204
+
205
+ Side note: When using the OpenGL context in a rasterization operation, the
206
+ context's internal framebuffer object is automatically enlarged to accommodate the
207
+ rasterization operation's output shape, but it is never shrunk in size until the
208
+ context is destroyed. Thus, if you need to rasterize, say, deep low-resolution
209
+ tensors and also shallow high-resolution tensors, you can conserve GPU memory by
210
+ creating two separate OpenGL contexts for these tasks. In this scenario, using the
211
+ same OpenGL context for both tasks would end up reserving GPU memory for a deep,
212
+ high-resolution output tensor.
213
+
214
+ Args:
215
+ output_db (bool): Compute and output image-space derivatives of barycentrics.
216
+ mode: OpenGL context handling mode. Valid values are 'manual' and 'automatic'.
217
+ device (Optional): Cuda device on which the context is created. Type can be
218
+ `torch.device`, string (e.g., `'cuda:1'`), or int. If not
219
+ specified, context will be created on currently active Cuda
220
+ device.
221
+ Returns:
222
+ The newly created OpenGL rasterizer context.
223
+ '''
224
+ assert output_db is True or output_db is False
225
+ assert mode in ['automatic', 'manual']
226
+ self.output_db = output_db
227
+ self.mode = mode
228
+ if device is None:
229
+ cuda_device_idx = torch.cuda.current_device()
230
+ else:
231
+ with torch.cuda.device(device):
232
+ cuda_device_idx = torch.cuda.current_device()
233
+ self.cpp_wrapper = _get_plugin(gl=True).RasterizeGLStateWrapper(output_db, mode == 'automatic', cuda_device_idx)
234
+ self.active_depth_peeler = None # For error checking only.
235
+
236
+ def set_context(self):
237
+ '''Set (activate) OpenGL context in the current CPU thread.
238
+ Only available if context was created in manual mode.
239
+ '''
240
+ assert self.mode == 'manual'
241
+ self.cpp_wrapper.set_context()
242
+
243
+ def release_context(self):
244
+ '''Release (deactivate) currently active OpenGL context.
245
+ Only available if context was created in manual mode.
246
+ '''
247
+ assert self.mode == 'manual'
248
+ self.cpp_wrapper.release_context()
249
+
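A sketch of manual context handling (assumes EGL/OpenGL is available on the machine):

    import nvdiffrast.torch as dr

    glctx = dr.RasterizeGLContext(output_db=True, mode='manual', device='cuda:0')
    glctx.set_context()
    # ... issue rasterize() calls on this CPU thread ...
    glctx.release_context()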
250
+ #----------------------------------------------------------------------------
251
+ # Rasterize.
252
+ #----------------------------------------------------------------------------
253
+
254
+ class _rasterize_func(torch.autograd.Function):
255
+ @staticmethod
256
+ def forward(ctx, raster_ctx, pos, tri, resolution, ranges, grad_db, peeling_idx):
257
+ if isinstance(raster_ctx, RasterizeGLContext):
258
+ out, out_db = _get_plugin(gl=True).rasterize_fwd_gl(raster_ctx.cpp_wrapper, pos, tri, resolution, ranges, peeling_idx)
259
+ else:
260
+ out, out_db = _get_plugin().rasterize_fwd_cuda(raster_ctx.cpp_wrapper, pos, tri, resolution, ranges, peeling_idx)
261
+ ctx.save_for_backward(pos, tri, out)
262
+ ctx.saved_grad_db = grad_db
263
+ return out, out_db
264
+
265
+ @staticmethod
266
+ def backward(ctx, dy, ddb):
267
+ pos, tri, out = ctx.saved_tensors
268
+ if ctx.saved_grad_db:
269
+ g_pos = _get_plugin().rasterize_grad_db(pos, tri, out, dy, ddb)
270
+ else:
271
+ g_pos = _get_plugin().rasterize_grad(pos, tri, out, dy)
272
+ return None, g_pos, None, None, None, None, None
273
+
274
+ # Op wrapper.
275
+ def rasterize(glctx, pos, tri, resolution, ranges=None, grad_db=True):
276
+ '''Rasterize triangles.
277
+
278
+ All input tensors must be contiguous and reside in GPU memory except for
279
+ the `ranges` tensor that, if specified, has to reside in CPU memory. The
280
+ output tensors will be contiguous and reside in GPU memory.
281
+
282
+ Args:
283
+ glctx: Rasterizer context of type `RasterizeGLContext` or `RasterizeCudaContext`.
284
+ pos: Vertex position tensor with dtype `torch.float32`. To enable range
285
+ mode, this tensor should have a 2D shape [num_vertices, 4]. To enable
286
+ instanced mode, use a 3D shape [minibatch_size, num_vertices, 4].
287
+ tri: Triangle tensor with shape [num_triangles, 3] and dtype `torch.int32`.
288
+ resolution: Output resolution as integer tuple (height, width).
289
+ ranges: In range mode, tensor with shape [minibatch_size, 2] and dtype
290
+ `torch.int32`, specifying start indices and counts into `tri`.
291
+ Ignored in instanced mode.
292
+ grad_db: Propagate gradients of image-space derivatives of barycentrics
293
+ into `pos` in backward pass. Ignored if using an OpenGL context that
294
+ was not configured to output image-space derivatives.
295
+
296
+ Returns:
297
+ A tuple of two tensors. The first output tensor has shape [minibatch_size,
298
+ height, width, 4] and contains the main rasterizer output in order (u, v, z/w,
299
+ triangle_id). If the OpenGL context was configured to output image-space
300
+ derivatives of barycentrics, the second output tensor will also have shape
301
+ [minibatch_size, height, width, 4] and contain said derivatives in order
302
+ (du/dX, du/dY, dv/dX, dv/dY). Otherwise it will be an empty tensor with shape
303
+ [minibatch_size, height, width, 0].
304
+ '''
305
+ assert isinstance(glctx, (RasterizeGLContext, RasterizeCudaContext))
306
+ assert grad_db is True or grad_db is False
307
+ grad_db = grad_db and glctx.output_db
308
+
309
+ # Sanitize inputs.
310
+ assert isinstance(pos, torch.Tensor) and isinstance(tri, torch.Tensor)
311
+ resolution = tuple(resolution)
312
+ if ranges is None:
313
+ ranges = torch.empty(size=(0, 2), dtype=torch.int32, device='cpu')
314
+ else:
315
+ assert isinstance(ranges, torch.Tensor)
316
+
317
+ # Check that context is not currently reserved for depth peeling.
318
+ if glctx.active_depth_peeler is not None:
319
+ raise RuntimeError("Cannot call rasterize() during depth peeling operation, use rasterize_next_layer() instead")
320
+
321
+ # Instantiate the function.
322
+ return _rasterize_func.apply(glctx, pos, tri, resolution, ranges, grad_db, -1)
323
+
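# A small end-to-end sketch of rasterize() in instanced mode (illustrative values, not
# from this file): one triangle, minibatch of 1, 256x256 output, CUDA-based context.
import torch
import nvdiffrast.torch as dr

glctx = dr.RasterizeCudaContext()                       # or a RasterizeGLContext
pos = torch.tensor([[[-0.8, -0.8, 0.0, 1.0],
                     [ 0.8, -0.8, 0.0, 1.0],
                     [ 0.0,  0.8, 0.0, 1.0]]], dtype=torch.float32, device='cuda')
tri = torch.tensor([[0, 1, 2]], dtype=torch.int32, device='cuda')
rast, rast_db = dr.rasterize(glctx, pos, tri, resolution=(256, 256))
# rast: [1, 256, 256, 4] holding (u, v, z/w, triangle_id); rast_db holds barycentric derivatives.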
324
+ #----------------------------------------------------------------------------
325
+ # Depth peeler context manager for rasterizing multiple depth layers.
326
+ #----------------------------------------------------------------------------
327
+
328
+ class DepthPeeler:
329
+ def __init__(self, glctx, pos, tri, resolution, ranges=None, grad_db=True):
330
+ '''Create a depth peeler object for rasterizing multiple depth layers.
331
+
332
+ Arguments are the same as in `rasterize()`.
333
+
334
+ Returns:
335
+ The newly created depth peeler.
336
+ '''
337
+ assert isinstance(glctx, (RasterizeGLContext, RasterizeCudaContext))
338
+ assert grad_db is True or grad_db is False
339
+ grad_db = grad_db and glctx.output_db
340
+
341
+ # Sanitize inputs as usual.
342
+ assert isinstance(pos, torch.Tensor) and isinstance(tri, torch.Tensor)
343
+ resolution = tuple(resolution)
344
+ if ranges is None:
345
+ ranges = torch.empty(size=(0, 2), dtype=torch.int32, device='cpu')
346
+ else:
347
+ assert isinstance(ranges, torch.Tensor)
348
+
349
+ # Store all the parameters.
350
+ self.raster_ctx = glctx
351
+ self.pos = pos
352
+ self.tri = tri
353
+ self.resolution = resolution
354
+ self.ranges = ranges
355
+ self.grad_db = grad_db
356
+ self.peeling_idx = None
357
+
358
+ def __enter__(self):
359
+ if self.raster_ctx is None:
360
+ raise RuntimeError("Cannot re-enter a terminated depth peeling operation")
361
+ if self.raster_ctx.active_depth_peeler is not None:
362
+ raise RuntimeError("Cannot have multiple depth peelers active simultaneously in a rasterization context")
363
+ self.raster_ctx.active_depth_peeler = self
364
+ self.peeling_idx = 0
365
+ return self
366
+
367
+ def __exit__(self, *args):
368
+ assert self.raster_ctx.active_depth_peeler is self
369
+ self.raster_ctx.active_depth_peeler = None
370
+ self.raster_ctx = None # Remove all references to input tensors so they're not left dangling.
371
+ self.pos = None
372
+ self.tri = None
373
+ self.resolution = None
374
+ self.ranges = None
375
+ self.grad_db = None
376
+ self.peeling_idx = None
377
+ return None
378
+
379
+ def rasterize_next_layer(self):
380
+ '''Rasterize next depth layer.
381
+
382
+ Operation is equivalent to `rasterize()` except that previously reported
383
+ surface points are culled away.
384
+
385
+ Returns:
386
+ A tuple of two tensors as in `rasterize()`.
387
+ '''
388
+ assert self.raster_ctx.active_depth_peeler is self
389
+ assert self.peeling_idx >= 0
390
+ result = _rasterize_func.apply(self.raster_ctx, self.pos, self.tri, self.resolution, self.ranges, self.grad_db, self.peeling_idx)
391
+ self.peeling_idx += 1
392
+ return result
393
+
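# A sketch of peeling the two nearest depth layers with the context manager above
# (illustrative; assumes `glctx`, `pos` and `tri` as in the rasterize() example).
import nvdiffrast.torch as dr

layers = []
with dr.DepthPeeler(glctx, pos, tri, resolution=(256, 256)) as peeler:
    for _ in range(2):
        rast, rast_db = peeler.rasterize_next_layer()   # previously reported surfaces are culled
        layers.append(rast)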
394
+ #----------------------------------------------------------------------------
395
+ # Interpolate.
396
+ #----------------------------------------------------------------------------
397
+
398
+ # Output pixel differentials for at least some attributes.
399
+ class _interpolate_func_da(torch.autograd.Function):
400
+ @staticmethod
401
+ def forward(ctx, attr, rast, tri, rast_db, diff_attrs_all, diff_attrs_list):
402
+ out, out_da = _get_plugin().interpolate_fwd_da(attr, rast, tri, rast_db, diff_attrs_all, diff_attrs_list)
403
+ ctx.save_for_backward(attr, rast, tri, rast_db)
404
+ ctx.saved_misc = diff_attrs_all, diff_attrs_list
405
+ return out, out_da
406
+
407
+ @staticmethod
408
+ def backward(ctx, dy, dda):
409
+ attr, rast, tri, rast_db = ctx.saved_tensors
410
+ diff_attrs_all, diff_attrs_list = ctx.saved_misc
411
+ g_attr, g_rast, g_rast_db = _get_plugin().interpolate_grad_da(attr, rast, tri, dy, rast_db, dda, diff_attrs_all, diff_attrs_list)
412
+ return g_attr, g_rast, None, g_rast_db, None, None
413
+
414
+ # No pixel differential for any attribute.
415
+ class _interpolate_func(torch.autograd.Function):
416
+ @staticmethod
417
+ def forward(ctx, attr, rast, tri):
418
+ out, out_da = _get_plugin().interpolate_fwd(attr, rast, tri)
419
+ ctx.save_for_backward(attr, rast, tri)
420
+ return out, out_da
421
+
422
+ @staticmethod
423
+ def backward(ctx, dy, _):
424
+ attr, rast, tri = ctx.saved_tensors
425
+ g_attr, g_rast = _get_plugin().interpolate_grad(attr, rast, tri, dy)
426
+ return g_attr, g_rast, None
427
+
428
+ # Op wrapper.
429
+ def interpolate(attr, rast, tri, rast_db=None, diff_attrs=None):
430
+ """Interpolate vertex attributes.
431
+
432
+ All input tensors must be contiguous and reside in GPU memory. The output tensors
433
+ will be contiguous and reside in GPU memory.
434
+
435
+ Args:
436
+ attr: Attribute tensor with dtype `torch.float32`.
437
+ Shape is [num_vertices, num_attributes] in range mode, or
438
+ [minibatch_size, num_vertices, num_attributes] in instanced mode.
439
+ Broadcasting is supported along the minibatch axis.
440
+ rast: Main output tensor from `rasterize()`.
441
+ tri: Triangle tensor with shape [num_triangles, 3] and dtype `torch.int32`.
442
+ rast_db: (Optional) Tensor containing image-space derivatives of barycentrics,
443
+ i.e., the second output tensor from `rasterize()`. Enables computing
444
+ image-space derivatives of attributes.
445
+ diff_attrs: (Optional) List of attribute indices for which image-space
446
+ derivatives are to be computed. Special value 'all' is equivalent
447
+ to list [0, 1, ..., num_attributes - 1].
448
+
449
+ Returns:
450
+ A tuple of two tensors. The first output tensor contains interpolated
451
+ attributes and has shape [minibatch_size, height, width, num_attributes].
452
+ If `rast_db` and `diff_attrs` were specified, the second output tensor contains
453
+ the image-space derivatives of the selected attributes and has shape
454
+ [minibatch_size, height, width, 2 * len(diff_attrs)]. The derivatives of the
455
+ first selected attribute A will be on channels 0 and 1 as (dA/dX, dA/dY), etc.
456
+ Otherwise, the second output tensor will be an empty tensor with shape
457
+ [minibatch_size, height, width, 0].
458
+ """
459
+ # Sanitize the list of pixel differential attributes.
460
+ if diff_attrs is None:
461
+ diff_attrs = []
462
+ elif diff_attrs != 'all':
463
+ diff_attrs = np.asarray(diff_attrs, np.int32)
464
+ assert len(diff_attrs.shape) == 1
465
+ diff_attrs = diff_attrs.tolist()
466
+
467
+ diff_attrs_all = int(diff_attrs == 'all')
468
+ diff_attrs_list = [] if diff_attrs_all else diff_attrs
469
+
470
+ # Check inputs.
471
+ assert all(isinstance(x, torch.Tensor) for x in (attr, rast, tri))
472
+ if diff_attrs:
473
+ assert isinstance(rast_db, torch.Tensor)
474
+
475
+ # Choose stub.
476
+ if diff_attrs:
477
+ return _interpolate_func_da.apply(attr, rast, tri, rast_db, diff_attrs_all, diff_attrs_list)
478
+ else:
479
+ return _interpolate_func.apply(attr, rast, tri)
480
+
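# A sketch of interpolating per-vertex colors over the rasterized image while also
# requesting their screen-space derivatives (assumes `rast`, `rast_db` and `tri` from
# a prior rasterize() call with three vertices, as in the example above).
import torch
import nvdiffrast.torch as dr

col = torch.tensor([[[1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0],
                     [0.0, 0.0, 1.0]]], dtype=torch.float32, device='cuda')  # [1, 3, 3]
pix_col, pix_col_da = dr.interpolate(col, rast, tri, rast_db=rast_db, diff_attrs='all')
# pix_col: [1, H, W, 3]; pix_col_da: [1, H, W, 6] laid out as (dA/dX, dA/dY) per attribute.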
481
+ #----------------------------------------------------------------------------
482
+ # Texture
483
+ #----------------------------------------------------------------------------
484
+
485
+ # Linear-mipmap-linear and linear-mipmap-nearest: Mipmaps enabled.
486
+ class _texture_func_mip(torch.autograd.Function):
487
+ @staticmethod
488
+ def forward(ctx, filter_mode, tex, uv, uv_da, mip_level_bias, mip_wrapper, filter_mode_enum, boundary_mode_enum, *mip_stack):
489
+ empty = torch.tensor([])
490
+ if uv_da is None:
491
+ uv_da = empty
492
+ if mip_level_bias is None:
493
+ mip_level_bias = empty
494
+ if mip_wrapper is None:
495
+ mip_wrapper = _get_plugin().TextureMipWrapper()
496
+ out = _get_plugin().texture_fwd_mip(tex, uv, uv_da, mip_level_bias, mip_wrapper, mip_stack, filter_mode_enum, boundary_mode_enum)
497
+ ctx.save_for_backward(tex, uv, uv_da, mip_level_bias, *mip_stack)
498
+ ctx.saved_misc = filter_mode, mip_wrapper, filter_mode_enum, boundary_mode_enum
499
+ return out
500
+
501
+ @staticmethod
502
+ def backward(ctx, dy):
503
+ tex, uv, uv_da, mip_level_bias, *mip_stack = ctx.saved_tensors
504
+ filter_mode, mip_wrapper, filter_mode_enum, boundary_mode_enum = ctx.saved_misc
505
+ if filter_mode == 'linear-mipmap-linear':
506
+ g_tex, g_uv, g_uv_da, g_mip_level_bias, g_mip_stack = _get_plugin().texture_grad_linear_mipmap_linear(tex, uv, dy, uv_da, mip_level_bias, mip_wrapper, mip_stack, filter_mode_enum, boundary_mode_enum)
507
+ return (None, g_tex, g_uv, g_uv_da, g_mip_level_bias, None, None, None) + tuple(g_mip_stack)
508
+ else: # linear-mipmap-nearest
509
+ g_tex, g_uv, g_mip_stack = _get_plugin().texture_grad_linear_mipmap_nearest(tex, uv, dy, uv_da, mip_level_bias, mip_wrapper, mip_stack, filter_mode_enum, boundary_mode_enum)
510
+ return (None, g_tex, g_uv, None, None, None, None, None) + tuple(g_mip_stack)
511
+
512
+ # Linear and nearest: Mipmaps disabled.
513
+ class _texture_func(torch.autograd.Function):
514
+ @staticmethod
515
+ def forward(ctx, filter_mode, tex, uv, filter_mode_enum, boundary_mode_enum):
516
+ out = _get_plugin().texture_fwd(tex, uv, filter_mode_enum, boundary_mode_enum)
517
+ ctx.save_for_backward(tex, uv)
518
+ ctx.saved_misc = filter_mode, filter_mode_enum, boundary_mode_enum
519
+ return out
520
+
521
+ @staticmethod
522
+ def backward(ctx, dy):
523
+ tex, uv = ctx.saved_tensors
524
+ filter_mode, filter_mode_enum, boundary_mode_enum = ctx.saved_misc
525
+ if filter_mode == 'linear':
526
+ g_tex, g_uv = _get_plugin().texture_grad_linear(tex, uv, dy, filter_mode_enum, boundary_mode_enum)
527
+ return None, g_tex, g_uv, None, None
528
+ else: # nearest
529
+ g_tex = _get_plugin().texture_grad_nearest(tex, uv, dy, filter_mode_enum, boundary_mode_enum)
530
+ return None, g_tex, None, None, None
531
+
532
+ # Op wrapper.
533
+ def texture(tex, uv, uv_da=None, mip_level_bias=None, mip=None, filter_mode='auto', boundary_mode='wrap', max_mip_level=None):
534
+ """Perform texture sampling.
535
+
536
+ All input tensors must be contiguous and reside in GPU memory. The output tensor
537
+ will be contiguous and reside in GPU memory.
538
+
539
+ Args:
540
+ tex: Texture tensor with dtype `torch.float32`. For 2D textures, must have shape
541
+ [minibatch_size, tex_height, tex_width, tex_channels]. For cube map textures,
542
+ must have shape [minibatch_size, 6, tex_height, tex_width, tex_channels] where
543
+ tex_width and tex_height are equal. Note that `boundary_mode` must also be set
544
+ to 'cube' to enable cube map mode. Broadcasting is supported along the minibatch axis.
545
+ uv: Tensor containing per-pixel texture coordinates. When sampling a 2D texture,
546
+ must have shape [minibatch_size, height, width, 2]. When sampling a cube map
547
+ texture, must have shape [minibatch_size, height, width, 3].
548
+ uv_da: (Optional) Tensor containing image-space derivatives of texture coordinates.
549
+ Must have the same shape as `uv` except for the last dimension, which must be twice
550
+ as long.
551
+ mip_level_bias: (Optional) Per-pixel bias for mip level selection. If `uv_da` is omitted,
552
+ determines mip level directly. Must have shape [minibatch_size, height, width].
553
+ mip: (Optional) Preconstructed mipmap stack from a `texture_construct_mip()` call, or a list
554
+ of tensors specifying a custom mipmap stack. When specifying a custom mipmap stack,
555
+ the tensors in the list must follow the same format as `tex` except for width and
556
+ height that must follow the usual rules for mipmap sizes. The base level texture
557
+ is still supplied in `tex` and must not be included in the list. Gradients of a
558
+ custom mipmap stack are not automatically propagated to base texture but the mipmap
559
+ tensors will receive gradients of their own. If a mipmap stack is not specified
560
+ but the chosen filter mode requires it, the mipmap stack is constructed internally
561
+ and discarded afterwards.
562
+ filter_mode: Texture filtering mode to be used. Valid values are 'auto', 'nearest',
563
+ 'linear', 'linear-mipmap-nearest', and 'linear-mipmap-linear'. Mode 'auto'
564
+ selects 'linear' if neither `uv_da` nor `mip_level_bias` is specified, and
565
+ 'linear-mipmap-linear' when at least one of them is specified, these being
566
+ the highest-quality modes possible depending on the availability of the
567
+ image-space derivatives of the texture coordinates or direct mip level information.
568
+ boundary_mode: Valid values are 'wrap', 'clamp', 'zero', and 'cube'. If `tex` defines a
569
+ cube map, this must be set to 'cube'. The default mode 'wrap' takes the fractional
570
+ part of texture coordinates. Mode 'clamp' clamps texture coordinates to the
571
+ centers of the boundary texels. Mode 'zero' virtually extends the texture with
572
+ all-zero values in all directions.
573
+ max_mip_level: If specified, limits the number of mipmaps constructed and used in mipmap-based
574
+ filter modes.
575
+
576
+ Returns:
577
+ A tensor containing the results of the texture sampling with shape
578
+ [minibatch_size, height, width, tex_channels]. Cube map fetches with invalid uv coordinates
579
+ (e.g., zero vectors) output all zeros and do not propagate gradients.
580
+ """
581
+
582
+ # Default filter mode.
583
+ if filter_mode == 'auto':
584
+ filter_mode = 'linear-mipmap-linear' if (uv_da is not None or mip_level_bias is not None) else 'linear'
585
+
586
+ # Sanitize inputs.
587
+ if max_mip_level is None:
588
+ max_mip_level = -1
589
+ else:
590
+ max_mip_level = int(max_mip_level)
591
+ assert max_mip_level >= 0
592
+
593
+ # Check inputs.
594
+ assert isinstance(tex, torch.Tensor) and isinstance(uv, torch.Tensor)
595
+ if 'mipmap' in filter_mode:
596
+ assert isinstance(uv_da, torch.Tensor) or isinstance(mip_level_bias, torch.Tensor)
597
+
598
+ # If mipping disabled via max level=0, we may as well use simpler filtering internally.
599
+ if max_mip_level == 0 and filter_mode in ['linear-mipmap-nearest', 'linear-mipmap-linear']:
600
+ filter_mode = 'linear'
601
+
602
+ # Convert filter mode to internal enumeration.
603
+ filter_mode_dict = {'nearest': 0, 'linear': 1, 'linear-mipmap-nearest': 2, 'linear-mipmap-linear': 3}
604
+ filter_mode_enum = filter_mode_dict[filter_mode]
605
+
606
+ # Convert boundary mode to internal enumeration.
607
+ boundary_mode_dict = {'cube': 0, 'wrap': 1, 'clamp': 2, 'zero': 3}
608
+ boundary_mode_enum = boundary_mode_dict[boundary_mode]
609
+
610
+ # Construct a mipmap if necessary.
611
+ if 'mipmap' in filter_mode:
612
+ mip_wrapper, mip_stack = None, []
613
+ if mip is not None:
614
+ assert isinstance(mip, (_get_plugin().TextureMipWrapper, list))
615
+ if isinstance(mip, list):
616
+ assert all(isinstance(x, torch.Tensor) for x in mip)
617
+ mip_stack = mip
618
+ else:
619
+ mip_wrapper = mip
620
+ else:
621
+ mip_wrapper = _get_plugin().texture_construct_mip(tex, max_mip_level, boundary_mode == 'cube')
622
+
623
+ # Choose stub.
624
+ if filter_mode == 'linear-mipmap-linear' or filter_mode == 'linear-mipmap-nearest':
625
+ return _texture_func_mip.apply(filter_mode, tex, uv, uv_da, mip_level_bias, mip_wrapper, filter_mode_enum, boundary_mode_enum, *mip_stack)
626
+ else:
627
+ return _texture_func.apply(filter_mode, tex, uv, filter_mode_enum, boundary_mode_enum)
628
+
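# A sketch of mipmapped texture sampling driven by interpolated texture coordinates and
# their image-space derivatives (assumes `rast`, `rast_db`, `tri` from rasterize() and a
# per-vertex coordinate tensor `uv_attr` of shape [1, num_vertices, 2]; names are illustrative).
import torch
import nvdiffrast.torch as dr

tex = torch.rand(1, 256, 256, 3, dtype=torch.float32, device='cuda')   # [minibatch, H, W, C]
uv, uv_da = dr.interpolate(uv_attr, rast, tri, rast_db=rast_db, diff_attrs='all')
color = dr.texture(tex, uv, uv_da=uv_da, filter_mode='auto', boundary_mode='wrap')
# 'auto' resolves to 'linear-mipmap-linear' here because uv_da is supplied.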
629
+ # Mipmap precalculation for cases where the texture stays constant.
630
+ def texture_construct_mip(tex, max_mip_level=None, cube_mode=False):
631
+ """Construct a mipmap stack for a texture.
632
+
633
+ This function can be used for constructing a mipmap stack for a texture that is known to remain
634
+ constant. This avoids reconstructing it every time `texture()` is called.
635
+
636
+ Args:
637
+ tex: Texture tensor with the same constraints as in `texture()`.
638
+ max_mip_level: If specified, limits the number of mipmaps constructed.
639
+ cube_mode: Must be set to True if `tex` specifies a cube map texture.
640
+
641
+ Returns:
642
+ An opaque object containing the mipmap stack. This can be supplied in a call to `texture()`
643
+ in the `mip` argument.
644
+ """
645
+
646
+ assert isinstance(tex, torch.Tensor)
647
+ assert cube_mode is True or cube_mode is False
648
+ if max_mip_level is None:
649
+ max_mip_level = -1
650
+ else:
651
+ max_mip_level = int(max_mip_level)
652
+ assert max_mip_level >= 0
653
+ return _get_plugin().texture_construct_mip(tex, max_mip_level, cube_mode)
654
+
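# When the texture stays constant across training iterations, the mipmap stack can be
# built once and passed via `mip` to avoid per-call reconstruction (sketch; assumes
# `tex`, `uv` and `uv_da` as in the texture() example above).
mip = dr.texture_construct_mip(tex, max_mip_level=4)
color = dr.texture(tex, uv, uv_da=uv_da, mip=mip, filter_mode='linear-mipmap-linear')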
655
+ #----------------------------------------------------------------------------
656
+ # Antialias.
657
+ #----------------------------------------------------------------------------
658
+
659
+ class _antialias_func(torch.autograd.Function):
660
+ @staticmethod
661
+ def forward(ctx, color, rast, pos, tri, topology_hash, pos_gradient_boost):
662
+ out, work_buffer = _get_plugin().antialias_fwd(color, rast, pos, tri, topology_hash)
663
+ ctx.save_for_backward(color, rast, pos, tri)
664
+ ctx.saved_misc = pos_gradient_boost, work_buffer
665
+ return out
666
+
667
+ @staticmethod
668
+ def backward(ctx, dy):
669
+ color, rast, pos, tri = ctx.saved_tensors
670
+ pos_gradient_boost, work_buffer = ctx.saved_misc
671
+ g_color, g_pos = _get_plugin().antialias_grad(color, rast, pos, tri, dy, work_buffer)
672
+ if pos_gradient_boost != 1.0:
673
+ g_pos = g_pos * pos_gradient_boost
674
+ return g_color, None, g_pos, None, None, None
675
+
676
+ # Op wrapper.
677
+ def antialias(color, rast, pos, tri, topology_hash=None, pos_gradient_boost=1.0):
678
+ """Perform antialiasing.
679
+
680
+ All input tensors must be contiguous and reside in GPU memory. The output tensor
681
+ will be contiguous and reside in GPU memory.
682
+
683
+ Note that silhouette edge determination is based on vertex indices in the triangle
684
+ tensor. For it to work properly, a vertex belonging to multiple triangles must be
685
+ referred to using the same vertex index in each triangle. Otherwise, nvdiffrast will always
686
+ classify the adjacent edges as silhouette edges, which leads to bad performance and
687
+ potentially incorrect gradients. If you are unsure whether your data is good, check
688
+ which pixels are modified by the antialias operation and compare to the example in the
689
+ documentation.
690
+
691
+ Args:
692
+ color: Input image to antialias with shape [minibatch_size, height, width, num_channels].
693
+ rast: Main output tensor from `rasterize()`.
694
+ pos: Vertex position tensor used in the rasterization operation.
695
+ tri: Triangle tensor used in the rasterization operation.
696
+ topology_hash: (Optional) Preconstructed topology hash for the triangle tensor. If not
697
+ specified, the topology hash is constructed internally and discarded afterwards.
698
+ pos_gradient_boost: (Optional) Multiplier for gradients propagated to `pos`.
699
+
700
+ Returns:
701
+ A tensor containing the antialiased image with the same shape as `color` input tensor.
702
+ """
703
+
704
+ # Check inputs.
705
+ assert all(isinstance(x, torch.Tensor) for x in (color, rast, pos, tri))
706
+
707
+ # Construct topology hash unless provided by user.
708
+ if topology_hash is not None:
709
+ assert isinstance(topology_hash, _get_plugin().TopologyHashWrapper)
710
+ else:
711
+ topology_hash = _get_plugin().antialias_construct_topology_hash(tri)
712
+
713
+ # Instantiate the function.
714
+ return _antialias_func.apply(color, rast, pos, tri, topology_hash, pos_gradient_boost)
715
+
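# A sketch of antialiasing the shaded image so that silhouette edges contribute gradients
# to the vertex positions (assumes `pix_col`, `rast`, `pos` and `tri` from the examples above).
import nvdiffrast.torch as dr

aa_col = dr.antialias(pix_col, rast, pos, tri, pos_gradient_boost=1.0)
# aa_col has the same shape as pix_col; only pixels touched by silhouette edges are modified.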
716
+ # Topology hash precalculation for cases where the triangle array stays constant.
717
+ def antialias_construct_topology_hash(tri):
718
+ """Construct a topology hash for a triangle tensor.
719
+
720
+ This function can be used for constructing a topology hash for a triangle tensor that is
721
+ known to remain constant. This avoids reconstructing it every time `antialias()` is called.
722
+
723
+ Args:
724
+ tri: Triangle tensor with shape [num_triangles, 3]. Must be contiguous and reside in
725
+ GPU memory.
726
+
727
+ Returns:
728
+ An opaque object containing the topology hash. This can be supplied in a call to
729
+ `antialias()` in the `topology_hash` argument.
730
+ """
731
+ assert isinstance(tri, torch.Tensor)
732
+ return _get_plugin().antialias_construct_topology_hash(tri)
733
+
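# If the triangle tensor is static, the topology hash can likewise be precomputed once and
# reused across antialias() calls (sketch; assumes `tri`, `pix_col`, `rast`, `pos` as above).
topo_hash = dr.antialias_construct_topology_hash(tri)
aa_col = dr.antialias(pix_col, rast, pos, tri, topology_hash=topo_hash)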
734
+ #----------------------------------------------------------------------------
extensions/nvdiffrast/nvdiffrast/torch/torch_antialias.cpp ADDED
@@ -0,0 +1,243 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "torch_common.inl"
10
+ #include "torch_types.h"
11
+ #include "../common/common.h"
12
+ #include "../common/antialias.h"
13
+
14
+ //------------------------------------------------------------------------
15
+ // Kernel prototypes.
16
+
17
+ void AntialiasFwdMeshKernel (const AntialiasKernelParams p);
18
+ void AntialiasFwdDiscontinuityKernel(const AntialiasKernelParams p);
19
+ void AntialiasFwdAnalysisKernel (const AntialiasKernelParams p);
20
+ void AntialiasGradKernel (const AntialiasKernelParams p);
21
+
22
+ //------------------------------------------------------------------------
23
+ // Topology hash construction.
24
+
25
+ TopologyHashWrapper antialias_construct_topology_hash(torch::Tensor tri)
26
+ {
27
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(tri));
28
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
29
+ AntialiasKernelParams p = {}; // Initialize all fields to zero.
30
+
31
+ // Check inputs.
32
+ NVDR_CHECK_DEVICE(tri);
33
+ NVDR_CHECK_CONTIGUOUS(tri);
34
+ NVDR_CHECK_I32(tri);
35
+ NVDR_CHECK(tri.sizes().size() == 2 && tri.size(0) > 0 && tri.size(1) == 3, "tri must have shape [>0, 3]");
36
+
37
+ // Fill in kernel parameters.
38
+ p.numTriangles = tri.size(0);
39
+ p.numVertices = 0x7fffffff; // Let's not require vertex positions just to enable an error check.
40
+ p.tri = tri.data_ptr<int>();
41
+
42
+ // Kernel parameters.
43
+ p.allocTriangles = 64;
44
+ while (p.allocTriangles < p.numTriangles)
45
+ p.allocTriangles <<= 1; // Must be power of two.
46
+
47
+ // Construct the hash tensor and get pointer.
48
+ torch::TensorOptions opts = torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA);
49
+ torch::Tensor ev_hash = torch::zeros({(uint64_t)p.allocTriangles * AA_HASH_ELEMENTS_PER_TRIANGLE(p.allocTriangles) * 4}, opts);
50
+ p.evHash = (uint4*)(ev_hash.data_ptr<int>());
51
+
52
+ // Check alignment.
53
+ NVDR_CHECK(!((uintptr_t)p.evHash & 15), "ev_hash internal tensor not aligned to int4");
54
+
55
+ // Populate the hash.
56
+ void* args[] = {&p};
57
+ NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel((void*)AntialiasFwdMeshKernel, (p.numTriangles - 1) / AA_MESH_KERNEL_THREADS_PER_BLOCK + 1, AA_MESH_KERNEL_THREADS_PER_BLOCK, args, 0, stream));
58
+
59
+ // Return.
60
+ TopologyHashWrapper hash_wrap;
61
+ hash_wrap.ev_hash = ev_hash;
62
+ return hash_wrap;
63
+ }
64
+
65
+ //------------------------------------------------------------------------
66
+ // Forward op.
67
+
68
+ std::tuple<torch::Tensor, torch::Tensor> antialias_fwd(torch::Tensor color, torch::Tensor rast, torch::Tensor pos, torch::Tensor tri, TopologyHashWrapper topology_hash_wrap)
69
+ {
70
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(color));
71
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
72
+ AntialiasKernelParams p = {}; // Initialize all fields to zero.
73
+ p.instance_mode = (pos.sizes().size() > 2) ? 1 : 0;
74
+ torch::Tensor& topology_hash = topology_hash_wrap.ev_hash; // Unwrap.
75
+
76
+ // Check inputs.
77
+ NVDR_CHECK_DEVICE(color, rast, pos, tri, topology_hash);
78
+ NVDR_CHECK_CONTIGUOUS(color, rast, pos, tri, topology_hash);
79
+ NVDR_CHECK_F32(color, rast, pos);
80
+ NVDR_CHECK_I32(tri, topology_hash);
81
+
82
+ // Sanity checks.
83
+ NVDR_CHECK(color.sizes().size() == 4 && color.size(0) > 0 && color.size(1) > 0 && color.size(2) > 0 && color.size(3) > 0, "color must have shape[>0, >0, >0, >0]");
84
+ NVDR_CHECK(rast.sizes().size() == 4 && rast.size(0) > 0 && rast.size(1) > 0 && rast.size(2) > 0 && rast.size(3) == 4, "rast must have shape[>0, >0, >0, 4]");
85
+ NVDR_CHECK(tri.sizes().size() == 2 && tri.size(0) > 0 && tri.size(1) == 3, "tri must have shape [>0, 3]");
86
+ NVDR_CHECK(color.size(1) == rast.size(1) && color.size(2) == rast.size(2), "color and rast inputs must have same spatial dimensions");
87
+ if (p.instance_mode)
88
+ {
89
+ NVDR_CHECK(pos.sizes().size() == 3 && pos.size(0) > 0 && pos.size(1) > 0 && pos.size(2) == 4, "pos must have shape [>0, >0, 4] or [>0, 4]");
90
+ NVDR_CHECK(rast.size(0) == color.size(0) && pos.size(0) == color.size(0), "minibatch size mismatch between inputs color, rast, pos");
91
+ }
92
+ else
93
+ {
94
+ NVDR_CHECK(pos.sizes().size() == 2 && pos.size(0) > 0 && pos.size(1) == 4, "pos must have shape [>0, >0, 4] or [>0, 4]");
95
+ NVDR_CHECK(rast.size(0) == color.size(0), "minibatch size mismatch between inputs color, rast");
96
+ }
97
+
98
+ // Extract input dimensions.
99
+ p.numVertices = pos.size(p.instance_mode ? 1 : 0);
100
+ p.numTriangles = tri.size(0);
101
+ p.n = color.size(0);
102
+ p.height = color.size(1);
103
+ p.width = color.size(2);
104
+ p.channels = color.size(3);
105
+
106
+ // Get input pointers.
107
+ p.color = color.data_ptr<float>();
108
+ p.rasterOut = rast.data_ptr<float>();
109
+ p.tri = tri.data_ptr<int>();
110
+ p.pos = pos.data_ptr<float>();
111
+ p.evHash = (uint4*)(topology_hash.data_ptr<int>());
112
+
113
+ // Misc parameters.
114
+ p.xh = .5f * (float)p.width;
115
+ p.yh = .5f * (float)p.height;
116
+
117
+ // Determine hash allocation size.
118
+ p.allocTriangles = 64;
119
+ while (p.allocTriangles < p.numTriangles)
120
+ p.allocTriangles <<= 1; // Must be power of two.
121
+
122
+ // Allocate output tensors.
123
+ torch::Tensor out = color.detach().clone(); // Use color as base.
124
+ torch::TensorOptions opts = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA);
125
+ torch::Tensor work_buffer = torch::empty({p.n * p.width * p.height * 8 + 4}, opts); // 8 int for a maximum of two work items per pixel.
126
+ p.output = out.data_ptr<float>();
127
+ p.workBuffer = (int4*)(work_buffer.data_ptr<float>());
128
+
129
+ // Clear the work counters.
130
+ NVDR_CHECK_CUDA_ERROR(cudaMemsetAsync(p.workBuffer, 0, sizeof(int4), stream));
131
+
132
+ // Verify that buffers are aligned to allow float2/float4 operations.
133
+ NVDR_CHECK(!((uintptr_t)p.pos & 15), "pos input tensor not aligned to float4");
134
+ NVDR_CHECK(!((uintptr_t)p.rasterOut & 7), "raster_out input tensor not aligned to float2");
135
+ NVDR_CHECK(!((uintptr_t)p.workBuffer & 15), "work_buffer internal tensor not aligned to int4");
136
+ NVDR_CHECK(!((uintptr_t)p.evHash & 15), "topology_hash internal tensor not aligned to int4");
137
+
138
+ // Choose launch parameters for the discontinuity finder kernel and launch.
139
+ void* args[] = {&p};
140
+ dim3 blockSize(AA_DISCONTINUITY_KERNEL_BLOCK_WIDTH, AA_DISCONTINUITY_KERNEL_BLOCK_HEIGHT, 1);
141
+ dim3 gridSize = getLaunchGridSize(blockSize, p.width, p.height, p.n);
142
+ NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel((void*)AntialiasFwdDiscontinuityKernel, gridSize, blockSize, args, 0, stream));
143
+
144
+ // Determine optimum block size for the persistent analysis kernel and launch.
145
+ int device = 0;
146
+ int numCTA = 0;
147
+ int numSM = 0;
148
+ NVDR_CHECK_CUDA_ERROR(cudaGetDevice(&device));
149
+ NVDR_CHECK_CUDA_ERROR(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numCTA, (void*)AntialiasFwdAnalysisKernel, AA_ANALYSIS_KERNEL_THREADS_PER_BLOCK, 0));
150
+ NVDR_CHECK_CUDA_ERROR(cudaDeviceGetAttribute(&numSM, cudaDevAttrMultiProcessorCount, device));
151
+ NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel((void*)AntialiasFwdAnalysisKernel, numCTA * numSM, AA_ANALYSIS_KERNEL_THREADS_PER_BLOCK, args, 0, stream));
152
+
153
+ // Return results.
154
+ return std::tuple<torch::Tensor, torch::Tensor>(out, work_buffer);
155
+ }
156
+
157
+ //------------------------------------------------------------------------
158
+ // Gradient op.
159
+
160
+ std::tuple<torch::Tensor, torch::Tensor> antialias_grad(torch::Tensor color, torch::Tensor rast, torch::Tensor pos, torch::Tensor tri, torch::Tensor dy, torch::Tensor work_buffer)
161
+ {
162
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(color));
163
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
164
+ AntialiasKernelParams p = {}; // Initialize all fields to zero.
165
+ p.instance_mode = (pos.sizes().size() > 2) ? 1 : 0;
166
+
167
+ // Check inputs.
168
+ NVDR_CHECK_DEVICE(color, rast, pos, tri, dy, work_buffer);
169
+ NVDR_CHECK_CONTIGUOUS(color, rast, pos, tri, work_buffer);
170
+ NVDR_CHECK_F32(color, rast, pos, dy, work_buffer);
171
+ NVDR_CHECK_I32(tri);
172
+
173
+ // Sanity checks.
174
+ NVDR_CHECK(dy.sizes().size() == 4 && dy.size(0) > 0 && dy.size(1) > 0 && dy.size(2) > 0 && dy.size(3) > 0, "dy must have shape[>0, >0, >0, >0]");
175
+ NVDR_CHECK(color.sizes().size() == 4 && color.size(0) > 0 && color.size(1) > 0 && color.size(2) > 0 && color.size(3) > 0, "color must have shape[>0, >0, >0, >0]");
176
+ NVDR_CHECK(rast.sizes().size() == 4 && rast.size(0) > 0 && rast.size(1) > 0 && rast.size(2) > 0 && rast.size(3) == 4, "raster_out must have shape[>0, >0, >0, 4]");
177
+ NVDR_CHECK(tri.sizes().size() == 2 && tri.size(0) > 0 && tri.size(1) == 3, "tri must have shape [>0, 3]");
178
+ NVDR_CHECK(color.size(1) == rast.size(1) && color.size(2) == rast.size(2), "color and raster_out inputs must have same spatial dimensions");
179
+ NVDR_CHECK(color.size(1) == dy.size(1) && color.size(2) == dy.size(2) && color.size(3) == dy.size(3), "color and dy inputs must have same dimensions");
180
+ if (p.instance_mode)
181
+ {
182
+ NVDR_CHECK(pos.sizes().size() == 3 && pos.size(0) > 0 && pos.size(1) > 0 && pos.size(2) == 4, "pos must have shape [>0, >0, 4] or [>0, 4]");
183
+ NVDR_CHECK(rast.size(0) == color.size(0) && pos.size(0) == color.size(0), "minibatch size mismatch between inputs color, raster_out, pos");
184
+ NVDR_CHECK(dy.size(0) == color.size(0) && rast.size(0) == color.size(0) && pos.size(0) == color.size(0), "minibatch size mismatch between inputs dy, color, raster_out, pos");
185
+ }
186
+ else
187
+ {
188
+ NVDR_CHECK(pos.sizes().size() == 2 && pos.size(0) > 0 && pos.size(1) == 4, "pos must have shape [>0, >0, 4] or [>0, 4]");
189
+ NVDR_CHECK(rast.size(0) == color.size(0), "minibatch size mismatch between inputs color, raster_out");
190
+ NVDR_CHECK(dy.size(0) == color.size(0) && rast.size(0) == color.size(0), "minibatch size mismatch between inputs dy, color, raster_out");
191
+ }
192
+
193
+ // Extract input dimensions.
194
+ p.numVertices = pos.size(p.instance_mode ? 1 : 0);
195
+ p.numTriangles = tri.size(0);
196
+ p.n = color.size(0);
197
+ p.height = color.size(1);
198
+ p.width = color.size(2);
199
+ p.channels = color.size(3);
200
+
201
+ // Ensure dy is contiguous.
202
+ torch::Tensor dy_ = dy.contiguous();
203
+
204
+ // Get input pointers.
205
+ p.color = color.data_ptr<float>();
206
+ p.rasterOut = rast.data_ptr<float>();
207
+ p.tri = tri.data_ptr<int>();
208
+ p.pos = pos.data_ptr<float>();
209
+ p.dy = dy_.data_ptr<float>();
210
+ p.workBuffer = (int4*)(work_buffer.data_ptr<float>());
211
+
212
+ // Misc parameters.
213
+ p.xh = .5f * (float)p.width;
214
+ p.yh = .5f * (float)p.height;
215
+
216
+ // Allocate output tensors.
217
+ torch::Tensor grad_color = dy_.detach().clone(); // Use dy as base.
218
+ torch::Tensor grad_pos = torch::zeros_like(pos);
219
+ p.gradColor = grad_color.data_ptr<float>();
220
+ p.gradPos = grad_pos.data_ptr<float>();
221
+
222
+ // Clear gradient kernel work counter.
223
+ NVDR_CHECK_CUDA_ERROR(cudaMemsetAsync(&p.workBuffer[0].y, 0, sizeof(int), stream));
224
+
225
+ // Verify that buffers are aligned to allow float2/float4 operations.
226
+ NVDR_CHECK(!((uintptr_t)p.pos & 15), "pos input tensor not aligned to float4");
227
+ NVDR_CHECK(!((uintptr_t)p.workBuffer & 15), "work_buffer internal tensor not aligned to int4");
228
+
229
+ // Determine optimum block size for the gradient kernel and launch.
230
+ void* args[] = {&p};
231
+ int device = 0;
232
+ int numCTA = 0;
233
+ int numSM = 0;
234
+ NVDR_CHECK_CUDA_ERROR(cudaGetDevice(&device));
235
+ NVDR_CHECK_CUDA_ERROR(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numCTA, (void*)AntialiasGradKernel, AA_GRAD_KERNEL_THREADS_PER_BLOCK, 0));
236
+ NVDR_CHECK_CUDA_ERROR(cudaDeviceGetAttribute(&numSM, cudaDevAttrMultiProcessorCount, device));
237
+ NVDR_CHECK_CUDA_ERROR(cudaLaunchKernel((void*)AntialiasGradKernel, numCTA * numSM, AA_GRAD_KERNEL_THREADS_PER_BLOCK, args, 0, stream));
238
+
239
+ // Return results.
240
+ return std::tuple<torch::Tensor, torch::Tensor>(grad_color, grad_pos);
241
+ }
242
+
243
+ //------------------------------------------------------------------------
extensions/nvdiffrast/nvdiffrast/torch/torch_bindings.cpp ADDED
@@ -0,0 +1,73 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "torch_common.inl"
10
+ #include "torch_types.h"
11
+ #include <tuple>
12
+
13
+ //------------------------------------------------------------------------
14
+ // Op prototypes. Return type macros for readability.
15
+
16
+ #define OP_RETURN_T torch::Tensor
17
+ #define OP_RETURN_TT std::tuple<torch::Tensor, torch::Tensor>
18
+ #define OP_RETURN_TTT std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
19
+ #define OP_RETURN_TTTT std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
20
+ #define OP_RETURN_TTV std::tuple<torch::Tensor, torch::Tensor, std::vector<torch::Tensor> >
21
+ #define OP_RETURN_TTTTV std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor, std::vector<torch::Tensor> >
22
+
23
+ OP_RETURN_TT rasterize_fwd_cuda (RasterizeCRStateWrapper& stateWrapper, torch::Tensor pos, torch::Tensor tri, std::tuple<int, int> resolution, torch::Tensor ranges, int peeling_idx);
24
+ OP_RETURN_T rasterize_grad (torch::Tensor pos, torch::Tensor tri, torch::Tensor out, torch::Tensor dy);
25
+ OP_RETURN_T rasterize_grad_db (torch::Tensor pos, torch::Tensor tri, torch::Tensor out, torch::Tensor dy, torch::Tensor ddb);
26
+ OP_RETURN_TT interpolate_fwd (torch::Tensor attr, torch::Tensor rast, torch::Tensor tri);
27
+ OP_RETURN_TT interpolate_fwd_da (torch::Tensor attr, torch::Tensor rast, torch::Tensor tri, torch::Tensor rast_db, bool diff_attrs_all, std::vector<int>& diff_attrs_vec);
28
+ OP_RETURN_TT interpolate_grad (torch::Tensor attr, torch::Tensor rast, torch::Tensor tri, torch::Tensor dy);
29
+ OP_RETURN_TTT interpolate_grad_da (torch::Tensor attr, torch::Tensor rast, torch::Tensor tri, torch::Tensor dy, torch::Tensor rast_db, torch::Tensor dda, bool diff_attrs_all, std::vector<int>& diff_attrs_vec);
30
+ TextureMipWrapper texture_construct_mip (torch::Tensor tex, int max_mip_level, bool cube_mode);
31
+ OP_RETURN_T texture_fwd (torch::Tensor tex, torch::Tensor uv, int filter_mode, int boundary_mode);
32
+ OP_RETURN_T texture_fwd_mip (torch::Tensor tex, torch::Tensor uv, torch::Tensor uv_da, torch::Tensor mip_level_bias, TextureMipWrapper mip_wrapper, std::vector<torch::Tensor> mip_stack, int filter_mode, int boundary_mode);
33
+ OP_RETURN_T texture_grad_nearest (torch::Tensor tex, torch::Tensor uv, torch::Tensor dy, int filter_mode, int boundary_mode);
34
+ OP_RETURN_TT texture_grad_linear (torch::Tensor tex, torch::Tensor uv, torch::Tensor dy, int filter_mode, int boundary_mode);
35
+ OP_RETURN_TTV texture_grad_linear_mipmap_nearest (torch::Tensor tex, torch::Tensor uv, torch::Tensor dy, torch::Tensor uv_da, torch::Tensor mip_level_bias, TextureMipWrapper mip_wrapper, std::vector<torch::Tensor> mip_stack, int filter_mode, int boundary_mode);
36
+ OP_RETURN_TTTTV texture_grad_linear_mipmap_linear (torch::Tensor tex, torch::Tensor uv, torch::Tensor dy, torch::Tensor uv_da, torch::Tensor mip_level_bias, TextureMipWrapper mip_wrapper, std::vector<torch::Tensor> mip_stack, int filter_mode, int boundary_mode);
37
+ TopologyHashWrapper antialias_construct_topology_hash (torch::Tensor tri);
38
+ OP_RETURN_TT antialias_fwd (torch::Tensor color, torch::Tensor rast, torch::Tensor pos, torch::Tensor tri, TopologyHashWrapper topology_hash);
39
+ OP_RETURN_TT antialias_grad (torch::Tensor color, torch::Tensor rast, torch::Tensor pos, torch::Tensor tri, torch::Tensor dy, torch::Tensor work_buffer);
40
+
41
+ //------------------------------------------------------------------------
42
+
43
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
44
+ // State classes.
45
+ pybind11::class_<RasterizeCRStateWrapper>(m, "RasterizeCRStateWrapper").def(pybind11::init<int>());
46
+ pybind11::class_<TextureMipWrapper>(m, "TextureMipWrapper").def(pybind11::init<>());
47
+ pybind11::class_<TopologyHashWrapper>(m, "TopologyHashWrapper");
48
+
49
+ // Plumbing to torch/c10 logging system.
50
+ m.def("get_log_level", [](void) { return FLAGS_caffe2_log_level; }, "get log level");
51
+ m.def("set_log_level", [](int level){ FLAGS_caffe2_log_level = level; }, "set log level");
52
+
53
+ // Ops.
54
+ m.def("rasterize_fwd_cuda", &rasterize_fwd_cuda, "rasterize forward op (cuda)");
55
+ m.def("rasterize_grad", &rasterize_grad, "rasterize gradient op ignoring db gradients");
56
+ m.def("rasterize_grad_db", &rasterize_grad_db, "rasterize gradient op with db gradients");
57
+ m.def("interpolate_fwd", &interpolate_fwd, "interpolate forward op without attribute derivatives");
58
+ m.def("interpolate_fwd_da", &interpolate_fwd_da, "interpolate forward op with attribute derivatives");
59
+ m.def("interpolate_grad", &interpolate_grad, "interpolate gradient op without attribute derivatives");
60
+ m.def("interpolate_grad_da", &interpolate_grad_da, "interpolate gradient op with attribute derivatives");
61
+ m.def("texture_construct_mip", &texture_construct_mip, "texture mipmap construction");
62
+ m.def("texture_fwd", &texture_fwd, "texture forward op without mipmapping");
63
+ m.def("texture_fwd_mip", &texture_fwd_mip, "texture forward op with mipmapping");
64
+ m.def("texture_grad_nearest", &texture_grad_nearest, "texture gradient op in nearest mode");
65
+ m.def("texture_grad_linear", &texture_grad_linear, "texture gradient op in linear mode");
66
+ m.def("texture_grad_linear_mipmap_nearest", &texture_grad_linear_mipmap_nearest, "texture gradient op in linear-mipmap-nearest mode");
67
+ m.def("texture_grad_linear_mipmap_linear", &texture_grad_linear_mipmap_linear, "texture gradient op in linear-mipmap-linear mode");
68
+ m.def("antialias_construct_topology_hash", &antialias_construct_topology_hash, "antialias topology hash construction");
69
+ m.def("antialias_fwd", &antialias_fwd, "antialias forward op");
70
+ m.def("antialias_grad", &antialias_grad, "antialias gradient op");
71
+ }
72
+
73
+ //------------------------------------------------------------------------
extensions/nvdiffrast/nvdiffrast/torch/torch_bindings_gl.cpp ADDED
@@ -0,0 +1,30 @@
1
+ // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
2
+ //
3
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
4
+ // and proprietary rights in and to this software, related documentation
5
+ // and any modifications thereto. Any use, reproduction, disclosure or
6
+ // distribution of this software and related documentation without an express
7
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
8
+
9
+ #include "torch_common.inl"
10
+ #include "torch_types.h"
11
+ #include <tuple>
12
+
13
+ //------------------------------------------------------------------------
14
+ // Op prototypes.
15
+
16
+ std::tuple<torch::Tensor, torch::Tensor> rasterize_fwd_gl(RasterizeGLStateWrapper& stateWrapper, torch::Tensor pos, torch::Tensor tri, std::tuple<int, int> resolution, torch::Tensor ranges, int peeling_idx);
17
+
18
+ //------------------------------------------------------------------------
19
+
20
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
21
+ // State classes.
22
+ pybind11::class_<RasterizeGLStateWrapper>(m, "RasterizeGLStateWrapper").def(pybind11::init<bool, bool, int>())
23
+ .def("set_context", &RasterizeGLStateWrapper::setContext)
24
+ .def("release_context", &RasterizeGLStateWrapper::releaseContext);
25
+
26
+ // Ops.
27
+ m.def("rasterize_fwd_gl", &rasterize_fwd_gl, "rasterize forward op (opengl)");
28
+ }
29
+
30
+ //------------------------------------------------------------------------