Spaces:
Build error
Build error
Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +79 -0
- LICENSE.txt +95 -0
- Notice +193 -0
- app.py +316 -0
- app_hg.py +316 -0
- assets/logo.png +0 -0
- assets/overview_3.png +0 -0
- assets/radar.png +0 -0
- assets/runtime.png +0 -0
- assets/teaser.png +3 -0
- demos/example_000.png +0 -0
- demos/example_001.png +0 -0
- demos/example_002.png +0 -0
- demos/example_003.png +3 -0
- demos/example_list.txt +5 -0
- env_install.sh +6 -0
- infer/__init__.py +30 -0
- infer/__pycache__/__init__.cpython-311.pyc +0 -0
- infer/__pycache__/__init__.cpython-312.pyc +0 -0
- infer/__pycache__/__init__.cpython-38.pyc +0 -0
- infer/__pycache__/__init__.cpython-39.pyc +0 -0
- infer/__pycache__/gif_render.cpython-311.pyc +0 -0
- infer/__pycache__/gif_render.cpython-38.pyc +0 -0
- infer/__pycache__/gif_render.cpython-39.pyc +0 -0
- infer/__pycache__/image_to_views.cpython-311.pyc +0 -0
- infer/__pycache__/image_to_views.cpython-38.pyc +0 -0
- infer/__pycache__/image_to_views.cpython-39.pyc +0 -0
- infer/__pycache__/rembg.cpython-38.pyc +0 -0
- infer/__pycache__/rembg.cpython-39.pyc +0 -0
- infer/__pycache__/removebg.cpython-311.pyc +0 -0
- infer/__pycache__/removebg.cpython-38.pyc +0 -0
- infer/__pycache__/removebg.cpython-39.pyc +0 -0
- infer/__pycache__/text_to_image.cpython-311.pyc +0 -0
- infer/__pycache__/text_to_image.cpython-38.pyc +0 -0
- infer/__pycache__/text_to_image.cpython-39.pyc +0 -0
- infer/__pycache__/utils.cpython-311.pyc +0 -0
- infer/__pycache__/utils.cpython-312.pyc +0 -0
- infer/__pycache__/utils.cpython-38.pyc +0 -0
- infer/__pycache__/utils.cpython-39.pyc +0 -0
- infer/__pycache__/views_to_mesh.cpython-311.pyc +0 -0
- infer/__pycache__/views_to_mesh.cpython-38.pyc +0 -0
- infer/__pycache__/views_to_mesh.cpython-39.pyc +0 -0
- infer/gif_render.py +77 -0
- infer/image_to_views.py +124 -0
- infer/removebg.py +101 -0
- infer/text_to_image.py +103 -0
- infer/utils.py +85 -0
- infer/views_to_mesh.py +152 -0
- main.py +162 -0
- mvd/__init__.py +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,82 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
assets/teaser.png filter=lfs diff=lfs merge=lfs -text
|
37 |
+
demos/example_003.png filter=lfs diff=lfs merge=lfs -text
|
38 |
+
outputs/app_output/0/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
39 |
+
outputs/app_output/0/output.gif filter=lfs diff=lfs merge=lfs -text
|
40 |
+
outputs/app_output/0/texture.png filter=lfs diff=lfs merge=lfs -text
|
41 |
+
outputs/app_output/1/img.png filter=lfs diff=lfs merge=lfs -text
|
42 |
+
outputs/app_output/1/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
43 |
+
outputs/app_output/1/output.gif filter=lfs diff=lfs merge=lfs -text
|
44 |
+
outputs/app_output/1/texture.png filter=lfs diff=lfs merge=lfs -text
|
45 |
+
outputs/app_output/11/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
46 |
+
outputs/app_output/11/output.gif filter=lfs diff=lfs merge=lfs -text
|
47 |
+
outputs/app_output/11/texture.png filter=lfs diff=lfs merge=lfs -text
|
48 |
+
outputs/app_output/12/img.png filter=lfs diff=lfs merge=lfs -text
|
49 |
+
outputs/app_output/12/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
50 |
+
outputs/app_output/12/output.gif filter=lfs diff=lfs merge=lfs -text
|
51 |
+
outputs/app_output/12/texture.png filter=lfs diff=lfs merge=lfs -text
|
52 |
+
outputs/app_output/13/img.png filter=lfs diff=lfs merge=lfs -text
|
53 |
+
outputs/app_output/13/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
54 |
+
outputs/app_output/13/output.gif filter=lfs diff=lfs merge=lfs -text
|
55 |
+
outputs/app_output/13/texture.png filter=lfs diff=lfs merge=lfs -text
|
56 |
+
outputs/app_output/14/img.png filter=lfs diff=lfs merge=lfs -text
|
57 |
+
outputs/app_output/14/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
58 |
+
outputs/app_output/15/img.png filter=lfs diff=lfs merge=lfs -text
|
59 |
+
outputs/app_output/15/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
60 |
+
outputs/app_output/16/img.png filter=lfs diff=lfs merge=lfs -text
|
61 |
+
outputs/app_output/16/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
62 |
+
outputs/app_output/17/img.png filter=lfs diff=lfs merge=lfs -text
|
63 |
+
outputs/app_output/17/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
64 |
+
outputs/app_output/17/output.gif filter=lfs diff=lfs merge=lfs -text
|
65 |
+
outputs/app_output/17/texture.png filter=lfs diff=lfs merge=lfs -text
|
66 |
+
outputs/app_output/18/img.png filter=lfs diff=lfs merge=lfs -text
|
67 |
+
outputs/app_output/18/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
68 |
+
outputs/app_output/18/output.gif filter=lfs diff=lfs merge=lfs -text
|
69 |
+
outputs/app_output/18/texture.png filter=lfs diff=lfs merge=lfs -text
|
70 |
+
outputs/app_output/19/img.png filter=lfs diff=lfs merge=lfs -text
|
71 |
+
outputs/app_output/19/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
72 |
+
outputs/app_output/19/output.gif filter=lfs diff=lfs merge=lfs -text
|
73 |
+
outputs/app_output/19/texture.png filter=lfs diff=lfs merge=lfs -text
|
74 |
+
outputs/app_output/2/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
75 |
+
outputs/app_output/20/img.png filter=lfs diff=lfs merge=lfs -text
|
76 |
+
outputs/app_output/20/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
77 |
+
outputs/app_output/20/output.gif filter=lfs diff=lfs merge=lfs -text
|
78 |
+
outputs/app_output/20/texture.png filter=lfs diff=lfs merge=lfs -text
|
79 |
+
outputs/app_output/22/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
80 |
+
outputs/app_output/22/output.gif filter=lfs diff=lfs merge=lfs -text
|
81 |
+
outputs/app_output/22/texture.png filter=lfs diff=lfs merge=lfs -text
|
82 |
+
outputs/app_output/23/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
83 |
+
outputs/app_output/23/output.gif filter=lfs diff=lfs merge=lfs -text
|
84 |
+
outputs/app_output/23/texture.png filter=lfs diff=lfs merge=lfs -text
|
85 |
+
outputs/app_output/24/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
86 |
+
outputs/app_output/24/output.gif filter=lfs diff=lfs merge=lfs -text
|
87 |
+
outputs/app_output/24/texture.png filter=lfs diff=lfs merge=lfs -text
|
88 |
+
outputs/app_output/25/img.png filter=lfs diff=lfs merge=lfs -text
|
89 |
+
outputs/app_output/25/img_nobg.png filter=lfs diff=lfs merge=lfs -text
|
90 |
+
outputs/app_output/25/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
91 |
+
outputs/app_output/25/output.gif filter=lfs diff=lfs merge=lfs -text
|
92 |
+
outputs/app_output/25/texture.png filter=lfs diff=lfs merge=lfs -text
|
93 |
+
outputs/app_output/26/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
94 |
+
outputs/app_output/27/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
95 |
+
outputs/app_output/27/output.gif filter=lfs diff=lfs merge=lfs -text
|
96 |
+
outputs/app_output/27/texture.png filter=lfs diff=lfs merge=lfs -text
|
97 |
+
outputs/app_output/28/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
98 |
+
outputs/app_output/3/img.png filter=lfs diff=lfs merge=lfs -text
|
99 |
+
outputs/app_output/3/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
100 |
+
outputs/app_output/4/img.png filter=lfs diff=lfs merge=lfs -text
|
101 |
+
outputs/app_output/4/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
102 |
+
outputs/app_output/6/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
103 |
+
outputs/app_output/7/img.png filter=lfs diff=lfs merge=lfs -text
|
104 |
+
outputs/app_output/7/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
105 |
+
outputs/app_output/7/output.gif filter=lfs diff=lfs merge=lfs -text
|
106 |
+
outputs/app_output/7/texture.png filter=lfs diff=lfs merge=lfs -text
|
107 |
+
outputs/app_output/8/img.png filter=lfs diff=lfs merge=lfs -text
|
108 |
+
outputs/app_output/8/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
109 |
+
outputs/app_output/8/output.gif filter=lfs diff=lfs merge=lfs -text
|
110 |
+
outputs/app_output/8/texture.png filter=lfs diff=lfs merge=lfs -text
|
111 |
+
outputs/test/mesh.glb filter=lfs diff=lfs merge=lfs -text
|
112 |
+
outputs/test/output.gif filter=lfs diff=lfs merge=lfs -text
|
113 |
+
outputs/test/texture.png filter=lfs diff=lfs merge=lfs -text
|
114 |
+
weights/open3d_cpu-0.17.0-cp39-cp39-manylinux_2_17_x86_64.whl filter=lfs diff=lfs merge=lfs -text
|
LICENSE.txt
ADDED
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
2 |
+
Tencent Hunyuan3D Release Date: 2024.11.5
|
3 |
+
By clicking to agree or by using, reproducing, modifying, distributing, performing or displaying any portion or element of the Tencent Hunyuan Works, including via any Hosted Service, You will be deemed to have recognized and accepted the content of this Agreement, which is effective immediately.
|
4 |
+
|
5 |
+
1.DEFINITIONS.
|
6 |
+
|
7 |
+
a.“Acceptable Use Policy” shall mean the policy made available by Tencent as set forth in the Exhibit A.
|
8 |
+
b.“Agreement” shall mean the terms and conditions for use, reproduction, distribution, modification, performance and displaying of Tencent Hunyuan Works or any portion or element thereof set forth herein.
|
9 |
+
c.“Documentation” shall mean the specifications, manuals and documentation for Tencent Hunyuan made publicly available by Tencent.
|
10 |
+
d.“Hosted Service” shall mean a hosted service offered via an application programming interface (API), web access, or any other electronic or remote means.
|
11 |
+
e.“Non-Commercial” shall mean a use of the Tencent Hunyuan Works for academic, research and education purposes only.
|
12 |
+
f.“Licensee,” “You” or “Your” shall mean a natural person or legal entity exercising the rights granted by this Agreement and/or using the Tencent Hunyuan Works for any purpose and in any field of use.
|
13 |
+
g.“Materials” shall mean, collectively, Tencent’s proprietary Tencent Hunyuan and Documentation (and any portion thereof) as made available by Tencent under this Agreement.
|
14 |
+
h.“Model Derivatives” shall mean all: (i) modifications to Tencent Hunyuan or any Model Derivative of Tencent Hunyuan; (ii) works based on Tencent Hunyuan or any Model Derivative of Tencent Hunyuan; or (iii) any other machine learning model which is created by transfer of patterns of the weights, parameters, operations, or Output of Tencent Hunyuan or any Model Derivative of Tencent Hunyuan, to that model in order to cause that model to perform similarly to Tencent Hunyuan or a Model Derivative of Tencent Hunyuan, including distillation methods, methods that use intermediate data representations, or methods based on the generation of synthetic data Outputs by Tencent Hunyuan or a Model Derivative of Tencent Hunyuan for training that model. For clarity, Outputs by themselves are not deemed Model Derivatives.
|
15 |
+
i.“Output” shall mean the information and/or content output of Tencent Hunyuan or a Model Derivative that results from operating or otherwise using Tencent Hunyuan or a Model Derivative, including via a Hosted Service.
|
16 |
+
j.“Tencent,” “We” or “Us” shall mean THL A29 Limited.
|
17 |
+
k.“Tencent Hunyuan” shall mean the large language models, text/image/video/audio/3D generation models, and multimodal large language models and their software and algorithms, including trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing made publicly available by Us, including, without limitation to, Tencent Hunyuan 3D released at [https://github.com/Tencent/Hunyuan3D-1/tree/main].
|
18 |
+
l.“Tencent Hunyuan Works” shall mean: (i) the Materials; (ii) Model Derivatives; and (iii) all derivative works thereof.
|
19 |
+
m.“Third Party” or “Third Parties” shall mean individuals or legal entities that are not under common control with Us or You.
|
20 |
+
n.“including” shall mean including but not limited to.
|
21 |
+
|
22 |
+
2.GRANT OF RIGHTS.
|
23 |
+
|
24 |
+
We grant You a non-exclusive, non-transferable, non-commercial, royalty-free limited license under Tencent’s intellectual property or other rights owned by Us embodied in or utilized by the Materials to use, reproduce, distribute, create derivative works of (including Model Derivatives), and make modifications to the Materials, only in accordance with the terms of this Agreement and the Acceptable Use Policy, and You must not violate (or encourage or permit anyone else to violate) any term of this Agreement or the Acceptable Use Policy.
|
25 |
+
|
26 |
+
3.DISTRIBUTION.
|
27 |
+
|
28 |
+
You may, subject to Your compliance with this Agreement, distribute or make available to Third Parties the Tencent Hunyuan Works provided that You meet all of the following conditions:
|
29 |
+
a.You must provide all such Third Party recipients of the Tencent Hunyuan Works or products or services using them a copy of this Agreement;
|
30 |
+
b.You must cause any modified files to carry prominent notices stating that You changed the files;
|
31 |
+
c.You are encouraged to: (i) publish at least one technology introduction blogpost or one public statement expressing Your experience of using the Tencent Hunyuan Works; and (ii) mark the products or services developed by using the Tencent Hunyuan Works to indicate that the product/service is “Powered by Tencent Hunyuan”; and
|
32 |
+
d.All distributions to Third Parties (other than through a Hosted Service) must be accompanied by a “Notice” text file that contains the following notice: “Tencent Hunyuan is licensed under the Tencent Hunyuan Non-Commercial License Agreement, Copyright © 2024 Tencent. All Rights Reserved. The trademark rights of “Tencent Hunyuan” are owned by Tencent or its affiliate.”
|
33 |
+
You may add Your own copyright statement to Your modifications and, except as set forth in this Section and in Section 5, may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Model Derivatives as a whole, provided Your use, reproduction, modification, distribution, performance and display of the work otherwise complies with the terms and conditions of this Agreement. If You receive Tencent Hunyuan Works from a Licensee as part of an integrated end user product, then this Section 3 of this Agreement will not apply to You.
|
34 |
+
|
35 |
+
4.ADDITIONAL NON-COMMERCIAL TERMS
|
36 |
+
|
37 |
+
If, on the Tencent Hunyuan version release date, the monthly active users of all products or services made available by or for Licensee is greater than 100 million monthly active users in the preceding calendar month, You must request a license from Tencent, which Tencent may grant to You in its sole discretion, and You are not authorized to exercise any of the rights under this Agreement unless or until Tencent otherwise expressly grants You such rights.
|
38 |
+
|
39 |
+
5.RULES OF USE.
|
40 |
+
|
41 |
+
a.Your use of the Tencent Hunyuan Works must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Tencent Hunyuan Works, which is hereby incorporated by reference into this Agreement. You must include the use restrictions referenced in these Sections 5(a) and 5(b) as an enforceable provision in any agreement (e.g., license agreement, terms of use, etc.) governing the use and/or distribution of Tencent Hunyuan Works and You must provide notice to subsequent users to whom You distribute that Tencent Hunyuan Works are subject to the use restrictions in these Sections 5(a) and 5(b).
|
42 |
+
b.You must not use the Tencent Hunyuan Works or any Output or results of the Tencent Hunyuan Works to improve any other large language model (other than Tencent Hunyuan or Model Derivatives thereof).
|
43 |
+
|
44 |
+
6.INTELLECTUAL PROPERTY.
|
45 |
+
|
46 |
+
a.Subject to Tencent’s ownership of Tencent Hunyuan Works made by or for Tencent and intellectual property rights therein, conditioned upon Your compliance with the terms and conditions of this Agreement, as between You and Tencent, You will be the owner of any derivative works and modifications of the Materials and any Model Derivatives that are made by or for You.
|
47 |
+
b.No trademark licenses are granted under this Agreement, and in connection with the Tencent Hunyuan Works, Licensee may not use any name or mark owned by or associated with Tencent or any of its affiliates, except as required for reasonable and customary use in describing and distributing the Tencent Hunyuan Works. Tencent hereby grants You a license to use “Tencent Hunyuan” (the “Mark”) solely as required to comply with the provisions of Section 3(c), provided that You comply with any applicable laws related to trademark protection. All goodwill arising out of Your use of the Mark will inure to the benefit of Tencent.
|
48 |
+
c.If You commence a lawsuit or other proceedings (including a cross-claim or counterclaim in a lawsuit) against Us or any person or entity alleging that the Materials or any Output, or any portion of any of the foregoing, infringe any intellectual property or other right owned or licensable by You, then all licenses granted to You under this Agreement shall terminate as of the date such lawsuit or other proceeding is filed. You will defend, indemnify and hold harmless Us from and against any claim by any Third Party arising out of or related to Your or the Third Party’s use or distribution of the Tencent Hunyuan Works.
|
49 |
+
d.Tencent claims no rights in Outputs You generate. You and Your users are solely responsible for Outputs and their subsequent uses.
|
50 |
+
|
51 |
+
7.DISCLAIMERS OF WARRANTY AND LIMITATIONS OF LIABILITY.
|
52 |
+
|
53 |
+
a.We are not obligated to support, update, provide training for, or develop any further version of the Tencent Hunyuan Works or to grant any license thereto.
|
54 |
+
b.UNLESS AND ONLY TO THE EXTENT REQUIRED BY APPLICABLE LAW, THE TENCENT HUNYUAN WORKS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED “AS IS” WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES OF ANY KIND INCLUDING ANY WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, COURSE OF DEALING, USAGE OF TRADE, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING, REPRODUCING, MODIFYING, PERFORMING, DISPLAYING OR DISTRIBUTING ANY OF THE TENCENT HUNYUAN WORKS OR OUTPUTS AND ASSUME ANY AND ALL RISKS ASSOCIATED WITH YOUR OR A THIRD PARTY’S USE OR DISTRIBUTION OF ANY OF THE TENCENT HUNYUAN WORKS OR OUTPUTS AND YOUR EXERCISE OF RIGHTS AND PERMISSIONS UNDER THIS AGREEMENT.
|
55 |
+
c.TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL TENCENT OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, FOR ANY DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, CONSEQUENTIAL OR PUNITIVE DAMAGES, OR LOST PROFITS OF ANY KIND ARISING FROM THIS AGREEMENT OR RELATED TO ANY OF THE TENCENT HUNYUAN WORKS OR OUTPUTS, EVEN IF TENCENT OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
|
56 |
+
|
57 |
+
8.SURVIVAL AND TERMINATION.
|
58 |
+
|
59 |
+
a.The term of this Agreement shall commence upon Your acceptance of this Agreement or access to the Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein.
|
60 |
+
b.We may terminate this Agreement if You breach any of the terms or conditions of this Agreement. Upon termination of this Agreement, You must promptly delete and cease use of the Tencent Hunyuan Works. Sections 6(a), 6(c), 7 and 9 shall survive the termination of this Agreement.
|
61 |
+
|
62 |
+
9.GOVERNING LAW AND JURISDICTION.
|
63 |
+
|
64 |
+
a.This Agreement and any dispute arising out of or relating to it will be governed by the laws of the Hong Kong Special Administrative Region of the People’s Republic of China, without regard to conflict of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement.
|
65 |
+
b.Exclusive jurisdiction and venue for any dispute arising out of or relating to this Agreement will be a court of competent jurisdiction in the Hong Kong Special Administrative Region of the People’s Republic of China, and Tencent and Licensee consent to the exclusive jurisdiction of such court with respect to any such dispute.
|
66 |
+
|
67 |
+
|
68 |
+
EXHIBIT A
|
69 |
+
|
70 |
+
ACCEPTABLE USE POLICY
|
71 |
+
|
72 |
+
Tencent reserves the right to update this Acceptable Use Policy from time to time.
|
73 |
+
Last modified: 2024.11.5
|
74 |
+
|
75 |
+
Tencent endeavors to promote safe and fair use of its tools and features, including Tencent Hunyuan. You agree not to use Tencent Hunyuan or Model Derivatives:
|
76 |
+
1.In any way that violates any applicable national, federal, state, local, international or any other law or regulation;
|
77 |
+
2.To harm Yourself or others;
|
78 |
+
3.To repurpose or distribute output from Tencent Hunyuan or any Model Derivatives to harm Yourself or others;
|
79 |
+
4.To override or circumvent the safety guardrails and safeguards We have put in place;
|
80 |
+
5.For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
|
81 |
+
6.To generate or disseminate verifiably false information and/or content with the purpose of harming others or influencing elections;
|
82 |
+
7.To generate or facilitate false online engagement, including fake reviews and other means of fake online engagement;
|
83 |
+
8.To intentionally defame, disparage or otherwise harass others;
|
84 |
+
9.To generate and/or disseminate malware (including ransomware) or any other content to be used for the purpose of harming electronic systems;
|
85 |
+
10.To generate or disseminate personal identifiable information with the purpose of harming others;
|
86 |
+
11.To generate or disseminate information (including images, code, posts, articles), and place the information in any public context (including –through the use of bot generated tweets), without expressly and conspicuously identifying that the information and/or content is machine generated;
|
87 |
+
12.To impersonate another individual without consent, authorization, or legal right;
|
88 |
+
13.To make high-stakes automated decisions in domains that affect an individual’s safety, rights or wellbeing (e.g., law enforcement, migration, medicine/health, management of critical infrastructure, safety components of products, essential services, credit, employment, housing, education, social scoring, or insurance);
|
89 |
+
14.In a manner that violates or disrespects the social ethics and moral standards of other countries or regions;
|
90 |
+
15.To perform, facilitate, threaten, incite, plan, promote or encourage violent extremism or terrorism;
|
91 |
+
16.For any use intended to discriminate against or harm individuals or groups based on protected characteristics or categories, online or offline social behavior or known or predicted personal or personality characteristics;
|
92 |
+
17.To intentionally exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
|
93 |
+
18.For military purposes;
|
94 |
+
19.To engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or other professional practices.
|
95 |
+
THIS ACCEPTABLE USE POLICY INCORPORATES BY REFERENCE THE USER-BASED RESTRICTIONS OUTLINED IN THE CREATIVEML OPEN RAIL++-M LICENSE. https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md
|
Notice
ADDED
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Usage and Legal Notices:
|
2 |
+
|
3 |
+
Tencent is pleased to support the open source community by making Hunyuan 3D available.
|
4 |
+
|
5 |
+
Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved. The below software and/or models in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) THL A29 Limited.
|
6 |
+
|
7 |
+
Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT except for the third-party components listed below. Hunyuan 3D does not impose any additional limitations beyond what is outlined in the repsective licenses of these third-party components. Users must comply with all terms and conditions of original licenses of these third-party components and must ensure that the usage of the third party components adheres to all relevant laws and regulations.
|
8 |
+
|
9 |
+
For avoidance of doubts, Hunyuan 3D means the large language models and their software and algorithms, including trained model weights, parameters (including optimizer states), machine-learning model code, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing made publicly available by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
10 |
+
|
11 |
+
|
12 |
+
Other dependencies and licenses:
|
13 |
+
|
14 |
+
|
15 |
+
Open Source Model Licensed under the Apache License Version 2.0 and Other Licenses of the Third-Party Components therein:
|
16 |
+
The below Model in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
|
17 |
+
--------------------------------------------------------------------
|
18 |
+
1. instantmesh
|
19 |
+
Copyright (c) instantmesh original author and authors
|
20 |
+
Please note this software has been modified by Tencent in this distribution.
|
21 |
+
|
22 |
+
|
23 |
+
Terms of the Apache License Version 2.0:
|
24 |
+
--------------------------------------------------------------------
|
25 |
+
Apache License
|
26 |
+
|
27 |
+
Version 2.0, January 2004
|
28 |
+
|
29 |
+
http://www.apache.org/licenses/
|
30 |
+
|
31 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
32 |
+
1. Definitions.
|
33 |
+
|
34 |
+
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
|
35 |
+
|
36 |
+
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
|
37 |
+
|
38 |
+
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
|
39 |
+
|
40 |
+
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
|
41 |
+
|
42 |
+
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
|
43 |
+
|
44 |
+
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
|
45 |
+
|
46 |
+
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
|
47 |
+
|
48 |
+
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
|
49 |
+
|
50 |
+
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
|
51 |
+
|
52 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
|
53 |
+
|
54 |
+
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
|
55 |
+
|
56 |
+
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
|
57 |
+
|
58 |
+
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
|
59 |
+
|
60 |
+
You must give any other recipients of the Work or Derivative Works a copy of this License; and
|
61 |
+
|
62 |
+
You must cause any modified files to carry prominent notices stating that You changed the files; and
|
63 |
+
|
64 |
+
You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
|
65 |
+
|
66 |
+
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
|
67 |
+
|
68 |
+
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
|
69 |
+
|
70 |
+
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
|
71 |
+
|
72 |
+
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
|
73 |
+
|
74 |
+
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
|
75 |
+
|
76 |
+
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
|
77 |
+
|
78 |
+
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
|
79 |
+
|
80 |
+
END OF TERMS AND CONDITIONS
|
81 |
+
|
82 |
+
|
83 |
+
For the license of other third party dependencies, please refer to the following URL:
|
84 |
+
https://github.com/TencentARC/InstantMesh/blob/main/LICENSE
|
85 |
+
https://github.com/TencentARC/InstantMesh/tree/main?tab=readme-ov-file#-acknowledgements
|
86 |
+
|
87 |
+
|
88 |
+
Open Source Model Licensed under the MIT and CreativeML Open RAIL++-M License:
|
89 |
+
The below Model in this distribution may have been modified by Tencent.
|
90 |
+
--------------------------------------------------------------------
|
91 |
+
1. Stable Diffusion
|
92 |
+
Copyright (c) 2022 Stability AI and contributors
|
93 |
+
|
94 |
+
|
95 |
+
Terms of the MIT and CreativeML Open RAIL++-M License:
|
96 |
+
--------------------------------------------------------------------
|
97 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
98 |
+
of this software and associated documentation files (the "Software"), to deal
|
99 |
+
in the Software without restriction, including without limitation the rights
|
100 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
101 |
+
copies of the Software, and to permit persons to whom the Software is
|
102 |
+
furnished to do so, subject to the following conditions:
|
103 |
+
|
104 |
+
The above copyright notice and this permission notice shall be included in all
|
105 |
+
copies or substantial portions of the Software.
|
106 |
+
|
107 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
108 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
109 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
110 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
111 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
112 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
113 |
+
SOFTWARE.
|
114 |
+
|
115 |
+
|
116 |
+
CreativeML Open RAIL++-M License
|
117 |
+
dated November 24, 2022
|
118 |
+
|
119 |
+
Section I: PREAMBLE
|
120 |
+
|
121 |
+
Multimodal generative models are being widely adopted and used, and have the potential to transform the way artists, among other individuals, conceive and benefit from AI or ML technologies as a tool for content creation.
|
122 |
+
|
123 |
+
Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations.
|
124 |
+
|
125 |
+
In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the Model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for art and content generation.
|
126 |
+
|
127 |
+
Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this License aims to strike a balance between both in order to enable responsible open-science in the field of AI.
|
128 |
+
|
129 |
+
This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model.
|
130 |
+
|
131 |
+
NOW THEREFORE, You and Licensor agree as follows:
|
132 |
+
|
133 |
+
1. Definitions
|
134 |
+
|
135 |
+
- "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
|
136 |
+
- "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
|
137 |
+
- "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
|
138 |
+
- "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
|
139 |
+
- "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
|
140 |
+
- "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
|
141 |
+
- "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
|
142 |
+
- "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
|
143 |
+
- "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
|
144 |
+
- "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
|
145 |
+
- "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
|
146 |
+
- "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
|
147 |
+
|
148 |
+
Section II: INTELLECTUAL PROPERTY RIGHTS
|
149 |
+
|
150 |
+
Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
|
151 |
+
|
152 |
+
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
|
153 |
+
3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
|
154 |
+
|
155 |
+
Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
|
156 |
+
|
157 |
+
4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
|
158 |
+
Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
|
159 |
+
You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
|
160 |
+
You must cause any modified files to carry prominent notices stating that You changed the files;
|
161 |
+
You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
|
162 |
+
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
|
163 |
+
5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
|
164 |
+
6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
|
165 |
+
|
166 |
+
Section IV: OTHER PROVISIONS
|
167 |
+
|
168 |
+
7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License.
|
169 |
+
8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
|
170 |
+
9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
|
171 |
+
10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
|
172 |
+
11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
|
173 |
+
12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
|
174 |
+
|
175 |
+
END OF TERMS AND CONDITIONS
|
176 |
+
|
177 |
+
Attachment A
|
178 |
+
|
179 |
+
Use Restrictions
|
180 |
+
|
181 |
+
You agree not to use the Model or Derivatives of the Model:
|
182 |
+
|
183 |
+
- In any way that violates any applicable national, federal, state, local or international law or regulation;
|
184 |
+
- For the purpose of exploiting, harming or attempting to exploit or harm minors in any way;
|
185 |
+
- To generate or disseminate verifiably false information and/or content with the purpose of harming others;
|
186 |
+
- To generate or disseminate personal identifiable information that can be used to harm an individual;
|
187 |
+
- To defame, disparage or otherwise harass others;
|
188 |
+
- For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation;
|
189 |
+
- For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics;
|
190 |
+
- To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm;
|
191 |
+
- For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories;
|
192 |
+
- To provide medical advice and medical results interpretation;
|
193 |
+
- To generate or disseminate information for the purpose to be used for administration of justice, law enforcement, immigration or asylum processes, such as predicting an individual will commit fraud/crime commitment (e.g. by text profiling, drawing causal relationships between assertions made in documents, indiscriminate and arbitrarily-targeted use).
|
app.py
ADDED
@@ -0,0 +1,316 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Open Source Model Licensed under the Apache License Version 2.0 and Other Licenses of the Third-Party Components therein:
|
2 |
+
# The below Model in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
|
3 |
+
|
4 |
+
# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
|
5 |
+
# The below software and/or models in this distribution may have been
|
6 |
+
# modified by THL A29 Limited ("Tencent Modifications").
|
7 |
+
# All Tencent Modifications are Copyright (C) THL A29 Limited.
|
8 |
+
|
9 |
+
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
10 |
+
# except for the third-party components listed below.
|
11 |
+
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
|
12 |
+
# in the respective licenses of these third-party components.
|
13 |
+
# Users must comply with all terms and conditions of original licenses of these third-party
|
14 |
+
# components and must ensure that the usage of the third party components adheres to
|
15 |
+
# all relevant laws and regulations.
|
16 |
+
|
17 |
+
# For avoidance of doubts, Hunyuan 3D means the large language models and
|
18 |
+
# their software and algorithms, including trained model weights, parameters (including
|
19 |
+
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
|
20 |
+
# fine-tuning enabling code and other elements of the foregoing made publicly available
|
21 |
+
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
22 |
+
|
23 |
+
import os
|
24 |
+
import warnings
|
25 |
+
import argparse
|
26 |
+
import gradio as gr
|
27 |
+
from glob import glob
|
28 |
+
import shutil
|
29 |
+
import torch
|
30 |
+
import numpy as np
|
31 |
+
from PIL import Image
|
32 |
+
from einops import rearrange
|
33 |
+
|
34 |
+
from infer import seed_everything, save_gif
|
35 |
+
from infer import Text2Image, Removebg, Image2Views, Views2Mesh, GifRenderer
|
36 |
+
|
37 |
+
warnings.simplefilter('ignore', category=UserWarning)
|
38 |
+
warnings.simplefilter('ignore', category=FutureWarning)
|
39 |
+
warnings.simplefilter('ignore', category=DeprecationWarning)
|
40 |
+
|
41 |
+
parser = argparse.ArgumentParser()
|
42 |
+
parser.add_argument("--use_lite", default=False, action="store_true")
|
43 |
+
parser.add_argument("--mv23d_cfg_path", default="./svrm/configs/svrm.yaml", type=str)
|
44 |
+
parser.add_argument("--mv23d_ckt_path", default="weights/svrm/svrm.safetensors", type=str)
|
45 |
+
parser.add_argument("--text2image_path", default="weights/hunyuanDiT", type=str)
|
46 |
+
parser.add_argument("--save_memory", default=False, action="store_true")
|
47 |
+
parser.add_argument("--device", default="cuda:0", type=str)
|
48 |
+
args = parser.parse_args()
|
49 |
+
|
50 |
+
################################################################
|
51 |
+
|
52 |
+
CONST_PORT = 8080
|
53 |
+
CONST_MAX_QUEUE = 1
|
54 |
+
CONST_SERVER = '0.0.0.0'
|
55 |
+
|
56 |
+
CONST_HEADER = '''
|
57 |
+
<h2><b>Official 🤗 Gradio Demo</b></h2><h2><a href='https://github.com/tencent/Hunyuan3D-1' target='_blank'><b>Hunyuan3D-1.0: A Unified Framework for Text-to-3D and Image-to-3D
|
58 |
+
Generation</b></a></h2>
|
59 |
+
Code: <a href='https://github.com/tencent/Hunyuan3D-1' target='_blank'>GitHub</a>. Technical report: <a href='https://arxiv.org/abs/placeholder' target='_blank'>ArXiv</a>.
|
60 |
+
|
61 |
+
❗️❗️❗️**Important Notes:**
|
62 |
+
- By default, our demo can export a .obj mesh with vertex colors or a .glb mesh.
|
63 |
+
- If you select "texture mapping," it will export a .obj mesh with a texture map or a .glb mesh.
|
64 |
+
- If you select "render GIF," it will export a GIF image rendering of the .glb file.
|
65 |
+
- If the result is unsatisfactory, please try a different seed value (Default: 0).
|
66 |
+
'''
|
67 |
+
|
68 |
+
CONST_CITATION = r"""
|
69 |
+
If HunYuan3D-1 is helpful, please help to ⭐ the <a href='https://github.com/tencent/Hunyuan3D-1' target='_blank'>Github Repo</a>. Thanks! [![GitHub Stars](https://img.shields.io/github/stars/tencent/Hunyuan3D-1?style=social)](https://github.com/tencent/Hunyuan3D-1)
|
70 |
+
---
|
71 |
+
📝 **Citation**
|
72 |
+
If you find our work useful for your research or applications, please cite using this bibtex:
|
73 |
+
```bibtex
|
74 |
+
@misc{yang2024tencent,
|
75 |
+
title={Tencent Hunyuan3D-1.0: A Unified Framework for Text-to-3D and Image-to-3D Generation},
|
76 |
+
author={Xianghui Yang and Huiwen Shi and Bowen Zhang and Fan Yang and Jiacheng Wang and Hongxu Zhao and Xinhai Liu and Xinzhou Wang and Qingxiang Lin and Jiaao Yu and Lifu Wang and Zhuo Chen and Sicong Liu and Yuhong Liu and Yong Yang and Di Wang and Jie Jiang and Chunchao Guo},
|
77 |
+
year={2024},
|
78 |
+
eprint={2411.02293},
|
79 |
+
archivePrefix={arXiv},
|
80 |
+
primaryClass={cs.CV}
|
81 |
+
}
|
82 |
+
```
|
83 |
+
"""
|
84 |
+
|
85 |
+
################################################################
|
86 |
+
|
87 |
+
def get_example_img_list():
    """Return the bundled demo images (./demos/example_*.png), sorted by path."""
    print('Loading example img list ...')
    image_paths = glob('./demos/example_*.png')
    image_paths.sort()
    return image_paths
|
90 |
+
|
91 |
+
def get_example_txt_list():
    """Read the demo prompts from ./demos/example_list.txt.

    Returns:
        list[str]: one entry per line of the file, stripped of surrounding
        whitespace (blank lines become empty strings, as before).
    """
    print('Loading example txt list ...')
    # Use a context manager so the file handle is closed deterministically;
    # the original iterated a bare open() and leaked the handle to the GC.
    with open('./demos/example_list.txt') as f:
        return [line.strip() for line in f]
|
97 |
+
|
98 |
+
example_is = get_example_img_list()
|
99 |
+
example_ts = get_example_txt_list()
|
100 |
+
################################################################
|
101 |
+
|
102 |
+
# Instantiate the pipeline stage workers once at startup so the model
# weights are loaded a single time and reused for every request.
worker_xbg = Removebg()  # background removal for input images
print(f"loading {args.text2image_path}")
worker_t2i = Text2Image(  # text -> image generation (HunyuanDiT weights)
    pretrain = args.text2image_path,
    device = args.device,
    save_memory = args.save_memory
)
worker_i2v = Image2Views(  # single image -> multi-view grid
    use_lite = args.use_lite,
    device = args.device,
    save_memory = args.save_memory
)
worker_v23 = Views2Mesh(  # multi-view grid -> 3D mesh (SVRM)
    args.mv23d_cfg_path,
    args.mv23d_ckt_path,
    use_lite = args.use_lite,
    device = args.device,
    save_memory = args.save_memory
)
worker_gif = GifRenderer(args.device)  # renders a turntable GIF of the mesh
|
122 |
+
|
123 |
+
def stage_0_t2i(text, image, seed, step):
    """Stage 0: obtain the input image, from an upload or via text-to-image.

    Output folders rotate through 30 integer-named slots under
    ./outputs/app_output: the lowest free slot is used and the following
    slot is cleared in advance so the ring buffer never serves stale files.

    Args:
        text: prompt for the text-to-image model; may be empty/None.
        image: optional PIL image uploaded by the user.
        seed: generation seed for the T2I model.
        step: number of diffusion steps for the T2I model.

    Returns:
        (dst, save_folder): path of the prepared input image and its folder.

    Raises:
        gr.Error: when neither text nor an image is provided.
    """
    os.makedirs('./outputs/app_output', exist_ok=True)
    # Slot names are plain integers 0..29; skip hidden files such as .DS_Store.
    exists = set(int(_) for _ in os.listdir('./outputs/app_output') if not _.startswith("."))
    if len(exists) == 30:
        # All slots occupied: recycle slot 0.
        shutil.rmtree("./outputs/app_output/0")
        cur_id = 0
    else:
        cur_id = min(set(range(30)) - exists)
    # Pre-clear the next slot in the rotation.
    next_folder = f"./outputs/app_output/{(cur_id + 1) % 30}"
    if os.path.exists(next_folder):
        shutil.rmtree(next_folder)
    save_folder = f'./outputs/app_output/{cur_id}'
    os.makedirs(save_folder, exist_ok=True)

    dst = save_folder + '/img.png'

    if not text:
        if image is None:
            # FIX(review): the original returned `dst` here even though no
            # image was ever written to it; surface the missing input instead.
            raise gr.Error("Upload image or provide text ...")
        image.save(dst)
        return dst, save_folder

    image = worker_t2i(text, seed, step)
    image.save(dst)
    # NOTE(review): two-argument call — confirm Removebg accepts save_folder
    # and returns the path of the background-free image.
    dst = worker_xbg(image, save_folder)
    return dst, save_folder
|
146 |
+
|
147 |
+
def stage_1_xbg(image, save_folder):
    """Stage 1: strip the background and save the RGBA result in save_folder."""
    # The previous stage may hand over a file path rather than a PIL image.
    source = Image.open(image) if isinstance(image, str) else image
    output_path = save_folder + '/img_nobg.png'
    worker_xbg(source).save(output_path)
    return output_path
|
154 |
+
|
155 |
+
def stage_2_i2v(image, seed, step, save_folder):
    """Stage 2: generate the multi-view grid from a single RGBA image.

    Returns the raw 3x2 view mosaic, the conditioning image, and a 2x3
    preview mosaic whose tiles are reordered for display.
    """
    if isinstance(image, str):
        image = Image.open(image)
    res_img, pils = worker_i2v(image, seed, step)
    # Persist the individual view frames as an animated GIF.
    save_gif(pils, save_folder + '/views.gif')
    views_img, cond_img = res_img[0], res_img[1]
    # Split the 3x2 mosaic into its six tiles, reorder them for display,
    # then reassemble the tiles as a 2x3 mosaic for the preview widget.
    tiles = rearrange(np.asarray(views_img, dtype=np.uint8),
                      '(n h) (m w) c -> (n m) h w c', n=3, m=2)
    tiles = tiles[worker_i2v.order, ...]
    preview = Image.fromarray(
        rearrange(tiles, '(n m) h w c -> (n h) (m w) c', n=2, m=3))
    return views_img, cond_img, preview
|
168 |
+
|
169 |
+
def stage_3_v23(
    views_pil,
    cond_pil,
    seed,
    save_folder,
    target_face_count = 30000,
    do_texture_mapping = True,
    do_render =True
):
    """Stage 3: reconstruct a 3D mesh from the generated views.

    Rendering a GIF afterwards needs a textured mesh, so requesting a
    render forces texture mapping on.
    """
    if do_render:
        do_texture_mapping = True
    worker_v23(
        views_pil,
        cond_pil,
        seed = seed,
        save_folder = save_folder,
        target_face_count = target_face_count,
        do_texture_mapping = do_texture_mapping
    )
    # worker_v23 writes these fixed filenames into save_folder.
    return save_folder + '/mesh_with_colors.obj', save_folder + '/mesh.glb'
|
190 |
+
|
191 |
+
def stage_4_gif(obj_dst, save_folder, do_render_gif=True):
|
192 |
+
if not do_render_gif: return None
|
193 |
+
gif_dst = save_folder + '/output.gif'
|
194 |
+
worker_gif(
|
195 |
+
save_folder + '/mesh.obj',
|
196 |
+
gif_dst_path = gif_dst
|
197 |
+
)
|
198 |
+
return gif_dst
|
199 |
+
|
200 |
+
#===============================================================
|
201 |
+
with gr.Blocks() as demo:
|
202 |
+
gr.Markdown(CONST_HEADER)
|
203 |
+
with gr.Row(variant="panel"):
|
204 |
+
with gr.Column(scale=2):
|
205 |
+
with gr.Tab("Text to 3D"):
|
206 |
+
with gr.Column():
|
207 |
+
text = gr.TextArea('一只黑白相间的熊猫在白色背景上居中坐着,呈现出卡通风格和可爱氛围。', lines=1, max_lines=10, label='Input text')
|
208 |
+
with gr.Row():
|
209 |
+
textgen_seed = gr.Number(value=0, label="T2I seed", precision=0)
|
210 |
+
textgen_step = gr.Number(value=25, label="T2I step", precision=0)
|
211 |
+
textgen_SEED = gr.Number(value=0, label="Gen seed", precision=0)
|
212 |
+
textgen_STEP = gr.Number(value=50, label="Gen step", precision=0)
|
213 |
+
textgen_max_faces = gr.Number(value=90000, label="max number of faces", precision=0)
|
214 |
+
|
215 |
+
with gr.Row():
|
216 |
+
textgen_do_texture_mapping = gr.Checkbox(label="texture mapping", value=False, interactive=True)
|
217 |
+
textgen_do_render_gif = gr.Checkbox(label="Render gif", value=False, interactive=True)
|
218 |
+
textgen_submit = gr.Button("Generate", variant="primary")
|
219 |
+
|
220 |
+
with gr.Row():
|
221 |
+
gr.Examples(examples=example_ts, inputs=[text], label="Txt examples", examples_per_page=10)
|
222 |
+
|
223 |
+
with gr.Tab("Image to 3D"):
|
224 |
+
with gr.Column():
|
225 |
+
input_image = gr.Image(label="Input image",
|
226 |
+
width=256, height=256, type="pil",
|
227 |
+
image_mode="RGBA", sources="upload",
|
228 |
+
interactive=True)
|
229 |
+
with gr.Row():
|
230 |
+
imggen_SEED = gr.Number(value=0, label="Gen seed", precision=0)
|
231 |
+
imggen_STEP = gr.Number(value=50, label="Gen step", precision=0)
|
232 |
+
imggen_max_faces = gr.Number(value=90000, label="max number of faces", precision=0)
|
233 |
+
|
234 |
+
with gr.Row():
|
235 |
+
imggen_do_texture_mapping = gr.Checkbox(label="texture mapping", value=False, interactive=True)
|
236 |
+
imggen_do_render_gif = gr.Checkbox(label="Render gif", value=False, interactive=True)
|
237 |
+
imggen_submit = gr.Button("Generate", variant="primary")
|
238 |
+
with gr.Row():
|
239 |
+
gr.Examples(examples=example_is, inputs=[input_image], label="Img examples", examples_per_page=10)
|
240 |
+
|
241 |
+
with gr.Column(scale=3):
|
242 |
+
with gr.Row():
|
243 |
+
with gr.Column(scale=2):
|
244 |
+
rem_bg_image = gr.Image(label="No backgraound image", type="pil",
|
245 |
+
image_mode="RGBA", interactive=False)
|
246 |
+
with gr.Column(scale=3):
|
247 |
+
result_image = gr.Image(label="Multi views", type="pil", interactive=False)
|
248 |
+
|
249 |
+
with gr.Row():
|
250 |
+
result_3dobj = gr.Model3D(
|
251 |
+
clear_color=[0.0, 0.0, 0.0, 0.0],
|
252 |
+
label="Output Obj",
|
253 |
+
show_label=True,
|
254 |
+
visible=True,
|
255 |
+
camera_position=[90, 90, None],
|
256 |
+
interactive=False
|
257 |
+
)
|
258 |
+
|
259 |
+
result_3dglb = gr.Model3D(
|
260 |
+
clear_color=[0.0, 0.0, 0.0, 0.0],
|
261 |
+
label="Output Glb",
|
262 |
+
show_label=True,
|
263 |
+
visible=True,
|
264 |
+
camera_position=[90, 90, None],
|
265 |
+
interactive=False
|
266 |
+
)
|
267 |
+
result_gif = gr.Image(label="Rendered GIF", interactive=False)
|
268 |
+
|
269 |
+
with gr.Row():
|
270 |
+
gr.Markdown("The glb file displayed on the grario will be dark. We recommend downloading and opening it with 3D software, such as Blender, MeshLab, etc")
|
271 |
+
|
272 |
+
#===============================================================
|
273 |
+
|
274 |
+
none = gr.State(None)
|
275 |
+
save_folder = gr.State()
|
276 |
+
cond_image = gr.State()
|
277 |
+
views_image = gr.State()
|
278 |
+
text_image = gr.State()
|
279 |
+
|
280 |
+
textgen_submit.click(
|
281 |
+
fn=stage_0_t2i, inputs=[text, none, textgen_seed, textgen_step],
|
282 |
+
outputs=[rem_bg_image, save_folder],
|
283 |
+
).success(
|
284 |
+
fn=stage_2_i2v, inputs=[rem_bg_image, textgen_SEED, textgen_STEP, save_folder],
|
285 |
+
outputs=[views_image, cond_image, result_image],
|
286 |
+
).success(
|
287 |
+
fn=stage_3_v23, inputs=[views_image, cond_image, textgen_SEED, save_folder, textgen_max_faces, textgen_do_texture_mapping, textgen_do_render_gif],
|
288 |
+
outputs=[result_3dobj, result_3dglb],
|
289 |
+
).success(
|
290 |
+
fn=stage_4_gif, inputs=[result_3dglb, save_folder, textgen_do_render_gif],
|
291 |
+
outputs=[result_gif],
|
292 |
+
).success(lambda: print('Text_to_3D Done ...'))
|
293 |
+
|
294 |
+
imggen_submit.click(
|
295 |
+
fn=stage_0_t2i, inputs=[none, input_image, textgen_seed, textgen_step],
|
296 |
+
outputs=[text_image, save_folder],
|
297 |
+
).success(
|
298 |
+
fn=stage_1_xbg, inputs=[text_image, save_folder],
|
299 |
+
outputs=[rem_bg_image],
|
300 |
+
).success(
|
301 |
+
fn=stage_2_i2v, inputs=[rem_bg_image, imggen_SEED, imggen_STEP, save_folder],
|
302 |
+
outputs=[views_image, cond_image, result_image],
|
303 |
+
).success(
|
304 |
+
fn=stage_3_v23, inputs=[views_image, cond_image, imggen_SEED, save_folder, imggen_max_faces, imggen_do_texture_mapping, imggen_do_render_gif],
|
305 |
+
outputs=[result_3dobj, result_3dglb],
|
306 |
+
).success(
|
307 |
+
fn=stage_4_gif, inputs=[result_3dglb, save_folder, imggen_do_render_gif],
|
308 |
+
outputs=[result_gif],
|
309 |
+
).success(lambda: print('Image_to_3D Done ...'))
|
310 |
+
|
311 |
+
#===============================================================
|
312 |
+
|
313 |
+
gr.Markdown(CONST_CITATION)
|
314 |
+
demo.queue(max_size=CONST_MAX_QUEUE)
|
315 |
+
demo.launch(server_name=CONST_SERVER, server_port=CONST_PORT)
|
316 |
+
|
app_hg.py
ADDED
@@ -0,0 +1,316 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Open Source Model Licensed under the Apache License Version 2.0 and Other Licenses of the Third-Party Components therein:
|
2 |
+
# The below Model in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
|
3 |
+
|
4 |
+
# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
|
5 |
+
# The below software and/or models in this distribution may have been
|
6 |
+
# modified by THL A29 Limited ("Tencent Modifications").
|
7 |
+
# All Tencent Modifications are Copyright (C) THL A29 Limited.
|
8 |
+
|
9 |
+
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
10 |
+
# except for the third-party components listed below.
|
11 |
+
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
|
12 |
+
# in the repsective licenses of these third-party components.
|
13 |
+
# Users must comply with all terms and conditions of original licenses of these third-party
|
14 |
+
# components and must ensure that the usage of the third party components adheres to
|
15 |
+
# all relevant laws and regulations.
|
16 |
+
|
17 |
+
# For avoidance of doubts, Hunyuan 3D means the large language models and
|
18 |
+
# their software and algorithms, including trained model weights, parameters (including
|
19 |
+
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
|
20 |
+
# fine-tuning enabling code and other elements of the foregoing made publicly available
|
21 |
+
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
22 |
+
|
23 |
+
import os
|
24 |
+
import warnings
|
25 |
+
import argparse
|
26 |
+
import gradio as gr
|
27 |
+
from glob import glob
|
28 |
+
import shutil
|
29 |
+
import torch
|
30 |
+
import numpy as np
|
31 |
+
from PIL import Image
|
32 |
+
from einops import rearrange
|
33 |
+
|
34 |
+
from infer import seed_everything, save_gif
|
35 |
+
from infer import Text2Image, Removebg, Image2Views, Views2Mesh, GifRenderer
|
36 |
+
|
37 |
+
warnings.simplefilter('ignore', category=UserWarning)
|
38 |
+
warnings.simplefilter('ignore', category=FutureWarning)
|
39 |
+
warnings.simplefilter('ignore', category=DeprecationWarning)
|
40 |
+
|
41 |
+
parser = argparse.ArgumentParser()
|
42 |
+
parser.add_argument("--use_lite", default=False, action="store_true")
|
43 |
+
parser.add_argument("--mv23d_cfg_path", default="./svrm/configs/svrm.yaml", type=str)
|
44 |
+
parser.add_argument("--mv23d_ckt_path", default="weights/svrm/svrm.safetensors", type=str)
|
45 |
+
parser.add_argument("--text2image_path", default="weights/hunyuanDiT", type=str)
|
46 |
+
parser.add_argument("--save_memory", default=False, action="store_true")
|
47 |
+
parser.add_argument("--device", default="cuda:0", type=str)
|
48 |
+
args = parser.parse_args()
|
49 |
+
|
50 |
+
################################################################
|
51 |
+
|
52 |
+
CONST_PORT = 8080
|
53 |
+
CONST_MAX_QUEUE = 1
|
54 |
+
CONST_SERVER = '0.0.0.0'
|
55 |
+
|
56 |
+
CONST_HEADER = '''
|
57 |
+
<h2><b>Official 🤗 Gradio Demo</b></h2><h2><a href='https://github.com/tencent/Hunyuan3D-1' target='_blank'><b>Hunyuan3D-1.0: A Unified Framework for Text-to-3D and Image-to-3D
|
58 |
+
Generationr</b></a></h2>
|
59 |
+
Code: <a href='https://github.com/tencent/Hunyuan3D-1' target='_blank'>GitHub</a>. Techenical report: <a href='https://arxiv.org/abs/placeholder' target='_blank'>ArXiv</a>.
|
60 |
+
|
61 |
+
❗️❗️❗️**Important Notes:**
|
62 |
+
- By default, our demo can export a .obj mesh with vertex colors or a .glb mesh.
|
63 |
+
- If you select "texture mapping," it will export a .obj mesh with a texture map or a .glb mesh.
|
64 |
+
- If you select "render GIF," it will export a GIF image rendering of the .glb file.
|
65 |
+
- If the result is unsatisfactory, please try a different seed value (Default: 0).
|
66 |
+
'''
|
67 |
+
|
68 |
+
CONST_CITATION = r"""
|
69 |
+
If HunYuan3D-1 is helpful, please help to ⭐ the <a href='https://github.com/tencent/Hunyuan3D-1' target='_blank'>Github Repo</a>. Thanks! [![GitHub Stars](https://img.shields.io/github/stars/tencent/Hunyuan3D-1?style=social)](https://github.com/tencent/Hunyuan3D-1)
|
70 |
+
---
|
71 |
+
📝 **Citation**
|
72 |
+
If you find our work useful for your research or applications, please cite using this bibtex:
|
73 |
+
```bibtex
|
74 |
+
@misc{yang2024tencent,
|
75 |
+
title={Tencent Hunyuan3D-1.0: A Unified Framework for Text-to-3D and Image-to-3D Generation},
|
76 |
+
author={Xianghui Yang and Huiwen Shi and Bowen Zhang and Fan Yang and Jiacheng Wang and Hongxu Zhao and Xinhai Liu and Xinzhou Wang and Qingxiang Lin and Jiaao Yu and Lifu Wang and Zhuo Chen and Sicong Liu and Yuhong Liu and Yong Yang and Di Wang and Jie Jiang and Chunchao Guo},
|
77 |
+
year={2024},
|
78 |
+
eprint={2411.02293},
|
79 |
+
archivePrefix={arXiv},
|
80 |
+
primaryClass={cs.CV}
|
81 |
+
}
|
82 |
+
```
|
83 |
+
"""
|
84 |
+
|
85 |
+
################################################################
|
86 |
+
|
87 |
+
def get_example_img_list():
|
88 |
+
print('Loading example img list ...')
|
89 |
+
return sorted(glob('./demos/example_*.png'))
|
90 |
+
|
91 |
+
def get_example_txt_list():
|
92 |
+
print('Loading example txt list ...')
|
93 |
+
txt_list = list()
|
94 |
+
for line in open('./demos/example_list.txt'):
|
95 |
+
txt_list.append(line.strip())
|
96 |
+
return txt_list
|
97 |
+
|
98 |
+
example_is = get_example_img_list()
|
99 |
+
example_ts = get_example_txt_list()
|
100 |
+
################################################################
|
101 |
+
|
102 |
+
worker_xbg = Removebg()
|
103 |
+
print(f"loading {args.text2image_path}")
|
104 |
+
worker_t2i = Text2Image(
|
105 |
+
pretrain = args.text2image_path,
|
106 |
+
device = args.device,
|
107 |
+
save_memory = args.save_memory
|
108 |
+
)
|
109 |
+
worker_i2v = Image2Views(
|
110 |
+
use_lite = args.use_lite,
|
111 |
+
device = args.device,
|
112 |
+
save_memory = args.save_memory
|
113 |
+
)
|
114 |
+
worker_v23 = Views2Mesh(
|
115 |
+
args.mv23d_cfg_path,
|
116 |
+
args.mv23d_ckt_path,
|
117 |
+
use_lite = args.use_lite,
|
118 |
+
device = args.device,
|
119 |
+
save_memory = args.save_memory
|
120 |
+
)
|
121 |
+
worker_gif = GifRenderer(args.device)
|
122 |
+
|
123 |
+
def stage_0_t2i(text, image, seed, step):
|
124 |
+
os.makedirs('./outputs/app_output', exist_ok=True)
|
125 |
+
exists = set(int(_) for _ in os.listdir('./outputs/app_output') if not _.startswith("."))
|
126 |
+
if len(exists) == 30: shutil.rmtree(f"./outputs/app_output/0");cur_id = 0
|
127 |
+
else: cur_id = min(set(range(30)) - exists)
|
128 |
+
if os.path.exists(f"./outputs/app_output/{(cur_id + 1) % 30}"):
|
129 |
+
shutil.rmtree(f"./outputs/app_output/{(cur_id + 1) % 30}")
|
130 |
+
save_folder = f'./outputs/app_output/{cur_id}'
|
131 |
+
os.makedirs(save_folder, exist_ok=True)
|
132 |
+
|
133 |
+
dst = save_folder + '/img.png'
|
134 |
+
|
135 |
+
if not text:
|
136 |
+
if image is None:
|
137 |
+
return dst, save_folder
|
138 |
+
raise gr.Error("Upload image or provide text ...")
|
139 |
+
image.save(dst)
|
140 |
+
return dst, save_folder
|
141 |
+
|
142 |
+
image = worker_t2i(text, seed, step)
|
143 |
+
image.save(dst)
|
144 |
+
dst = worker_xbg(image, save_folder)
|
145 |
+
return dst, save_folder
|
146 |
+
|
147 |
+
def stage_1_xbg(image, save_folder):
|
148 |
+
if isinstance(image, str):
|
149 |
+
image = Image.open(image)
|
150 |
+
dst = save_folder + '/img_nobg.png'
|
151 |
+
rgba = worker_xbg(image)
|
152 |
+
rgba.save(dst)
|
153 |
+
return dst
|
154 |
+
|
155 |
+
def stage_2_i2v(image, seed, step, save_folder):
|
156 |
+
if isinstance(image, str):
|
157 |
+
image = Image.open(image)
|
158 |
+
gif_dst = save_folder + '/views.gif'
|
159 |
+
res_img, pils = worker_i2v(image, seed, step)
|
160 |
+
save_gif(pils, gif_dst)
|
161 |
+
views_img, cond_img = res_img[0], res_img[1]
|
162 |
+
img_array = np.asarray(views_img, dtype=np.uint8)
|
163 |
+
show_img = rearrange(img_array, '(n h) (m w) c -> (n m) h w c', n=3, m=2)
|
164 |
+
show_img = show_img[worker_i2v.order, ...]
|
165 |
+
show_img = rearrange(show_img, '(n m) h w c -> (n h) (m w) c', n=2, m=3)
|
166 |
+
show_img = Image.fromarray(show_img)
|
167 |
+
return views_img, cond_img, show_img
|
168 |
+
|
169 |
+
def stage_3_v23(
|
170 |
+
views_pil,
|
171 |
+
cond_pil,
|
172 |
+
seed,
|
173 |
+
save_folder,
|
174 |
+
target_face_count = 30000,
|
175 |
+
do_texture_mapping = True,
|
176 |
+
do_render =True
|
177 |
+
):
|
178 |
+
do_texture_mapping = do_texture_mapping or do_render
|
179 |
+
obj_dst = save_folder + '/mesh_with_colors.obj'
|
180 |
+
glb_dst = save_folder + '/mesh.glb'
|
181 |
+
worker_v23(
|
182 |
+
views_pil,
|
183 |
+
cond_pil,
|
184 |
+
seed = seed,
|
185 |
+
save_folder = save_folder,
|
186 |
+
target_face_count = target_face_count,
|
187 |
+
do_texture_mapping = do_texture_mapping
|
188 |
+
)
|
189 |
+
return obj_dst, glb_dst
|
190 |
+
|
191 |
+
def stage_4_gif(obj_dst, save_folder, do_render_gif=True):
|
192 |
+
if not do_render_gif: return None
|
193 |
+
gif_dst = save_folder + '/output.gif'
|
194 |
+
worker_gif(
|
195 |
+
save_folder + '/mesh.obj',
|
196 |
+
gif_dst_path = gif_dst
|
197 |
+
)
|
198 |
+
return gif_dst
|
199 |
+
|
200 |
+
#===============================================================
|
201 |
+
with gr.Blocks() as demo:
|
202 |
+
gr.Markdown(CONST_HEADER)
|
203 |
+
with gr.Row(variant="panel"):
|
204 |
+
with gr.Column(scale=2):
|
205 |
+
with gr.Tab("Text to 3D"):
|
206 |
+
with gr.Column():
|
207 |
+
text = gr.TextArea('一只黑白相间的熊猫在白色背景上居中坐着,呈现出卡通风格和可爱氛围。', lines=1, max_lines=10, label='Input text')
|
208 |
+
with gr.Row():
|
209 |
+
textgen_seed = gr.Number(value=0, label="T2I seed", precision=0)
|
210 |
+
textgen_step = gr.Number(value=25, label="T2I step", precision=0)
|
211 |
+
textgen_SEED = gr.Number(value=0, label="Gen seed", precision=0)
|
212 |
+
textgen_STEP = gr.Number(value=50, label="Gen step", precision=0)
|
213 |
+
textgen_max_faces = gr.Number(value=90000, label="max number of faces", precision=0)
|
214 |
+
|
215 |
+
with gr.Row():
|
216 |
+
textgen_do_texture_mapping = gr.Checkbox(label="texture mapping", value=False, interactive=True)
|
217 |
+
textgen_do_render_gif = gr.Checkbox(label="Render gif", value=False, interactive=True)
|
218 |
+
textgen_submit = gr.Button("Generate", variant="primary")
|
219 |
+
|
220 |
+
with gr.Row():
|
221 |
+
gr.Examples(examples=example_ts, inputs=[text], label="Txt examples", examples_per_page=10)
|
222 |
+
|
223 |
+
with gr.Tab("Image to 3D"):
|
224 |
+
with gr.Column():
|
225 |
+
input_image = gr.Image(label="Input image",
|
226 |
+
width=256, height=256, type="pil",
|
227 |
+
image_mode="RGBA", sources="upload",
|
228 |
+
interactive=True)
|
229 |
+
with gr.Row():
|
230 |
+
imggen_SEED = gr.Number(value=0, label="Gen seed", precision=0)
|
231 |
+
imggen_STEP = gr.Number(value=50, label="Gen step", precision=0)
|
232 |
+
imggen_max_faces = gr.Number(value=90000, label="max number of faces", precision=0)
|
233 |
+
|
234 |
+
with gr.Row():
|
235 |
+
imggen_do_texture_mapping = gr.Checkbox(label="texture mapping", value=False, interactive=True)
|
236 |
+
imggen_do_render_gif = gr.Checkbox(label="Render gif", value=False, interactive=True)
|
237 |
+
imggen_submit = gr.Button("Generate", variant="primary")
|
238 |
+
with gr.Row():
|
239 |
+
gr.Examples(examples=example_is, inputs=[input_image], label="Img examples", examples_per_page=10)
|
240 |
+
|
241 |
+
with gr.Column(scale=3):
|
242 |
+
with gr.Row():
|
243 |
+
with gr.Column(scale=2):
|
244 |
+
rem_bg_image = gr.Image(label="No backgraound image", type="pil",
|
245 |
+
image_mode="RGBA", interactive=False)
|
246 |
+
with gr.Column(scale=3):
|
247 |
+
result_image = gr.Image(label="Multi views", type="pil", interactive=False)
|
248 |
+
|
249 |
+
with gr.Row():
|
250 |
+
result_3dobj = gr.Model3D(
|
251 |
+
clear_color=[0.0, 0.0, 0.0, 0.0],
|
252 |
+
label="Output Obj",
|
253 |
+
show_label=True,
|
254 |
+
visible=True,
|
255 |
+
camera_position=[90, 90, None],
|
256 |
+
interactive=False
|
257 |
+
)
|
258 |
+
|
259 |
+
result_3dglb = gr.Model3D(
|
260 |
+
clear_color=[0.0, 0.0, 0.0, 0.0],
|
261 |
+
label="Output Glb",
|
262 |
+
show_label=True,
|
263 |
+
visible=True,
|
264 |
+
camera_position=[90, 90, None],
|
265 |
+
interactive=False
|
266 |
+
)
|
267 |
+
result_gif = gr.Image(label="Rendered GIF", interactive=False)
|
268 |
+
|
269 |
+
with gr.Row():
|
270 |
+
gr.Markdown("The glb file displayed on the grario will be dark. We recommend downloading and opening it with 3D software, such as Blender, MeshLab, etc")
|
271 |
+
|
272 |
+
#===============================================================
|
273 |
+
|
274 |
+
none = gr.State(None)
|
275 |
+
save_folder = gr.State()
|
276 |
+
cond_image = gr.State()
|
277 |
+
views_image = gr.State()
|
278 |
+
text_image = gr.State()
|
279 |
+
|
280 |
+
textgen_submit.click(
|
281 |
+
fn=stage_0_t2i, inputs=[text, none, textgen_seed, textgen_step],
|
282 |
+
outputs=[rem_bg_image, save_folder],
|
283 |
+
).success(
|
284 |
+
fn=stage_2_i2v, inputs=[rem_bg_image, textgen_SEED, textgen_STEP, save_folder],
|
285 |
+
outputs=[views_image, cond_image, result_image],
|
286 |
+
).success(
|
287 |
+
fn=stage_3_v23, inputs=[views_image, cond_image, textgen_SEED, save_folder, textgen_max_faces, textgen_do_texture_mapping, textgen_do_render_gif],
|
288 |
+
outputs=[result_3dobj, result_3dglb],
|
289 |
+
).success(
|
290 |
+
fn=stage_4_gif, inputs=[result_3dglb, save_folder, textgen_do_render_gif],
|
291 |
+
outputs=[result_gif],
|
292 |
+
).success(lambda: print('Text_to_3D Done ...'))
|
293 |
+
|
294 |
+
imggen_submit.click(
|
295 |
+
fn=stage_0_t2i, inputs=[none, input_image, textgen_seed, textgen_step],
|
296 |
+
outputs=[text_image, save_folder],
|
297 |
+
).success(
|
298 |
+
fn=stage_1_xbg, inputs=[text_image, save_folder],
|
299 |
+
outputs=[rem_bg_image],
|
300 |
+
).success(
|
301 |
+
fn=stage_2_i2v, inputs=[rem_bg_image, imggen_SEED, imggen_STEP, save_folder],
|
302 |
+
outputs=[views_image, cond_image, result_image],
|
303 |
+
).success(
|
304 |
+
fn=stage_3_v23, inputs=[views_image, cond_image, imggen_SEED, save_folder, imggen_max_faces, imggen_do_texture_mapping, imggen_do_render_gif],
|
305 |
+
outputs=[result_3dobj, result_3dglb],
|
306 |
+
).success(
|
307 |
+
fn=stage_4_gif, inputs=[result_3dglb, save_folder, imggen_do_render_gif],
|
308 |
+
outputs=[result_gif],
|
309 |
+
).success(lambda: print('Image_to_3D Done ...'))
|
310 |
+
|
311 |
+
#===============================================================
|
312 |
+
|
313 |
+
gr.Markdown(CONST_CITATION)
|
314 |
+
demo.queue(max_size=CONST_MAX_QUEUE)
|
315 |
+
demo.launch()
|
316 |
+
|
assets/logo.png
ADDED
assets/overview_3.png
ADDED
assets/radar.png
ADDED
assets/runtime.png
ADDED
assets/teaser.png
ADDED
Git LFS Details
|
demos/example_000.png
ADDED
demos/example_001.png
ADDED
demos/example_002.png
ADDED
demos/example_003.png
ADDED
Git LFS Details
|
demos/example_list.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
一片绿色的树叶在白色背景上居中展现,清晰的纹理
|
2 |
+
一只棕白相间的仓鼠,站在白色背景前。照片采用居中构图方式,卡通风格
|
3 |
+
一盆绿色植物生长在红色花盆中,居中,写实
|
4 |
+
a pot of green plants grows in a red flower pot.
|
5 |
+
a lovely rabbit eating carrots
|
env_install.sh
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
pip3 install diffusers transformers
|
2 |
+
pip3 install rembg tqdm omegaconf matplotlib opencv-python imageio jaxtyping einops
|
3 |
+
pip3 install SentencePiece accelerate trimesh PyMCubes xatlas libigl ninja gradio
|
4 |
+
pip3 install git+https://github.com/facebookresearch/pytorch3d@stable
|
5 |
+
pip3 install git+https://github.com/NVlabs/nvdiffrast
|
6 |
+
pip3 install open3d
|
infer/__init__.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Open Source Model Licensed under the Apache License Version 2.0 and Other Licenses of the Third-Party Components therein:
|
2 |
+
# The below Model in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
|
3 |
+
|
4 |
+
# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
|
5 |
+
# The below software and/or models in this distribution may have been
|
6 |
+
# modified by THL A29 Limited ("Tencent Modifications").
|
7 |
+
# All Tencent Modifications are Copyright (C) THL A29 Limited.
|
8 |
+
|
9 |
+
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
10 |
+
# except for the third-party components listed below.
|
11 |
+
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
|
12 |
+
# in the repsective licenses of these third-party components.
|
13 |
+
# Users must comply with all terms and conditions of original licenses of these third-party
|
14 |
+
# components and must ensure that the usage of the third party components adheres to
|
15 |
+
# all relevant laws and regulations.
|
16 |
+
|
17 |
+
# For avoidance of doubts, Hunyuan 3D means the large language models and
|
18 |
+
# their software and algorithms, including trained model weights, parameters (including
|
19 |
+
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
|
20 |
+
# fine-tuning enabling code and other elements of the foregoing made publicly available
|
21 |
+
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
22 |
+
|
23 |
+
from .removebg import Removebg
|
24 |
+
from .text_to_image import Text2Image
|
25 |
+
from .image_to_views import Image2Views, save_gif
|
26 |
+
from .views_to_mesh import Views2Mesh
|
27 |
+
from .gif_render import GifRenderer
|
28 |
+
|
29 |
+
from .utils import seed_everything, auto_amp_inference
|
30 |
+
from .utils import get_parameter_number, set_parameter_grad_false
|
infer/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (734 Bytes). View file
|
|
infer/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (552 Bytes). View file
|
|
infer/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (586 Bytes). View file
|
|
infer/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (586 Bytes). View file
|
|
infer/__pycache__/gif_render.cpython-311.pyc
ADDED
Binary file (2.7 kB). View file
|
|
infer/__pycache__/gif_render.cpython-38.pyc
ADDED
Binary file (1.66 kB). View file
|
|
infer/__pycache__/gif_render.cpython-39.pyc
ADDED
Binary file (1.66 kB). View file
|
|
infer/__pycache__/image_to_views.cpython-311.pyc
ADDED
Binary file (7.11 kB). View file
|
|
infer/__pycache__/image_to_views.cpython-38.pyc
ADDED
Binary file (3.8 kB). View file
|
|
infer/__pycache__/image_to_views.cpython-39.pyc
ADDED
Binary file (3.6 kB). View file
|
|
infer/__pycache__/rembg.cpython-38.pyc
ADDED
Binary file (1.13 kB). View file
|
|
infer/__pycache__/rembg.cpython-39.pyc
ADDED
Binary file (1.13 kB). View file
|
|
infer/__pycache__/removebg.cpython-311.pyc
ADDED
Binary file (5.99 kB). View file
|
|
infer/__pycache__/removebg.cpython-38.pyc
ADDED
Binary file (3.2 kB). View file
|
|
infer/__pycache__/removebg.cpython-39.pyc
ADDED
Binary file (1.77 kB). View file
|
|
infer/__pycache__/text_to_image.cpython-311.pyc
ADDED
Binary file (5.83 kB). View file
|
|
infer/__pycache__/text_to_image.cpython-38.pyc
ADDED
Binary file (3.4 kB). View file
|
|
infer/__pycache__/text_to_image.cpython-39.pyc
ADDED
Binary file (3.37 kB). View file
|
|
infer/__pycache__/utils.cpython-311.pyc
ADDED
Binary file (4.34 kB). View file
|
|
infer/__pycache__/utils.cpython-312.pyc
ADDED
Binary file (3.46 kB). View file
|
|
infer/__pycache__/utils.cpython-38.pyc
ADDED
Binary file (2.67 kB). View file
|
|
infer/__pycache__/utils.cpython-39.pyc
ADDED
Binary file (2.41 kB). View file
|
|
infer/__pycache__/views_to_mesh.cpython-311.pyc
ADDED
Binary file (7.7 kB). View file
|
|
infer/__pycache__/views_to_mesh.cpython-38.pyc
ADDED
Binary file (4.24 kB). View file
|
|
infer/__pycache__/views_to_mesh.cpython-39.pyc
ADDED
Binary file (3.82 kB). View file
|
|
infer/gif_render.py
ADDED
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Open Source Model Licensed under the Apache License Version 2.0 and Other Licenses of the Third-Party Components therein:
|
2 |
+
# The below Model in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
|
3 |
+
|
4 |
+
# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
|
5 |
+
# The below software and/or models in this distribution may have been
|
6 |
+
# modified by THL A29 Limited ("Tencent Modifications").
|
7 |
+
# All Tencent Modifications are Copyright (C) THL A29 Limited.
|
8 |
+
|
9 |
+
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
10 |
+
# except for the third-party components listed below.
|
11 |
+
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
|
12 |
+
# in the repsective licenses of these third-party components.
|
13 |
+
# Users must comply with all terms and conditions of original licenses of these third-party
|
14 |
+
# components and must ensure that the usage of the third party components adheres to
|
15 |
+
# all relevant laws and regulations.
|
16 |
+
|
17 |
+
# For avoidance of doubts, Hunyuan 3D means the large language models and
|
18 |
+
# their software and algorithms, including trained model weights, parameters (including
|
19 |
+
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
|
20 |
+
# fine-tuning enabling code and other elements of the foregoing made publicly available
|
21 |
+
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
22 |
+
|
23 |
+
import os, sys
|
24 |
+
sys.path.insert(0, f"{os.path.dirname(os.path.dirname(os.path.abspath(__file__)))}")
|
25 |
+
|
26 |
+
from svrm.ldm.vis_util import render
|
27 |
+
from infer.utils import seed_everything, timing_decorator
|
28 |
+
|
29 |
+
class GifRenderer():
|
30 |
+
'''
|
31 |
+
render frame(s) of mesh using pytorch3d
|
32 |
+
'''
|
33 |
+
def __init__(self, device="cuda:0"):
|
34 |
+
self.device = device
|
35 |
+
|
36 |
+
@timing_decorator("gif render")
|
37 |
+
def __call__(
|
38 |
+
self,
|
39 |
+
obj_filename,
|
40 |
+
elev=0,
|
41 |
+
azim=0,
|
42 |
+
resolution=512,
|
43 |
+
gif_dst_path='',
|
44 |
+
n_views=120,
|
45 |
+
fps=30,
|
46 |
+
rgb=True
|
47 |
+
):
|
48 |
+
render(
|
49 |
+
obj_filename,
|
50 |
+
elev=elev,
|
51 |
+
azim=azim,
|
52 |
+
resolution=resolution,
|
53 |
+
gif_dst_path=gif_dst_path,
|
54 |
+
n_views=n_views,
|
55 |
+
fps=fps,
|
56 |
+
device=self.device,
|
57 |
+
rgb=rgb
|
58 |
+
)
|
59 |
+
|
60 |
+
if __name__ == "__main__":
|
61 |
+
import argparse
|
62 |
+
|
63 |
+
def get_args():
|
64 |
+
parser = argparse.ArgumentParser()
|
65 |
+
parser.add_argument("--mesh_path", type=str, required=True)
|
66 |
+
parser.add_argument("--output_gif_path", type=str, required=True)
|
67 |
+
parser.add_argument("--device", default="cuda:0", type=str)
|
68 |
+
return parser.parse_args()
|
69 |
+
|
70 |
+
args = get_args()
|
71 |
+
|
72 |
+
gif_renderer = GifRenderer(device=args.device)
|
73 |
+
|
74 |
+
gif_renderer(
|
75 |
+
args.mesh_path,
|
76 |
+
gif_dst_path = args.output_gif_path
|
77 |
+
)
|
infer/image_to_views.py
ADDED
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Open Source Model Licensed under the Apache License Version 2.0 and Other Licenses of the Third-Party Components therein:
|
2 |
+
# The below Model in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
|
3 |
+
|
4 |
+
# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
|
5 |
+
# The below software and/or models in this distribution may have been
|
6 |
+
# modified by THL A29 Limited ("Tencent Modifications").
|
7 |
+
# All Tencent Modifications are Copyright (C) THL A29 Limited.
|
8 |
+
|
9 |
+
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
10 |
+
# except for the third-party components listed below.
|
11 |
+
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
|
12 |
+
# in the repsective licenses of these third-party components.
|
13 |
+
# Users must comply with all terms and conditions of original licenses of these third-party
|
14 |
+
# components and must ensure that the usage of the third party components adheres to
|
15 |
+
# all relevant laws and regulations.
|
16 |
+
|
17 |
+
# For avoidance of doubts, Hunyuan 3D means the large language models and
|
18 |
+
# their software and algorithms, including trained model weights, parameters (including
|
19 |
+
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
|
20 |
+
# fine-tuning enabling code and other elements of the foregoing made publicly available
|
21 |
+
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
22 |
+
|
23 |
+
import os, sys
|
24 |
+
sys.path.insert(0, f"{os.path.dirname(os.path.dirname(os.path.abspath(__file__)))}")
|
25 |
+
|
26 |
+
import time
|
27 |
+
import torch
|
28 |
+
import random
|
29 |
+
import numpy as np
|
30 |
+
from PIL import Image
|
31 |
+
from einops import rearrange
|
32 |
+
from PIL import Image, ImageSequence
|
33 |
+
|
34 |
+
from infer.utils import seed_everything, timing_decorator, auto_amp_inference
|
35 |
+
from infer.utils import get_parameter_number, set_parameter_grad_false, str_to_bool
|
36 |
+
from mvd.hunyuan3d_mvd_std_pipeline import HunYuan3D_MVD_Std_Pipeline
|
37 |
+
from mvd.hunyuan3d_mvd_lite_pipeline import Hunyuan3d_MVD_Lite_Pipeline
|
38 |
+
|
39 |
+
|
40 |
+
def save_gif(pils, save_path, df=False):
    """Write a list of PIL images to *save_path* as a looping GIF.

    The total animation length is fixed at ~4 seconds, so the per-frame
    duration shrinks as more frames are supplied.  ``df`` is accepted for
    interface compatibility but is not used.

    Returns:
        The path the GIF was written to.
    """
    frame_ms = 4000 / len(pils)
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    first_frame, rest = pils[0], pils[1:]
    first_frame.save(
        save_path,
        format="GIF",
        save_all=True,
        append_images=rest,
        duration=frame_ms,
        loop=0,
    )
    return save_path
|
46 |
+
|
47 |
+
|
48 |
+
class Image2Views():
    """Generate six views of an object from a single RGBA image.

    Wraps the Hunyuan3D multi-view diffusion pipeline (standard or lite
    variant) and splits/reorders the generated view grid into a canonical
    ordering shared by both variants.
    """

    def __init__(self, device="cuda:0", use_lite=False, save_memory=False):
        """
        device: torch device string for inference
        use_lite: load the lite pipeline instead of the standard one
        save_memory: keep the pipeline on CPU between calls and move it
            to the GPU only while a call is running
        """
        self.device = device
        if use_lite:
            self.pipe = Hunyuan3d_MVD_Lite_Pipeline.from_pretrained(
                "./weights/mvd_lite",
                torch_dtype = torch.float16,
                use_safetensors = True,
            )
        else:
            self.pipe = HunYuan3D_MVD_Std_Pipeline.from_pretrained(
                "./weights/mvd_std",
                torch_dtype = torch.float16,
                use_safetensors = True,
            )
        self.pipe = self.pipe.to(device)
        # The two pipeline variants emit sub-views in different orders;
        # map both to the same canonical ordering.
        self.order = [0, 1, 2, 3, 4, 5] if use_lite else [0, 2, 4, 5, 3, 1]
        self.save_memory = save_memory
        set_parameter_grad_false(self.pipe.unet)
        print('image2views unet model', get_parameter_number(self.pipe.unet))

    @torch.no_grad()
    @timing_decorator("image to views")
    @auto_amp_inference
    def __call__(self, *args, **kwargs):
        # When save_memory is set, shuttle the pipeline GPU<->CPU around
        # each call so it does not occupy VRAM between requests.
        if self.save_memory:
            self.pipe = self.pipe.to(self.device)
            torch.cuda.empty_cache()
            res = self.call(*args, **kwargs)
            self.pipe = self.pipe.to("cpu")
        else:
            res = self.call(*args, **kwargs)
        torch.cuda.empty_cache()
        return res

    def call(self, pil_img, seed=0, steps=50, guidance_scale=2.0):
        """Run the diffusion pipeline on *pil_img*.

        Returns:
            (res_img, pils): the raw pipeline images and a list with the
            conditioning image followed by the six reordered views.
        """
        seed_everything(seed)
        # BUGFIX: the generator used to be passed under the misspelled
        # keyword ``generat`` and was never seeded; pass it correctly and
        # seed it so runs are reproducible.
        generator = torch.Generator(device=self.device).manual_seed(int(seed))
        res_img = self.pipe(pil_img,
                            num_inference_steps=steps,
                            guidance_scale=guidance_scale,
                            generator=generator).images
        # The pipeline tiles the six views into a 3x2 grid; split the
        # grid into individual images and reorder them canonically.
        show_image = rearrange(np.asarray(res_img[0], dtype=np.uint8),
                               '(n h) (m w) c -> (n m) h w c', n=3, m=2)
        pils = [res_img[1]] + [Image.fromarray(show_image[idx]) for idx in self.order]
        torch.cuda.empty_cache()
        return res_img, pils
|
94 |
+
|
95 |
+
|
96 |
+
if __name__ == "__main__":
    import argparse

    def get_args():
        """CLI options for running image-to-views generation standalone."""
        parser = argparse.ArgumentParser()
        for required_flag in ("--rgba_path", "--output_views_path", "--output_cond_path"):
            parser.add_argument(required_flag, type=str, required=True)
        parser.add_argument("--seed", default=0, type=int)
        parser.add_argument("--steps", default=50, type=int)
        parser.add_argument("--device", default="cuda:0", type=str)
        parser.add_argument("--use_lite", default='false', type=str)
        return parser.parse_args()

    args = get_args()
    args.use_lite = str_to_bool(args.use_lite)

    rgba_pil = Image.open(args.rgba_path)
    assert rgba_pil.mode == "RGBA", "rgba_pil must be RGBA mode"

    # Generate the multi-view grid plus conditioning image and save both.
    model = Image2Views(device=args.device, use_lite=args.use_lite)
    (views_pil, cond), _ = model(rgba_pil, seed=args.seed, steps=args.steps)
    views_pil.save(args.output_views_path)
    cond.save(args.output_cond_path)
|
124 |
+
|
infer/removebg.py
ADDED
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os, sys
|
2 |
+
sys.path.insert(0, f"{os.path.dirname(os.path.dirname(os.path.abspath(__file__)))}")
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
from PIL import Image
|
6 |
+
from rembg import remove, new_session
|
7 |
+
from infer.utils import timing_decorator
|
8 |
+
|
9 |
+
class Removebg():
    """Strip the background from an image and normalise it for the pipeline."""

    def __init__(self, name="u2net"):
        # One rembg session is created up front and reused for every call.
        self.session = new_session(name)

    @timing_decorator("remove background")
    def __call__(self, rgb_maybe, force=True):
        """Remove the background of *rgb_maybe*.

        Args:
            rgb_maybe: PIL.Image in RGB or RGBA mode.
            force: when the input is already RGBA, re-run background
                removal on its RGB content instead of trusting its alpha.

        Returns:
            PIL.Image in RGBA mode, background whitened, then cropped,
            padded and resized by ``preprocess``.
        """
        already_rgba = rgb_maybe.mode == "RGBA"
        if already_rgba and not force:
            rgba_img = rgb_maybe
        else:
            if already_rgba:
                rgb_maybe = rgb_maybe.convert("RGB")
            rgba_img = remove(rgb_maybe, session=self.session)

        rgba_img = white_out_background(rgba_img)
        return preprocess(rgba_img)
|
36 |
+
|
37 |
+
|
38 |
+
def white_out_background(pil_img):
    """Whiten the background of an RGBA image in one vectorised pass.

    Pixels that are (near-)fully transparent (alpha < 16) become
    transparent white; opaque pixels whose R, G and B are all > 235 are
    clamped to 235 so object whites stay distinguishable from the pure
    white padding added later by ``preprocess``.

    Args:
        pil_img: PIL.Image in RGBA mode.

    Returns:
        A new PIL.Image in RGBA mode.
    """
    # Vectorised replacement for the original per-pixel Python loop.
    data = np.array(pil_img)
    background = data[:, :, 3] < 16
    data[background] = (255, 255, 255, 0)  # transparent white
    near_white = (~background) & (data[:, :, :3] > 235).all(axis=-1)
    data[near_white, :3] = 235
    return Image.fromarray(data, "RGBA")
|
52 |
+
|
53 |
+
def preprocess(rgba_img, size=(512,512), ratio=1.15):
    """Crop an RGBA image to its opaque content, pad it to a white square,
    and resize.

    Args:
        rgba_img: PIL.Image in RGBA mode.
        size: final (width, height) of the returned image.
        ratio: padding factor; the square side is max(h, w) * ratio.

    Returns:
        PIL.Image in RGBA mode of the requested size.
    """
    image = np.asarray(rgba_img)
    rgb, alpha = image[:,:,:3] / 255., image[:,:,3:] / 255.

    # Tight crop around pixels that are not (near-)fully transparent.
    coords = np.nonzero(alpha > 0.1)
    x_min, x_max = coords[0].min(), coords[0].max()
    y_min, y_max = coords[1].min(), coords[1].max()
    # BUGFIX: the max indices are inclusive, so slice with +1 — the
    # original dropped the last opaque row and column.
    rgb = (rgb[x_min:x_max + 1, y_min:y_max + 1, :] * 255).astype("uint8")
    alpha = (alpha[x_min:x_max + 1, y_min:y_max + 1, 0] * 255).astype("uint8")

    # Centre the crop in a white square canvas of side max(h, w) * ratio.
    h, w = rgb.shape[:2]
    resize_side = int(max(h, w) * ratio)
    pad_h, pad_w = resize_side - h, resize_side - w
    start_h, start_w = pad_h // 2, pad_w // 2
    new_rgb = np.ones((resize_side, resize_side, 3), dtype=np.uint8) * 255
    new_alpha = np.zeros((resize_side, resize_side), dtype=np.uint8)
    new_rgb[start_h:start_h + h, start_w:start_w + w] = rgb
    new_alpha[start_h:start_h + h, start_w:start_w + w] = alpha
    rgba_array = np.concatenate((new_rgb, new_alpha[:,:,None]), axis=-1)

    rgba_image = Image.fromarray(rgba_array, 'RGBA')
    return rgba_image.resize(size)
|
78 |
+
|
79 |
+
|
80 |
+
if __name__ == "__main__":

    import argparse

    def get_args():
        """CLI options for running background removal standalone."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--rgb_path", type=str, required=True)
        parser.add_argument("--output_rgba_path", type=str, required=True)
        parser.add_argument("--force", default=False, action="store_true")
        return parser.parse_args()

    args = get_args()

    # Strip the background and save the normalised RGBA result.
    rgb_maybe = Image.open(args.rgb_path)
    model = Removebg()
    rgba_pil = model(rgb_maybe, args.force)
    rgba_pil.save(args.output_rgba_path)
|
100 |
+
|
101 |
+
|
infer/text_to_image.py
ADDED
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Open Source Model Licensed under the Apache License Version 2.0 and Other Licenses of the Third-Party Components therein:
|
2 |
+
# The below Model in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
|
3 |
+
|
4 |
+
# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
|
5 |
+
# The below software and/or models in this distribution may have been
|
6 |
+
# modified by THL A29 Limited ("Tencent Modifications").
|
7 |
+
# All Tencent Modifications are Copyright (C) THL A29 Limited.
|
8 |
+
|
9 |
+
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
10 |
+
# except for the third-party components listed below.
|
11 |
+
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
|
12 |
+
# in the respective licenses of these third-party components.
|
13 |
+
# Users must comply with all terms and conditions of original licenses of these third-party
|
14 |
+
# components and must ensure that the usage of the third party components adheres to
|
15 |
+
# all relevant laws and regulations.
|
16 |
+
|
17 |
+
# For avoidance of doubts, Hunyuan 3D means the large language models and
|
18 |
+
# their software and algorithms, including trained model weights, parameters (including
|
19 |
+
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
|
20 |
+
# fine-tuning enabling code and other elements of the foregoing made publicly available
|
21 |
+
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
22 |
+
import os , sys
|
23 |
+
sys.path.insert(0, f"{os.path.dirname(os.path.dirname(os.path.abspath(__file__)))}")
|
24 |
+
|
25 |
+
import torch
|
26 |
+
from diffusers import HunyuanDiTPipeline, AutoPipelineForText2Image
|
27 |
+
|
28 |
+
from infer.utils import seed_everything, timing_decorator, auto_amp_inference
|
29 |
+
from infer.utils import get_parameter_number, set_parameter_grad_false
|
30 |
+
|
31 |
+
|
32 |
+
class Text2Image():
    """Text-to-image wrapper around a HunyuanDiT diffusion pipeline.

    Produces a 1024x1024 image from a (Chinese) text prompt; a fixed
    style/quality suffix and a fixed negative prompt are appended
    internally before generation.
    """

    def __init__(self, pretrain="weights/hunyuanDiT", device="cuda:0", save_memory=None):
        '''
        pretrain: local path (or hub id) of the pretrained pipeline
        device: torch device string used for inference
        save_memory: if GPU memory is low, can set it; the pipeline then
            stays on CPU and is moved to the GPU only around each call
        '''
        self.save_memory = save_memory
        self.device = device
        self.pipe = AutoPipelineForText2Image.from_pretrained(
            pretrain,
            torch_dtype = torch.float16,
            # Perturbed-Attention Guidance, applied to transformer blocks 16-19.
            enable_pag = True,
            pag_applied_layers = ["blocks.(16|17|18|19)"]
        )
        # Inference only: freeze the transformer and report its size.
        set_parameter_grad_false(self.pipe.transformer)
        print('text2image transformer model', get_parameter_number(self.pipe.transformer))
        if not save_memory:
            self.pipe = self.pipe.to(device)
        # Negative prompt (Chinese): common quality/anatomy failure modes.
        self.neg_txt = "文本,特写,裁剪,出框,最差质量,低质量,JPEG伪影,PGLY,重复,病态,残缺,多余的手指,变异的手," \
                       "画得不好的手,画得不好的脸,变异,畸形,模糊,脱水,糟糕的解剖学,糟糕的比例,多余的肢体,克隆的脸," \
                       "毁容,恶心的比例,畸形的肢体,缺失的手臂,缺失的腿,额外的手臂,额外的腿,融合的手指,手指太多,长脖子"

    @torch.no_grad()
    @timing_decorator('text to image')
    @auto_amp_inference
    def __call__(self, *args, **kwargs):
        # When save_memory is set, shuttle the pipeline GPU<->CPU around
        # each call so it does not occupy VRAM between requests.
        if self.save_memory:
            self.pipe = self.pipe.to(self.device)
            torch.cuda.empty_cache()
            res = self.call(*args, **kwargs)
            self.pipe = self.pipe.to("cpu")
        else:
            res = self.call(*args, **kwargs)
        torch.cuda.empty_cache()
        return res

    def call(self, prompt, seed=0, steps=25):
        '''
        args:
            prompt: str
            seed: int
            steps: int
        return:
            rgb: PIL.Image
        '''
        print("prompt is:", prompt)
        # Append a fixed style/quality suffix (Chinese: "white background,
        # 3D style, best quality") to every prompt.
        prompt = prompt + ",白色背景,3D风格,最佳质量"
        seed_everything(seed)
        generator = torch.Generator(device=self.device)
        if seed is not None: generator = generator.manual_seed(int(seed))
        rgb = self.pipe(prompt=prompt, negative_prompt=self.neg_txt, num_inference_steps=steps,
                        pag_scale=1.3, width=1024, height=1024, generator=generator, return_dict=False)[0][0]
        torch.cuda.empty_cache()
        return rgb
|
85 |
+
|
86 |
+
if __name__ == "__main__":
    import argparse

    def get_args():
        """CLI options for running text-to-image generation standalone."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--text2image_path", default="weights/hunyuanDiT", type=str)
        parser.add_argument("--text_prompt", default="", type=str)
        parser.add_argument("--output_img_path", default="./outputs/test/img.jpg", type=str)
        parser.add_argument("--device", default="cuda:0", type=str)
        parser.add_argument("--seed", default=0, type=int)
        parser.add_argument("--steps", default=25, type=int)
        return parser.parse_args()
    args = get_args()

    # BUGFIX: --text2image_path was parsed but never forwarded, so a
    # custom checkpoint location was silently ignored.
    text2image_model = Text2Image(pretrain=args.text2image_path, device=args.device)
    rgb_img = text2image_model(args.text_prompt, seed=args.seed, steps=args.steps)
    rgb_img.save(args.output_img_path)
|
103 |
+
|
infer/utils.py
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Open Source Model Licensed under the Apache License Version 2.0 and Other Licenses of the Third-Party Components therein:
|
2 |
+
# The below Model in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
|
3 |
+
|
4 |
+
# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
|
5 |
+
# The below software and/or models in this distribution may have been
|
6 |
+
# modified by THL A29 Limited ("Tencent Modifications").
|
7 |
+
# All Tencent Modifications are Copyright (C) THL A29 Limited.
|
8 |
+
|
9 |
+
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
10 |
+
# except for the third-party components listed below.
|
11 |
+
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
|
12 |
+
# in the respective licenses of these third-party components.
|
13 |
+
# Users must comply with all terms and conditions of original licenses of these third-party
|
14 |
+
# components and must ensure that the usage of the third party components adheres to
|
15 |
+
# all relevant laws and regulations.
|
16 |
+
|
17 |
+
# For avoidance of doubts, Hunyuan 3D means the large language models and
|
18 |
+
# their software and algorithms, including trained model weights, parameters (including
|
19 |
+
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
|
20 |
+
# fine-tuning enabling code and other elements of the foregoing made publicly available
|
21 |
+
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
22 |
+
|
23 |
+
import os
|
24 |
+
import time
|
25 |
+
import random
|
26 |
+
import numpy as np
|
27 |
+
import torch
|
28 |
+
from torch.cuda.amp import autocast, GradScaler
|
29 |
+
from functools import wraps
|
30 |
+
|
31 |
+
def seed_everything(seed):
    """Seed Python's, NumPy's and PyTorch's RNGs for reproducible runs.

    Also exports the seed via the PL_GLOBAL_SEED environment variable.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    os.environ["PL_GLOBAL_SEED"] = str(seed)
|
39 |
+
|
40 |
+
def timing_decorator(category: str):
    """Decorator factory: print the wall-clock time of each call.

    *category* labels the timed operation in the printed line. A
    call counter is kept on the wrapped function object.
    """
    def decorator(func):
        func.call_count = 0

        @wraps(func)
        def wrapper(*args, **kwargs):
            started = time.time()
            result = func(*args, **kwargs)
            elapsed_time = time.time() - started
            func.call_count += 1
            print(f"[HunYuan3D]-[{category}], cost time: {elapsed_time:.4f}s")  # huiwen
            return result

        return wrapper
    return decorator
|
57 |
+
|
58 |
+
def auto_amp_inference(func):
    """Decorator: run *func* inside torch.cuda.amp.autocast.

    Enables mixed-precision execution of CUDA ops during inference;
    it is a no-op when CUDA is unavailable.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        with autocast():
            return func(*args, **kwargs)
    return wrapper
|
69 |
+
|
70 |
+
def get_parameter_number(model):
    """Return the total and trainable parameter counts of *model*."""
    counts = {'Total': 0, 'Trainable': 0}
    for param in model.parameters():
        numel = param.numel()
        counts['Total'] += numel
        if param.requires_grad:
            counts['Trainable'] += numel
    return counts
|
74 |
+
|
75 |
+
def set_parameter_grad_false(model):
    """Freeze *model*: mark every parameter as not requiring gradients."""
    for param in model.parameters():
        param.requires_grad_(False)
|
78 |
+
|
79 |
+
def str_to_bool(s):
    """Parse a yes/no style string into a bool (case-insensitive).

    Raises:
        ValueError: if *s* is not a recognised true/false spelling.
    """
    value = s.lower()
    if value in ('true', 't', 'yes', 'y', '1'):
        return True
    if value in ('false', 'f', 'no', 'n', '0'):
        return False
    # BUGFIX: the original raised a bare f-string, which produces
    # "TypeError: exceptions must derive from BaseException" and hides
    # the intended message. Raise a proper ValueError instead.
    raise ValueError(
        "bool arg must be one of ['true', 't', 'yes', 'y', '1', "
        "'false', 'f', 'no', 'n', '0']"
    )
|
infer/views_to_mesh.py
ADDED
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Open Source Model Licensed under the Apache License Version 2.0 and Other Licenses of the Third-Party Components therein:
|
2 |
+
# The below Model in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
|
3 |
+
|
4 |
+
# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
|
5 |
+
# The below software and/or models in this distribution may have been
|
6 |
+
# modified by THL A29 Limited ("Tencent Modifications").
|
7 |
+
# All Tencent Modifications are Copyright (C) THL A29 Limited.
|
8 |
+
|
9 |
+
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
10 |
+
# except for the third-party components listed below.
|
11 |
+
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
|
12 |
+
# in the respective licenses of these third-party components.
|
13 |
+
# Users must comply with all terms and conditions of original licenses of these third-party
|
14 |
+
# components and must ensure that the usage of the third party components adheres to
|
15 |
+
# all relevant laws and regulations.
|
16 |
+
|
17 |
+
# For avoidance of doubts, Hunyuan 3D means the large language models and
|
18 |
+
# their software and algorithms, including trained model weights, parameters (including
|
19 |
+
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
|
20 |
+
# fine-tuning enabling code and other elements of the foregoing made publicly available
|
21 |
+
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
22 |
+
|
23 |
+
import os, sys
|
24 |
+
sys.path.insert(0, f"{os.path.dirname(os.path.dirname(os.path.abspath(__file__)))}")
|
25 |
+
|
26 |
+
import time
|
27 |
+
import torch
|
28 |
+
import random
|
29 |
+
import numpy as np
|
30 |
+
from PIL import Image
|
31 |
+
from einops import rearrange
|
32 |
+
from PIL import Image, ImageSequence
|
33 |
+
|
34 |
+
from infer.utils import seed_everything, timing_decorator, auto_amp_inference
|
35 |
+
from infer.utils import get_parameter_number, set_parameter_grad_false, str_to_bool
|
36 |
+
from svrm.predictor import MV23DPredictor
|
37 |
+
|
38 |
+
|
39 |
+
class Views2Mesh():
    """Reconstruct a mesh from multi-view images via MV23DPredictor."""

    def __init__(self, mv23d_cfg_path, mv23d_ckt_path,
                 device="cuda:0", use_lite=False, save_memory=False):
        '''
        mv23d_cfg_path: config yaml file
        mv23d_ckt_path: path to ckpt
        use_lite: lite version
        save_memory: keep the model on CPU between calls and move it to
            the GPU only while a call is running
        '''
        self.mv23d_predictor = MV23DPredictor(mv23d_ckt_path, mv23d_cfg_path, device=device)
        self.mv23d_predictor.model.eval()
        # The std and lite multi-view pipelines emit views in different
        # orders; map both to the same canonical ordering.
        self.order = [0, 1, 2, 3, 4, 5] if use_lite else [0, 2, 4, 5, 3, 1]
        self.device = device
        self.save_memory = save_memory
        set_parameter_grad_false(self.mv23d_predictor.model)
        print('view2mesh model', get_parameter_number(self.mv23d_predictor.model))

    @torch.no_grad()
    @timing_decorator("views to mesh")
    @auto_amp_inference
    def __call__(self, *args, **kwargs):
        # When save_memory is set, shuttle the model GPU<->CPU around
        # each call so it does not occupy VRAM between requests.
        if self.save_memory:
            self.mv23d_predictor.model = self.mv23d_predictor.model.to(self.device)
            torch.cuda.empty_cache()
            res = self.call(*args, **kwargs)
            self.mv23d_predictor.model = self.mv23d_predictor.model.to("cpu")
        else:
            res = self.call(*args, **kwargs)
        torch.cuda.empty_cache()
        return res

    def call(
        self,
        views_pil=None,
        cond_pil=None,
        gif_pil=None,
        seed=0,
        target_face_count = 10000,
        do_texture_mapping = True,
        save_folder='./outputs/test'
    ):
        '''
        Pass views_pil and cond_pil together, or pass gif_pil alone.

        seed: int
        target_face_count: int
        save_folder: path to save mesh files
        '''
        save_dir = save_folder
        os.makedirs(save_dir, exist_ok=True)

        if views_pil is not None and cond_pil is not None:
            # Views come tiled in a 3x2 grid; split and reorder them.
            show_image = rearrange(np.asarray(views_pil, dtype=np.uint8),
                                   '(n h) (m w) c -> (n m) h w c', n=3, m=2)
            views = [Image.fromarray(show_image[idx]) for idx in self.order]
            image_list = [cond_pil] + views
            image_list = [img.convert('RGB') for img in image_list]
        elif gif_pil is not None:
            image_list = [img.convert('RGB') for img in ImageSequence.Iterator(gif_pil)]
        else:
            # BUGFIX: previously this fell through to an UnboundLocalError
            # on image_list; fail with an explicit message instead.
            raise ValueError(
                "Provide either views_pil together with cond_pil, or gif_pil."
            )

        image_input = image_list[0]
        # Move the first (conditioning) image to the end of the list.
        image_list = image_list[1:] + image_list[:1]

        seed_everything(seed)
        self.mv23d_predictor.predict(
            image_list,
            save_dir = save_dir,
            image_input = image_input,
            target_face_count = target_face_count,
            do_texture_mapping = do_texture_mapping
        )
        torch.cuda.empty_cache()
        return save_dir
|
111 |
+
|
112 |
+
|
113 |
+
if __name__ == "__main__":

    import argparse

    def get_args():
        """CLI options for running views-to-mesh reconstruction standalone."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--views_path", type=str, required=True)
        parser.add_argument("--cond_path", type=str, required=True)
        parser.add_argument("--save_folder", default="./outputs/test/", type=str)
        parser.add_argument("--mv23d_cfg_path", default="./svrm/configs/svrm.yaml", type=str)
        parser.add_argument("--mv23d_ckt_path", default="weights/svrm/svrm.safetensors", type=str)
        parser.add_argument("--max_faces_num", default=90000, type=int,
                            help="max num of face, suggest 90000 for effect, 10000 for speed")
        parser.add_argument("--device", default="cuda:0", type=str)
        parser.add_argument("--use_lite", default='false', type=str)
        parser.add_argument("--do_texture_mapping", default='false', type=str)

        return parser.parse_args()

    args = get_args()
    args.use_lite = str_to_bool(args.use_lite)
    args.do_texture_mapping = str_to_bool(args.do_texture_mapping)

    views = Image.open(args.views_path)
    cond = Image.open(args.cond_path)

    views_to_mesh_model = Views2Mesh(
        args.mv23d_cfg_path,
        args.mv23d_ckt_path,
        device = args.device,
        use_lite = args.use_lite
    )

    # BUGFIX: the original passed 0 as the third positional argument,
    # which lands in the gif_pil slot of Views2Mesh.call, not seed
    # (harmless only because seed already defaults to 0). Pass seed
    # explicitly by keyword.
    views_to_mesh_model(
        views, cond, seed = 0,
        target_face_count = args.max_faces_num,
        save_folder = args.save_folder,
        do_texture_mapping = args.do_texture_mapping
    )
|
152 |
+
|
main.py
ADDED
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Open Source Model Licensed under the Apache License Version 2.0 and Other Licenses of the Third-Party Components therein:
|
2 |
+
# The below Model in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
|
3 |
+
|
4 |
+
# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
|
5 |
+
# The below software and/or models in this distribution may have been
|
6 |
+
# modified by THL A29 Limited ("Tencent Modifications").
|
7 |
+
# All Tencent Modifications are Copyright (C) THL A29 Limited.
|
8 |
+
|
9 |
+
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
10 |
+
# except for the third-party components listed below.
|
11 |
+
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
|
12 |
+
# in the respective licenses of these third-party components.
|
13 |
+
# Users must comply with all terms and conditions of original licenses of these third-party
|
14 |
+
# components and must ensure that the usage of the third party components adheres to
|
15 |
+
# all relevant laws and regulations.
|
16 |
+
|
17 |
+
# For avoidance of doubts, Hunyuan 3D means the large language models and
|
18 |
+
# their software and algorithms, including trained model weights, parameters (including
|
19 |
+
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
|
20 |
+
# fine-tuning enabling code and other elements of the foregoing made publicly available
|
21 |
+
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
22 |
+
|
23 |
+
import os
|
24 |
+
import warnings
|
25 |
+
import torch
|
26 |
+
from PIL import Image
|
27 |
+
import argparse
|
28 |
+
|
29 |
+
from infer import Text2Image, Removebg, Image2Views, Views2Mesh, GifRenderer
|
30 |
+
|
31 |
+
warnings.simplefilter('ignore', category=UserWarning)
|
32 |
+
warnings.simplefilter('ignore', category=FutureWarning)
|
33 |
+
warnings.simplefilter('ignore', category=DeprecationWarning)
|
34 |
+
|
35 |
+
def get_args(argv=None):
    """Parse command-line options for the text/image -> 3D generation pipeline.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in which
            case ``sys.argv[1:]`` is parsed — identical to the previous
            behavior. Passing an explicit list makes the parser unit-testable.

    Returns:
        argparse.Namespace with all pipeline options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--use_lite", default=False, action="store_true",
                        help="use the lite (faster, lower-quality) model variants")
    # Multi-view -> 3D reconstruction model config / checkpoint.
    parser.add_argument("--mv23d_cfg_path", default="./svrm/configs/svrm.yaml", type=str)
    parser.add_argument("--mv23d_ckt_path", default="weights/svrm/svrm.safetensors", type=str)
    parser.add_argument("--text2image_path", default="weights/hunyuanDiT", type=str)
    parser.add_argument("--save_folder", default="./outputs/test/", type=str)
    # Exactly one of --text_prompt / --image_prompt must be supplied (enforced
    # by the caller, not here, to keep this function a pure parser).
    parser.add_argument("--text_prompt", default="", type=str)
    parser.add_argument("--image_prompt", default="", type=str)
    parser.add_argument("--device", default="cuda:0", type=str)
    # Text-to-image sampling options.
    parser.add_argument("--t2i_seed", default=0, type=int)
    parser.add_argument("--t2i_steps", default=25, type=int)
    # Multi-view generation sampling options.
    parser.add_argument("--gen_seed", default=0, type=int)
    parser.add_argument("--gen_steps", default=50, type=int)
    parser.add_argument("--max_faces_num", default=80000, type=int,
                        help="max num of face, suggest 80000 for effect, 10000 for speed")
    parser.add_argument("--save_memory", default=False, action="store_true")
    parser.add_argument("--do_texture_mapping", default=False, action="store_true")
    parser.add_argument("--do_render", default=False, action="store_true")
    return parser.parse_args(argv)
|
87 |
+
|
88 |
+
|
89 |
+
if __name__ == "__main__":
|
90 |
+
args = get_args()
|
91 |
+
|
92 |
+
assert not (args.text_prompt and args.image_prompt), "Text and image can only be given to one"
|
93 |
+
assert args.text_prompt or args.image_prompt, "Text and image can only be given to one"
|
94 |
+
|
95 |
+
# init model
|
96 |
+
rembg_model = Removebg()
|
97 |
+
image_to_views_model = Image2Views(
|
98 |
+
device=args.device,
|
99 |
+
use_lite=args.use_lite,
|
100 |
+
save_memory=args.save_memory
|
101 |
+
)
|
102 |
+
|
103 |
+
views_to_mesh_model = Views2Mesh(
|
104 |
+
args.mv23d_cfg_path,
|
105 |
+
args.mv23d_ckt_path,
|
106 |
+
args.device,
|
107 |
+
use_lite=args.use_lite,
|
108 |
+
save_memory=args.save_memory
|
109 |
+
)
|
110 |
+
|
111 |
+
if args.text_prompt:
|
112 |
+
text_to_image_model = Text2Image(
|
113 |
+
pretrain = args.text2image_path,
|
114 |
+
device = args.device,
|
115 |
+
save_memory = args.save_memory
|
116 |
+
)
|
117 |
+
if args.do_render:
|
118 |
+
gif_renderer = GifRenderer(device=args.device)
|
119 |
+
|
120 |
+
# ---- ----- ---- ---- ---- ----
|
121 |
+
|
122 |
+
os.makedirs(args.save_folder, exist_ok=True)
|
123 |
+
|
124 |
+
# stage 1, text to image
|
125 |
+
if args.text_prompt:
|
126 |
+
res_rgb_pil = text_to_image_model(
|
127 |
+
args.text_prompt,
|
128 |
+
seed=args.t2i_seed,
|
129 |
+
steps=args.t2i_steps
|
130 |
+
)
|
131 |
+
res_rgb_pil.save(os.path.join(args.save_folder, "img.jpg"))
|
132 |
+
elif args.image_prompt:
|
133 |
+
res_rgb_pil = Image.open(args.image_prompt)
|
134 |
+
|
135 |
+
# stage 2, remove back ground
|
136 |
+
res_rgba_pil = rembg_model(res_rgb_pil)
|
137 |
+
res_rgb_pil.save(os.path.join(args.save_folder, "img_nobg.png"))
|
138 |
+
|
139 |
+
# stage 3, image to views
|
140 |
+
(views_grid_pil, cond_img), view_pil_list = image_to_views_model(
|
141 |
+
res_rgba_pil,
|
142 |
+
seed = args.gen_seed,
|
143 |
+
steps = args.gen_steps
|
144 |
+
)
|
145 |
+
views_grid_pil.save(os.path.join(args.save_folder, "views.jpg"))
|
146 |
+
|
147 |
+
# stage 4, views to mesh
|
148 |
+
views_to_mesh_model(
|
149 |
+
views_grid_pil,
|
150 |
+
cond_img,
|
151 |
+
seed = args.gen_seed,
|
152 |
+
target_face_count = args.max_faces_num,
|
153 |
+
save_folder = args.save_folder,
|
154 |
+
do_texture_mapping = args.do_texture_mapping
|
155 |
+
)
|
156 |
+
|
157 |
+
# stage 5, render gif
|
158 |
+
if args.do_render:
|
159 |
+
gif_renderer(
|
160 |
+
os.path.join(args.save_folder, 'mesh.obj'),
|
161 |
+
gif_dst_path = os.path.join(args.save_folder, 'output.gif'),
|
162 |
+
)
|
mvd/__init__.py
ADDED
File without changes
|