toshas commited on
Commit
a45988a
1 Parent(s): 249df4a

initial commit

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitignore +28 -0
  2. .gradio/cached_examples/examples_video/Generated Depth Video/4af7db82fa84bd9e40e4/horse_depth_colored.mp4 +3 -0
  3. .gradio/cached_examples/examples_video/Generated Depth Video/4e1933c53ce6c8430349/walking_depth_colored.mp4 +3 -0
  4. .gradio/cached_examples/examples_video/Generated Depth Video/5f3610b4ebaa6b9eb736/gokart_depth_colored.mp4 +3 -0
  5. .gradio/cached_examples/examples_video/Preprocessed video/15b5cb7fff54245476a1/horse_depth_input.mp4 +3 -0
  6. .gradio/cached_examples/examples_video/Preprocessed video/70d664d4051498fb4a7e/walking_depth_input.mp4 +3 -0
  7. .gradio/cached_examples/examples_video/Preprocessed video/7f8ff10ad824469dfd1e/gokart_depth_input.mp4 +3 -0
  8. .gradio/cached_examples/examples_video/log.csv +4 -0
  9. LICENSE.txt +177 -0
  10. README.md +5 -5
  11. app.py +271 -0
  12. colorize.py +93 -0
  13. files/gokart.mp4 +3 -0
  14. files/horse.mp4 +3 -0
  15. files/walking.mp4 +3 -0
  16. gradio_patches/examples.py +13 -0
  17. requirements.txt +15 -0
  18. rollingdepth_src/.gitignore +28 -0
  19. rollingdepth_src/LICENSE-MODEL.txt +69 -0
  20. rollingdepth_src/LICENSE.txt +177 -0
  21. rollingdepth_src/README.md +127 -0
  22. rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/bug-report.yml +110 -0
  23. rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/config.yml +4 -0
  24. rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/feature_request.md +20 -0
  25. rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/feedback.md +12 -0
  26. rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/new-model-addition.yml +31 -0
  27. rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/translate.md +29 -0
  28. rollingdepth_src/diffusers/.github/PULL_REQUEST_TEMPLATE.md +61 -0
  29. rollingdepth_src/diffusers/.github/actions/setup-miniconda/action.yml +146 -0
  30. rollingdepth_src/diffusers/.github/workflows/benchmark.yml +66 -0
  31. rollingdepth_src/diffusers/.github/workflows/build_docker_images.yml +103 -0
  32. rollingdepth_src/diffusers/.github/workflows/build_documentation.yml +27 -0
  33. rollingdepth_src/diffusers/.github/workflows/build_pr_documentation.yml +23 -0
  34. rollingdepth_src/diffusers/.github/workflows/mirror_community_pipeline.yml +102 -0
  35. rollingdepth_src/diffusers/.github/workflows/nightly_tests.yml +353 -0
  36. rollingdepth_src/diffusers/.github/workflows/notify_slack_about_release.yml +23 -0
  37. rollingdepth_src/diffusers/.github/workflows/pr_dependency_test.yml +35 -0
  38. rollingdepth_src/diffusers/.github/workflows/pr_flax_dependency_test.yml +38 -0
  39. rollingdepth_src/diffusers/.github/workflows/pr_test_fetcher.yml +177 -0
  40. rollingdepth_src/diffusers/.github/workflows/pr_test_peft_backend.yml +132 -0
  41. rollingdepth_src/diffusers/.github/workflows/pr_tests.yml +235 -0
  42. rollingdepth_src/diffusers/.github/workflows/pr_torch_dependency_test.yml +36 -0
  43. rollingdepth_src/diffusers/.github/workflows/push_tests.yml +387 -0
  44. rollingdepth_src/diffusers/.github/workflows/push_tests_fast.yml +125 -0
  45. rollingdepth_src/diffusers/.github/workflows/push_tests_mps.yml +75 -0
  46. rollingdepth_src/diffusers/.github/workflows/pypi_publish.yaml +81 -0
  47. rollingdepth_src/diffusers/.github/workflows/run_tests_from_a_pr.yml +74 -0
  48. rollingdepth_src/diffusers/.github/workflows/ssh-pr-runner.yml +40 -0
  49. rollingdepth_src/diffusers/.github/workflows/ssh-runner.yml +47 -0
  50. rollingdepth_src/diffusers/.github/workflows/stale.yml +27 -0
.gitignore ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # ignore these folders
3
+ /checkpoint
4
+ /checkpoint/*
5
+ /data
6
+ /data/*
7
+ /input
8
+ /input/*
9
+ /output
10
+ /output/*
11
+ /temp
12
+ /temp/*
13
+ /venv
14
+ /venv/*
15
+ /cache
16
+ /cache/*
17
+ /.slurm
18
+
19
+ **/.ipynb_checkpoints/
20
+ .idea
21
+
22
+ # ignore these types
23
+ *.pyc
24
+ *.out
25
+ *.log
26
+ *.mexa64
27
+ *.pdf
28
+ *.tar
.gradio/cached_examples/examples_video/Generated Depth Video/4af7db82fa84bd9e40e4/horse_depth_colored.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da04b50b2d7068a3d725f5522a2040806a379255a42f6b3253b4cb88db7484c9
3
+ size 766319
.gradio/cached_examples/examples_video/Generated Depth Video/4e1933c53ce6c8430349/walking_depth_colored.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2438cf73b0b10f27946c5e468a62ff0d6ffb98107da08fc9ee1f148cfb9f479f
3
+ size 753780
.gradio/cached_examples/examples_video/Generated Depth Video/5f3610b4ebaa6b9eb736/gokart_depth_colored.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74273c4d67bd9e1c2b3db1cdbf7f0110a7f174c379e3bf1347eb95150eb0a7e9
3
+ size 943825
.gradio/cached_examples/examples_video/Preprocessed video/15b5cb7fff54245476a1/horse_depth_input.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fba86e8a8688a4d700d177ce1668b31da653de0233aec371e49481c3b8c11d6f
3
+ size 769081
.gradio/cached_examples/examples_video/Preprocessed video/70d664d4051498fb4a7e/walking_depth_input.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b6c0ba12b48a68893ec44c6ba28ea649108a5e01f3ad3a43027a21b13f3c899
3
+ size 763862
.gradio/cached_examples/examples_video/Preprocessed video/7f8ff10ad824469dfd1e/gokart_depth_input.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2e5abd9c11980765eb1bb8d67fa325d55c9a560407729390a07491536936e2c
3
+ size 958935
.gradio/cached_examples/examples_video/log.csv ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Preprocessed video,Generated Depth Video,timestamp
2
+ "{""video"": {""path"": "".gradio/cached_examples/examples_video/Preprocessed video/7f8ff10ad824469dfd1e/gokart_depth_input.mp4"", ""url"": ""/gradio_api/file=/private/var/folders/g1/9h6rntcj613_k_5kbhvmywsr0000gn/T/gradio/405a2dbac6ae1c2bc678d41f67c63dda9671314c045a1eb5ae2e1aeb4fd08550/gokart_depth_input.mp4"", ""size"": null, ""orig_name"": ""gokart_depth_input.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}","{""video"": {""path"": "".gradio/cached_examples/examples_video/Generated Depth Video/5f3610b4ebaa6b9eb736/gokart_depth_colored.mp4"", ""url"": ""/gradio_api/file=/private/var/folders/g1/9h6rntcj613_k_5kbhvmywsr0000gn/T/gradio/405a2dbac6ae1c2bc678d41f67c63dda9671314c045a1eb5ae2e1aeb4fd08550/gokart_depth_colored.mp4"", ""size"": null, ""orig_name"": ""gokart_depth_colored.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}",2024-12-01 22:25:30.959210
3
+ "{""video"": {""path"": "".gradio/cached_examples/examples_video/Preprocessed video/15b5cb7fff54245476a1/horse_depth_input.mp4"", ""url"": ""/gradio_api/file=/private/var/folders/g1/9h6rntcj613_k_5kbhvmywsr0000gn/T/gradio/405a2dbac6ae1c2bc678d41f67c63dda9671314c045a1eb5ae2e1aeb4fd08550/horse_depth_input.mp4"", ""size"": null, ""orig_name"": ""horse_depth_input.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}","{""video"": {""path"": "".gradio/cached_examples/examples_video/Generated Depth Video/4af7db82fa84bd9e40e4/horse_depth_colored.mp4"", ""url"": ""/gradio_api/file=/private/var/folders/g1/9h6rntcj613_k_5kbhvmywsr0000gn/T/gradio/405a2dbac6ae1c2bc678d41f67c63dda9671314c045a1eb5ae2e1aeb4fd08550/horse_depth_colored.mp4"", ""size"": null, ""orig_name"": ""horse_depth_colored.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}",2024-12-01 22:25:31.135801
4
+ "{""video"": {""path"": "".gradio/cached_examples/examples_video/Preprocessed video/70d664d4051498fb4a7e/walking_depth_input.mp4"", ""url"": ""/gradio_api/file=/private/var/folders/g1/9h6rntcj613_k_5kbhvmywsr0000gn/T/gradio/405a2dbac6ae1c2bc678d41f67c63dda9671314c045a1eb5ae2e1aeb4fd08550/walking_depth_input.mp4"", ""size"": null, ""orig_name"": ""walking_depth_input.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}","{""video"": {""path"": "".gradio/cached_examples/examples_video/Generated Depth Video/4e1933c53ce6c8430349/walking_depth_colored.mp4"", ""url"": ""/gradio_api/file=/private/var/folders/g1/9h6rntcj613_k_5kbhvmywsr0000gn/T/gradio/405a2dbac6ae1c2bc678d41f67c63dda9671314c045a1eb5ae2e1aeb4fd08550/walking_depth_colored.mp4"", ""size"": null, ""orig_name"": ""walking_depth_colored.mp4"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, ""subtitles"": null}",2024-12-01 22:25:31.313248
LICENSE.txt ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
README.md CHANGED
@@ -1,14 +1,14 @@
1
  ---
2
- title: Rollingdepth
3
- emoji: 🦀
4
- colorFrom: indigo
5
- colorTo: blue
6
  sdk: gradio
7
  sdk_version: 5.7.1
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
- short_description: 'Rolling Depth: Video Depth without Video Models'
12
  ---
13
 
14
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: RollingDepth
3
+ emoji: 🛹
4
+ colorFrom: yellow
5
+ colorTo: pink
6
  sdk: gradio
7
  sdk_version: 5.7.1
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
11
+ short_description: Video Depth without Video Models
12
  ---
13
 
14
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 Anton Obukhov, ETH Zurich. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # --------------------------------------------------------------------------
15
+ # If you find this code useful, we kindly ask you to cite our paper in your work.
16
+ # Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
17
+ # More information about the method can be found at https://marigoldmonodepth.github.io
18
+ # --------------------------------------------------------------------------
19
+
20
+ import functools
21
+ import os
22
+ import sys
23
+ import tempfile
24
+
25
+ import av
26
+ import numpy as np
27
+
28
+ import spaces
29
+ import gradio as gr
30
+ import torch as torch
31
+ import einops
32
+
33
+ from huggingface_hub import login
34
+
35
+ from gradio_patches.examples import Examples
36
+ from colorize import colorize_depth_multi_thread
37
+ from video_io import get_video_fps, write_video_from_numpy
38
+
39
+ VERBOSE = False
40
+ MAX_FRAMES = 100
41
+
42
+
43
+ def process(pipe, device, path_input):
44
+ print(f"Processing {path_input}")
45
+
46
+ path_output_dir = tempfile.mkdtemp()
47
+ os.makedirs(path_output_dir, exist_ok=True)
48
+
49
+ name_base = os.path.splitext(os.path.basename(path_input))[0]
50
+ path_out_in = os.path.join(path_output_dir, f"{name_base}_depth_input.mp4")
51
+ path_out_vis = os.path.join(path_output_dir, f"{name_base}_depth_colored.mp4")
52
+
53
+ output_fps = int(get_video_fps(path_input))
54
+
55
+ container = av.open(path_input)
56
+ stream = container.streams.video[0]
57
+ fps = float(stream.average_rate)
58
+ duration_sec = float(stream.duration * stream.time_base) if stream.duration else 0
59
+ total_frames = int(duration_sec * fps)
60
+ if total_frames > MAX_FRAMES:
61
+ gr.Warning(
62
+ f"Only the first {MAX_FRAMES} frames (~{MAX_FRAMES / fps:.1f} sec.) will be processed for demonstration; "
63
+ f"use the code from GitHub for full processing"
64
+ )
65
+
66
+ generator = torch.Generator(device=device)
67
+ generator.manual_seed(2024)
68
+
69
+ pipe_out: RollingDepthOutput = pipe(
70
+ # input setting
71
+ input_video_path=path_input,
72
+ start_frame=0,
73
+ frame_count=min(MAX_FRAMES, total_frames), # 0 = all
74
+ processing_res=768,
75
+ # infer setting
76
+ dilations=[1, 25],
77
+ cap_dilation=True,
78
+ snippet_lengths=[3],
79
+ init_infer_steps=[1],
80
+ strides=[1],
81
+ coalign_kwargs=None,
82
+ refine_step=0, # 0 = off
83
+ max_vae_bs=8, # batch size for encoder/decoder
84
+ # other settings
85
+ generator=generator,
86
+ verbose=VERBOSE,
87
+ # output settings
88
+ restore_res=False,
89
+ unload_snippet=False,
90
+ )
91
+
92
+ depth_pred = pipe_out.depth_pred # [N 1 H W]
93
+
94
+ # Colorize results
95
+ cmap = "Spectral_r"
96
+ colored_np = colorize_depth_multi_thread(
97
+ depth=depth_pred.numpy(),
98
+ valid_mask=None,
99
+ chunk_size=4,
100
+ num_threads=4,
101
+ color_map=cmap,
102
+ verbose=VERBOSE,
103
+ ) # [n h w 3], in [0, 255]
104
+
105
+ write_video_from_numpy(
106
+ frames=colored_np,
107
+ output_path=path_out_vis,
108
+ fps=output_fps,
109
+ crf=23,
110
+ preset="medium",
111
+ verbose=VERBOSE,
112
+ )
113
+
114
+ # Save rgb
115
+ rgb = (pipe_out.input_rgb.numpy() * 255).astype(np.uint8) # [N 3 H W]
116
+ rgb = einops.rearrange(rgb, "n c h w -> n h w c")
117
+ write_video_from_numpy(
118
+ frames=rgb,
119
+ output_path=path_out_in,
120
+ fps=output_fps,
121
+ crf=23,
122
+ preset="medium",
123
+ verbose=VERBOSE,
124
+ )
125
+
126
+ return path_out_in, path_out_vis
127
+
128
+
129
+ def run_demo_server(pipe, device):
130
+ process_pipe = spaces.GPU(functools.partial(process, pipe, device))
131
+ os.environ["GRADIO_ALLOW_FLAGGING"] = "never"
132
+
133
+ with gr.Blocks(
134
+ analytics_enabled=False,
135
+ title="RollingDepth",
136
+ css="""
137
+ h1 {
138
+ text-align: center;
139
+ display: block;
140
+ }
141
+ h2 {
142
+ text-align: center;
143
+ display: block;
144
+ }
145
+ h3 {
146
+ text-align: center;
147
+ display: block;
148
+ }
149
+ """,
150
+ ) as demo:
151
+ gr.HTML(
152
+ """
153
+ <h1>🛹 RollingDepth: Video Depth without Video Models</h1>
154
+ <div style="text-align: center; margin-top: 20px;">
155
+ <a title="Website" href="https://rollingdepth.github.io" target="_blank" rel="noopener noreferrer" style="display: inline-block; margin-right: 4px;">
156
+ <img src="https://www.obukhov.ai/img/badges/badge-website.svg" alt="Website Badge">
157
+ </a>
158
+ <a title="arXiv" href="https://arxiv.org/abs/2411.xxxxx" target="_blank" rel="noopener noreferrer" style="display: inline-block; margin-right: 4px;">
159
+ <img src="https://www.obukhov.ai/img/badges/badge-pdf.svg" alt="arXiv Badge">
160
+ </a>
161
+ <a title="GitHub" href="https://github.com/prs-eth/rollingdepth" target="_blank" rel="noopener noreferrer" style="display: inline-block; margin-right: 4px;">
162
+ <img src="https://img.shields.io/github/stars/prs-eth/rollingdepth?label=GitHub%20%E2%98%85&logo=github&color=C8C" alt="GitHub Stars Badge">
163
+ </a>
164
+ <a title="Social" href="https://twitter.com/antonobukhov1" target="_blank" rel="noopener noreferrer" style="display: inline-block; margin-right: 4px;">
165
+ <img src="https://www.obukhov.ai/img/badges/badge-social.svg" alt="social">
166
+ </a>
167
+ </div>
168
+ <p style="margin-top: 20px; text-align: justify;">
169
+ RollingDepth is the state-of-the-art depth estimator for videos in the wild. Upload your video into the
170
+ <b>left</b> pane, or click any of the <b>examples</b> below. The result preview will be computed and
171
+ appear in the <b>right</b> panes. For full functionality, use the code on GitHub.
172
+ <b>TIP:</b> When running out of GPU time, fork the demo.
173
+ </p>
174
+ """
175
+ )
176
+
177
+ with gr.Row(equal_height=True):
178
+ with gr.Column(scale=1):
179
+ input_video = gr.Video(label="Input Video")
180
+ with gr.Column(scale=2):
181
+ with gr.Row(equal_height=True):
182
+ output_video_1 = gr.Video(
183
+ label="Preprocessed video",
184
+ interactive=False,
185
+ autoplay=True,
186
+ loop=True,
187
+ show_share_button=True,
188
+ scale=5,
189
+ )
190
+ output_video_2 = gr.Video(
191
+ label="Generated Depth Video",
192
+ interactive=False,
193
+ autoplay=True,
194
+ loop=True,
195
+ show_share_button=True,
196
+ scale=5,
197
+ )
198
+
199
+ with gr.Row(equal_height=True):
200
+ with gr.Column(scale=1):
201
+ with gr.Row(equal_height=False):
202
+ generate_btn = gr.Button("Generate")
203
+ with gr.Column(scale=2):
204
+ pass
205
+
206
+ Examples(
207
+ examples=[
208
+ ["files/gokart.mp4"],
209
+ ["files/horse.mp4"],
210
+ ["files/walking.mp4"],
211
+ ],
212
+ inputs=[input_video],
213
+ outputs=[output_video_1, output_video_2],
214
+ fn=process_pipe,
215
+ cache_examples=True,
216
+ directory_name="examples_video",
217
+ )
218
+
219
+ generate_btn.click(
220
+ fn=process_pipe,
221
+ inputs=[input_video],
222
+ outputs=[output_video_1, output_video_2],
223
+ )
224
+
225
+ demo.queue(
226
+ api_open=False,
227
+ ).launch(
228
+ server_name="0.0.0.0",
229
+ server_port=7860,
230
+ )
231
+
232
+
233
+ def main():
234
+ os.system("pip freeze")
235
+ os.system("pip uninstall -y diffusers")
236
+ os.system("pip install rollingdepth_src/diffusers")
237
+ os.system("pip freeze")
238
+
239
+ if "HF_TOKEN_LOGIN" in os.environ:
240
+ login(token=os.environ["HF_TOKEN_LOGIN"])
241
+
242
+ if torch.cuda.is_available():
243
+ device = torch.device("cuda")
244
+ elif torch.backends.mps.is_available():
245
+ device = torch.device("mps")
246
+ else:
247
+ device = torch.device("cpu")
248
+
249
+ sys.path.append(os.path.join(os.path.dirname(__file__), "rollingdepth_src"))
250
+ from rollingdepth import RollingDepthOutput, RollingDepthPipeline
251
+
252
+ pipe: RollingDepthPipeline = RollingDepthPipeline.from_pretrained(
253
+ "prs-eth/rollingdepth-v1-0",
254
+ torch_dtype=torch.float16,
255
+ )
256
+ pipe.set_progress_bar_config(disable=True)
257
+
258
+ try:
259
+ import xformers
260
+
261
+ pipe.enable_xformers_memory_efficient_attention()
262
+ except:
263
+ pass # run without xformers
264
+
265
+ pipe = pipe.to(device)
266
+
267
+ run_demo_server(pipe, device)
268
+
269
+
270
+ if __name__ == "__main__":
271
+ main()
colorize.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Bingxin Ke
2
+ # Last modified: 2024-11-25
3
+
4
+ import concurrent.futures
5
+ from typing import Union
6
+
7
+ import matplotlib
8
+ import numpy as np
9
+ from tqdm import tqdm
10
+
11
+
12
+ def colorize_depth(
13
+ depth: np.ndarray,
14
+ min_depth: float,
15
+ max_depth: float,
16
+ cmap: str = "Spectral_r",
17
+ valid_mask: Union[np.ndarray, None] = None,
18
+ ) -> np.ndarray:
19
+ assert len(depth.shape) >= 2, "Invalid dimension"
20
+
21
+ if depth.ndim < 3:
22
+ depth = depth[np.newaxis, :, :]
23
+
24
+ # colorize
25
+ cm = matplotlib.colormaps[cmap]
26
+ depth = ((depth - min_depth) / (max_depth - min_depth)).clip(0, 1)
27
+ img_colored_np = cm(depth, bytes=False)[:, :, :, 0:3] # value from 0 to 1
28
+
29
+ if valid_mask is not None:
30
+ valid_mask = valid_mask.squeeze() # [H, W] or [B, H, W]
31
+ if valid_mask.ndim < 3:
32
+ valid_mask = valid_mask[np.newaxis, np.newaxis, :, :]
33
+ else:
34
+ valid_mask = valid_mask[:, np.newaxis, :, :]
35
+ valid_mask = np.repeat(valid_mask, 3, axis=1)
36
+ img_colored_np[~valid_mask] = 0
37
+
38
+ return img_colored_np
39
+
40
+
41
+ def colorize_depth_multi_thread(
42
+ depth: np.ndarray,
43
+ valid_mask: Union[np.ndarray, None] = None,
44
+ chunk_size: int = 4,
45
+ num_threads: int = 4,
46
+ color_map: str = "Spectral",
47
+ verbose: bool = False,
48
+ ) -> np.ndarray:
49
+ depth = depth.squeeze(1)
50
+ assert 3 == depth.ndim
51
+
52
+ n_frame = depth.shape[0]
53
+
54
+ if valid_mask is None:
55
+ valid_depth = depth
56
+ else:
57
+ valid_depth = depth[valid_mask]
58
+ min_depth = valid_depth.min()
59
+ max_depth = valid_depth.max()
60
+
61
+ def process_chunk(chunk):
62
+ chunk = colorize_depth(
63
+ chunk, min_depth=min_depth, max_depth=max_depth, cmap=color_map
64
+ )
65
+ chunk = (chunk * 255).astype(np.uint8)
66
+ return chunk
67
+
68
+ # Pre-allocate the full array
69
+ colored = np.empty((*depth.shape[:3], 3), dtype=np.uint8)
70
+
71
+ with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
72
+ # Submit all tasks and store futures with their corresponding indices
73
+ future_to_index = {
74
+ executor.submit(process_chunk, depth[i : i + chunk_size]): i
75
+ for i in range(0, n_frame, chunk_size)
76
+ }
77
+
78
+ # Process futures in the order they were submitted
79
+ chunk_iterable = concurrent.futures.as_completed(future_to_index)
80
+ if verbose:
81
+ chunk_iterable = tqdm(
82
+ chunk_iterable,
83
+ desc=" colorizing",
84
+ leave=False,
85
+ total=len(future_to_index),
86
+ )
87
+ for future in chunk_iterable:
88
+ index = future_to_index[future]
89
+ start = index
90
+ end = min(index + chunk_size, n_frame)
91
+ result = future.result()
92
+ colored[start:end] = result
93
+ return colored
files/gokart.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7fa3fb717be03639504cf7e4ffa08dcd0f620a178baddbf2b9f22f508af9ad9e
3
+ size 29193147
files/horse.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f62a7e2a330cb15c30e923cf434956485d7498164288d356eb189a75bd20b652
3
+ size 8611545
files/walking.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e6942c65e9e1fc4c21c75ba352f2d0cc12eb45e52c888429fda1c913d79ee0f
3
+ size 7886417
gradio_patches/examples.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+
3
+ import gradio
4
+ from gradio.utils import get_cache_folder
5
+
6
+
7
+ class Examples(gradio.helpers.Examples):
8
+ def __init__(self, *args, directory_name=None, **kwargs):
9
+ super().__init__(*args, **kwargs, _initiated_directly=False)
10
+ if directory_name is not None:
11
+ self.cached_folder = get_cache_folder() / directory_name
12
+ self.cached_file = Path(self.cached_folder) / "log.csv"
13
+ self.create()
requirements.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ torch==2.4.1
2
+ torchvision==0.19.1
3
+ transformers>=4.32.1
4
+ xformers==0.0.28.post1
5
+ einops
6
+ matplotlib
7
+ pyav>=13.1.0
8
+ omegaconf
9
+
10
+ gradio==5.7.1
11
+ accelerate
12
+ safetensors
13
+ spaces
14
+ numpy
15
+ pillow
rollingdepth_src/.gitignore ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # ignore these folders
3
+ /checkpoint
4
+ /checkpoint/*
5
+ /data
6
+ /data/*
7
+ /input
8
+ /input/*
9
+ /output
10
+ /output/*
11
+ /temp
12
+ /temp/*
13
+ /venv
14
+ /venv/*
15
+ /cache
16
+ /cache/*
17
+ /.slurm
18
+
19
+ **/.ipynb_checkpoints/
20
+ .idea
21
+
22
+ # ignore these types
23
+ *.pyc
24
+ *.out
25
+ *.log
26
+ *.mexa64
27
+ *.pdf
28
+ *.tar
rollingdepth_src/LICENSE-MODEL.txt ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ OpenRAIL++-M License
2
+
3
+ Copyright (c) 2024 PRS, ETH Zurich
4
+
5
+ Open RAIL++-M
6
+ dated November 22, 2024
7
+
8
+ (This license is based on the CreativeML Open RAIL-M license for stable diffusion.)
9
+
10
+ Section I: PREAMBLE
11
+
12
+ [describe reasoning for the license]
13
+
14
+ NOW THEREFORE, You and Licensor agree as follows:
15
+
16
+ 1. Definitions
17
+
18
+ - "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document.
19
+ - "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License.
20
+ - "Output" means the results of operating a Model as embodied in informational content resulting therefrom.
21
+ - "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material.
22
+ - "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model.
23
+ - "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any.
24
+ - "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access.
25
+ - "Licensor" means the copyright owner or entity authorized by the copyright owner that is granting the License, including the persons or entities that may have rights in the Model and/or distributing the Model.
26
+ - "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, image generator.
27
+ - "Third Parties" means individuals or legal entities that are not under common control with Licensor or You.
28
+ - "Contribution" means any work of authorship, including the original version of the Model and any modifications or additions to that Model or Derivatives of the Model thereof, that is intentionally submitted to Licensor for inclusion in the Model by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Model, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
29
+ - "Contributor" means Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Model.
30
+
31
+ Section II: INTELLECTUAL PROPERTY RIGHTS
32
+
33
+ Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III.
34
+
35
+ 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model.
36
+ 3. Grant of Patent License. Subject to the terms and conditions of this License and where and as applicable, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Model to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material or a Contribution incorporated within the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or Work shall terminate as of the date such litigation is asserted or filed.
37
+
38
+ Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION
39
+
40
+ 4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions:
41
+ Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material.
42
+ You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License;
43
+ You must cause any modified files to carry prominent notices stating that You changed the files;
44
+ You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model.
45
+ You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. - for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License.
46
+ 5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5).
47
+ 6. The Output You Generate. Except as set forth herein, Licensor claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License.
48
+
49
+ Section IV: OTHER PROVISIONS
50
+
51
+ 7. Updates and Runtime Restrictions. To the maximum extent permitted by law, Licensor reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License.
52
+ 8. Trademarks and related. Nothing in this License permits You to make use of Licensors’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors.
53
+ 9. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Model and the Complementary Material (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License.
54
+ 10. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
55
+ 11. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
56
+ 12. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein.
57
+
58
+ END OF TERMS AND CONDITIONS
59
+
60
+
61
+
62
+
63
+ Attachment A
64
+
65
+ Use Restrictions
66
+
67
+ You agree not to use the Model or Derivatives of the Model:
68
+
69
+ [insert use restrictions]
rollingdepth_src/LICENSE.txt ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
rollingdepth_src/README.md ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🛹 RollingDepth: Video Depth without Video Models
2
+
3
+ [![Website](doc/badges/badge-website.svg)](https://rollingdepth.github.io)
4
+ [![Hugging Face Model](https://img.shields.io/badge/🤗%20Hugging%20Face-Model-green)](https://huggingface.co/prs-eth/rollingdepth-v1-0)
5
+ <!-- [![arXiv](https://img.shields.io/badge/arXiv-PDF-b31b1b)]() -->
6
+
7
+ This repository represents the official implementation of the paper titled "Video Depth without Video Models".
8
+
9
+ [Bingxin Ke](http://www.kebingxin.com/)<sup>1</sup>,
10
+ [Dominik Narnhofer](https://scholar.google.com/citations?user=tFx8AhkAAAAJ&hl=en)<sup>1</sup>,
11
+ [Shengyu Huang](https://shengyuh.github.io/)<sup>1</sup>,
12
+ [Lei Ke](https://www.kelei.site/)<sup>2</sup>,
13
+ [Torben Peters](https://scholar.google.com/citations?user=F2C3I9EAAAAJ&hl=de)<sup>1</sup>,
14
+ [Katerina Fragkiadaki](https://www.cs.cmu.edu/~katef/)<sup>2</sup>,
15
+ [Anton Obukhov](https://www.obukhov.ai/)<sup>1</sup>,
16
+ [Konrad Schindler](https://scholar.google.com/citations?user=FZuNgqIAAAAJ&hl=en)<sup>1</sup>
17
+
18
+
19
+ <sup>1</sup>ETH Zurich,
20
+ <sup>2</sup>Carnegie Mellon University
21
+
22
+
23
+
24
+ ## 📢 News
25
+ 2024-11-28: Inference code is released.<br>
26
+
27
+
28
+
29
+ ## 🛠️ Setup
30
+ The inference code was tested on: Debian 12, Python 3.12.7 (venv), CUDA 12.4, GeForce RTX 3090
31
+
32
+ ### 📦 Repository
33
+ ```bash
34
+ git clone https://github.com/prs-eth/RollingDepth.git
35
+ cd RollingDepth
36
+ ```
37
+
38
+ ### 🐍 Python environment
39
+ Create python environment:
40
+ ```bash
41
+ # with venv
42
+ python -m venv venv/rollingdepth
43
+ source venv/rollingdepth/bin/activate
44
+
45
+ # or with conda
46
+ conda create --name rollingdepth python=3.12
47
+ conda activate rollingdepth
48
+ ```
49
+
50
+ ### 💻 Dependencies
51
+ Install dependicies:
52
+ ```bash
53
+ pip install -r requirements.txt
54
+
55
+ # Install modified diffusers with cross-frame self-attention
56
+ bash script/install_diffusers_dev.sh
57
+ ```
58
+ We use [pyav](https://github.com/PyAV-Org/PyAV) for video I/O, which relies on [ffmpeg](https://www.ffmpeg.org/).
59
+
60
+
61
+ ## 🏃 Test on your videos
62
+ All scripts are designed to run from the project root directory.
63
+
64
+ ### 📷 Prepare input videos
65
+ 1. Use sample videos:
66
+ ```bash
67
+ bash script/download_sample_data.sh
68
+ ```
69
+
70
+ 1. Or place your videos in a directory, for example, under `data/samples`.
71
+
72
+ ### 🚀 Run with presets
73
+ ```bash
74
+ python run_video.py \
75
+ -i data/samples \
76
+ -o output/samples_fast \
77
+ -p fast \
78
+ --save-npy true \
79
+ --verbose
80
+ ```
81
+ - `-p` or `--preset`: preset options
82
+ - `fast` for **fast inference**, with dilations [1, 25] (flexible), fp16, without refinement, at max. resolution 768.
83
+ - `fast1024` for **fast inference at resolution 1024**
84
+ - `full` for **better details**, with dilations [1, 10, 25] (flexible), fp16, with 10 refinement steps, at max. resolution 1024.
85
+ - `paper` for **reproducing paper numbers**, with (fixed) dilations [1, 10, 25], fp32, with 10 refinement steps, at max. resolution 768.
86
+ - `-i` or `--input-video`: path to input data, can be a single video file, a text file with video paths, or a directory of videos.
87
+ - `-o` or `--output-dir`: output directory.
88
+
89
+ Passing other arguments below may overwrite the preset settings:
90
+ - Coming soon
91
+ <!-- TODO: explain all arguments in detailed -->
92
+
93
+
94
+ ## ⬇ Checkpoint cache
95
+ By default, the [checkpoint](https://huggingface.co/prs-eth/rollingdepth-v1-0) is stored in the Hugging Face cache. The HF_HOME environment variable defines its location and can be overridden, e.g.:
96
+
97
+ ```
98
+ export HF_HOME=$(pwd)/cache
99
+ ```
100
+
101
+ Alternatively, use the following script to download the checkpoint weights locally and specify checkpoint path by `-c checkpoint/rollingdepth-v1-0 `
102
+
103
+ ```bash
104
+ bash script/download_weight.sh
105
+ ```
106
+
107
+
108
+ ## 🦿 Evaluation on test datasets
109
+ Coming soon
110
+
111
+
112
+ <!-- ## 🎓 Citation
113
+ TODO -->
114
+
115
+
116
+ ## 🙏 Acknowledgments
117
+ We thank Yue Pan, Shuchang Liu, Nando Metzger, and Nikolai Kalischek for fruitful discussions.
118
+
119
+ We are grateful to [redmond.ai](https://redmond.ai/) (robin@redmond.ai) for providing GPU resources.
120
+
121
+ ## 🎫 License
122
+
123
+ This code of this work is licensed under the Apache License, Version 2.0 (as defined in the [LICENSE](LICENSE.txt)).
124
+
125
+ The model is licensed under RAIL++-M License (as defined in the [LICENSE-MODEL](LICENSE-MODEL.txt))
126
+
127
+ By downloading and using the code and model you agree to the terms in [LICENSE](LICENSE.txt) and [LICENSE-MODEL](LICENSE-MODEL.txt) respectively.
rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/bug-report.yml ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "\U0001F41B Bug Report"
2
+ description: Report a bug on Diffusers
3
+ labels: [ "bug" ]
4
+ body:
5
+ - type: markdown
6
+ attributes:
7
+ value: |
8
+ Thanks a lot for taking the time to file this issue 🤗.
9
+ Issues do not only help to improve the library, but also publicly document common problems, questions, workflows for the whole community!
10
+ Thus, issues are of the same importance as pull requests when contributing to this library ❤️.
11
+ In order to make your issue as **useful for the community as possible**, let's try to stick to some simple guidelines:
12
+ - 1. Please try to be as precise and concise as possible.
13
+ *Give your issue a fitting title. Assume that someone which very limited knowledge of Diffusers can understand your issue. Add links to the source code, documentation other issues, pull requests etc...*
14
+ - 2. If your issue is about something not working, **always** provide a reproducible code snippet. The reader should be able to reproduce your issue by **only copy-pasting your code snippet into a Python shell**.
15
+ *The community cannot solve your issue if it cannot reproduce it. If your bug is related to training, add your training script and make everything needed to train public. Otherwise, just add a simple Python code snippet.*
16
+ - 3. Add the **minimum** amount of code / context that is needed to understand, reproduce your issue.
17
+ *Make the life of maintainers easy. `diffusers` is getting many issues every day. Make sure your issue is about one bug and one bug only. Make sure you add only the context, code needed to understand your issues - nothing more. Generally, every issue is a way of documenting this library, try to make it a good documentation entry.*
18
+ - 4. For issues related to community pipelines (i.e., the pipelines located in the `examples/community` folder), please tag the author of the pipeline in your issue thread as those pipelines are not maintained.
19
+ - type: markdown
20
+ attributes:
21
+ value: |
22
+ For more in-detail information on how to write good issues you can have a look [here](https://huggingface.co/course/chapter8/5?fw=pt).
23
+ - type: textarea
24
+ id: bug-description
25
+ attributes:
26
+ label: Describe the bug
27
+ description: A clear and concise description of what the bug is. If you intend to submit a pull request for this issue, tell us in the description. Thanks!
28
+ placeholder: Bug description
29
+ validations:
30
+ required: true
31
+ - type: textarea
32
+ id: reproduction
33
+ attributes:
34
+ label: Reproduction
35
+ description: Please provide a minimal reproducible code which we can copy/paste and reproduce the issue.
36
+ placeholder: Reproduction
37
+ validations:
38
+ required: true
39
+ - type: textarea
40
+ id: logs
41
+ attributes:
42
+ label: Logs
43
+ description: "Please include the Python logs if you can."
44
+ render: shell
45
+ - type: textarea
46
+ id: system-info
47
+ attributes:
48
+ label: System Info
49
+ description: Please share your system info with us. You can run the command `diffusers-cli env` and copy-paste its output below.
50
+ placeholder: Diffusers version, platform, Python version, ...
51
+ validations:
52
+ required: true
53
+ - type: textarea
54
+ id: who-can-help
55
+ attributes:
56
+ label: Who can help?
57
+ description: |
58
+ Your issue will be replied to more quickly if you can figure out the right person to tag with @.
59
+ If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
60
+
61
+ All issues are read by one of the core maintainers, so if you don't know who to tag, just leave this blank and
62
+ a core maintainer will ping the right person.
63
+
64
+ Please tag a maximum of 2 people.
65
+
66
+ Questions on DiffusionPipeline (Saving, Loading, From pretrained, ...): @sayakpaul @DN6
67
+
68
+ Questions on pipelines:
69
+ - Stable Diffusion @yiyixuxu @asomoza
70
+ - Stable Diffusion XL @yiyixuxu @sayakpaul @DN6
71
+ - Stable Diffusion 3: @yiyixuxu @sayakpaul @DN6 @asomoza
72
+ - Kandinsky @yiyixuxu
73
+ - ControlNet @sayakpaul @yiyixuxu @DN6
74
+ - T2I Adapter @sayakpaul @yiyixuxu @DN6
75
+ - IF @DN6
76
+ - Text-to-Video / Video-to-Video @DN6 @a-r-r-o-w
77
+ - Wuerstchen @DN6
78
+ - Other: @yiyixuxu @DN6
79
+ - Improving generation quality: @asomoza
80
+
81
+ Questions on models:
82
+ - UNet @DN6 @yiyixuxu @sayakpaul
83
+ - VAE @sayakpaul @DN6 @yiyixuxu
84
+ - Transformers/Attention @DN6 @yiyixuxu @sayakpaul
85
+
86
+ Questions on single file checkpoints: @DN6
87
+
88
+ Questions on Schedulers: @yiyixuxu
89
+
90
+ Questions on LoRA: @sayakpaul
91
+
92
+ Questions on Textual Inversion: @sayakpaul
93
+
94
+ Questions on Training:
95
+ - DreamBooth @sayakpaul
96
+ - Text-to-Image Fine-tuning @sayakpaul
97
+ - Textual Inversion @sayakpaul
98
+ - ControlNet @sayakpaul
99
+
100
+ Questions on Tests: @DN6 @sayakpaul @yiyixuxu
101
+
102
+ Questions on Documentation: @stevhliu
103
+
104
+ Questions on JAX- and MPS-related things: @pcuenca
105
+
106
+ Questions on audio pipelines: @sanchit-gandhi
107
+
108
+
109
+
110
+ placeholder: "@Username ..."
rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ contact_links:
2
+ - name: Questions / Discussions
3
+ url: https://github.com/huggingface/diffusers/discussions
4
+ about: General usage questions and community discussions
rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/feature_request.md ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ name: "\U0001F680 Feature Request"
3
+ about: Suggest an idea for this project
4
+ title: ''
5
+ labels: ''
6
+ assignees: ''
7
+
8
+ ---
9
+
10
+ **Is your feature request related to a problem? Please describe.**
11
+ A clear and concise description of what the problem is. Ex. I'm always frustrated when [...].
12
+
13
+ **Describe the solution you'd like.**
14
+ A clear and concise description of what you want to happen.
15
+
16
+ **Describe alternatives you've considered.**
17
+ A clear and concise description of any alternative solutions or features you've considered.
18
+
19
+ **Additional context.**
20
+ Add any other context or screenshots about the feature request here.
rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/feedback.md ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ name: "💬 Feedback about API Design"
3
+ about: Give feedback about the current API design
4
+ title: ''
5
+ labels: ''
6
+ assignees: ''
7
+
8
+ ---
9
+
10
+ **What API design would you like to have changed or added to the library? Why?**
11
+
12
+ **What use case would this enable or better enable? Can you give us a code example?**
rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/new-model-addition.yml ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "\U0001F31F New Model/Pipeline/Scheduler Addition"
2
+ description: Submit a proposal/request to implement a new diffusion model/pipeline/scheduler
3
+ labels: [ "New model/pipeline/scheduler" ]
4
+
5
+ body:
6
+ - type: textarea
7
+ id: description-request
8
+ validations:
9
+ required: true
10
+ attributes:
11
+ label: Model/Pipeline/Scheduler description
12
+ description: |
13
+ Put any and all important information relative to the model/pipeline/scheduler
14
+
15
+ - type: checkboxes
16
+ id: information-tasks
17
+ attributes:
18
+ label: Open source status
19
+ description: |
20
+ Please note that if the model implementation isn't available or if the weights aren't open-source, we are less likely to implement it in `diffusers`.
21
+ options:
22
+ - label: "The model implementation is available."
23
+ - label: "The model weights are available (Only relevant if addition is not a scheduler)."
24
+
25
+ - type: textarea
26
+ id: additional-info
27
+ attributes:
28
+ label: Provide useful links for the implementation
29
+ description: |
30
+ Please provide information regarding the implementation, the weights, and the authors.
31
+ Please mention the authors by @gh-username if you're aware of their usernames.
rollingdepth_src/diffusers/.github/ISSUE_TEMPLATE/translate.md ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ name: 🌐 Translating a New Language?
3
+ about: Start a new translation effort in your language
4
+ title: '[<languageCode>] Translating docs to <languageName>'
5
+ labels: WIP
6
+ assignees: ''
7
+
8
+ ---
9
+
10
+ <!--
11
+ Note: Please search to see if an issue already exists for the language you are trying to translate.
12
+ -->
13
+
14
+ Hi!
15
+
16
+ Let's bring the documentation to all the <languageName>-speaking community 🌐.
17
+
18
+ Who would want to translate? Please follow the 🤗 [TRANSLATING guide](https://github.com/huggingface/diffusers/blob/main/docs/TRANSLATING.md). Here is a list of the files ready for translation. Let us know in this issue if you'd like to translate any, and we'll add your name to the list.
19
+
20
+ Some notes:
21
+
22
+ * Please translate using an informal tone (imagine you are talking with a friend about Diffusers 🤗).
23
+ * Please translate in a gender-neutral way.
24
+ * Add your translations to the folder called `<languageCode>` inside the [source folder](https://github.com/huggingface/diffusers/tree/main/docs/source).
25
+ * Register your translation in `<languageCode>/_toctree.yml`; please follow the order of the [English version](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml).
26
+ * Once you're finished, open a pull request and tag this issue by including #issue-number in the description, where issue-number is the number of this issue. Please ping @stevhliu for review.
27
+ * 🙋 If you'd like others to help you with the translation, you can also post in the 🤗 [forums](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63).
28
+
29
+ Thank you so much for your help! 🤗
rollingdepth_src/diffusers/.github/PULL_REQUEST_TEMPLATE.md ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # What does this PR do?
2
+
3
+ <!--
4
+ Congratulations! You've made it this far! You're not quite done yet though.
5
+
6
+ Once merged, your PR is going to appear in the release notes with the title you set, so make sure it's a great title that fully reflects the extent of your awesome contribution.
7
+
8
+ Then, please replace this with a description of the change and which issue is fixed (if applicable). Please also include relevant motivation and context. List any dependencies (if any) that are required for this change.
9
+
10
+ Once you're done, someone will review your PR shortly (see the section "Who can review?" below to tag some potential reviewers). They may suggest changes to make the code even better. If no one reviewed your PR after a week has passed, don't hesitate to post a new comment @-mentioning the same persons---sometimes notifications get lost.
11
+ -->
12
+
13
+ <!-- Remove if not applicable -->
14
+
15
+ Fixes # (issue)
16
+
17
+
18
+ ## Before submitting
19
+ - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
20
+ - [ ] Did you read the [contributor guideline](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md)?
21
+ - [ ] Did you read our [philosophy doc](https://github.com/huggingface/diffusers/blob/main/PHILOSOPHY.md) (important for complex PRs)?
22
+ - [ ] Was this discussed/approved via a GitHub issue or the [forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63)? Please add a link to it if that's the case.
23
+ - [ ] Did you make sure to update the documentation with your changes? Here are the
24
+ [documentation guidelines](https://github.com/huggingface/diffusers/tree/main/docs), and
25
+ [here are tips on formatting docstrings](https://github.com/huggingface/diffusers/tree/main/docs#writing-source-documentation).
26
+ - [ ] Did you write any new necessary tests?
27
+
28
+
29
+ ## Who can review?
30
+
31
+ Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
32
+ members/contributors who may be interested in your PR.
33
+
34
+ <!-- Your PR will be replied to more quickly if you can figure out the right person to tag with @.
35
+
36
+ If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
37
+ Please tag fewer than 3 people.
38
+
39
+ Core library:
40
+
41
+ - Schedulers: @yiyixuxu
42
+ - Pipelines and pipeline callbacks: @yiyixuxu and @asomoza
43
+ - Training examples: @sayakpaul
44
+ - Docs: @stevhliu and @sayakpaul
45
+ - JAX and MPS: @pcuenca
46
+ - Audio: @sanchit-gandhi
47
+ - General functionalities: @sayakpaul @yiyixuxu @DN6
48
+
49
+ Integrations:
50
+
51
+ - deepspeed: HF Trainer/Accelerate: @SunMarc
52
+ - PEFT: @sayakpaul @BenjaminBossan
53
+
54
+ HF projects:
55
+
56
+ - accelerate: [different repo](https://github.com/huggingface/accelerate)
57
+ - datasets: [different repo](https://github.com/huggingface/datasets)
58
+ - transformers: [different repo](https://github.com/huggingface/transformers)
59
+ - safetensors: [different repo](https://github.com/huggingface/safetensors)
60
+
61
+ -->
rollingdepth_src/diffusers/.github/actions/setup-miniconda/action.yml ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Set up conda environment for testing
2
+
3
+ description: Sets up miniconda in your ${RUNNER_TEMP} environment and gives you the ${CONDA_RUN} environment variable so you don't have to worry about polluting non-empeheral runners anymore
4
+
5
+ inputs:
6
+ python-version:
7
+ description: If set to any value, don't use sudo to clean the workspace
8
+ required: false
9
+ type: string
10
+ default: "3.9"
11
+ miniconda-version:
12
+ description: Miniconda version to install
13
+ required: false
14
+ type: string
15
+ default: "4.12.0"
16
+ environment-file:
17
+ description: Environment file to install dependencies from
18
+ required: false
19
+ type: string
20
+ default: ""
21
+
22
+ runs:
23
+ using: composite
24
+ steps:
25
+ # Use the same trick from https://github.com/marketplace/actions/setup-miniconda
26
+ # to refresh the cache daily. This is kind of optional though
27
+ - name: Get date
28
+ id: get-date
29
+ shell: bash
30
+ run: echo "today=$(/bin/date -u '+%Y%m%d')d" >> $GITHUB_OUTPUT
31
+ - name: Setup miniconda cache
32
+ id: miniconda-cache
33
+ uses: actions/cache@v2
34
+ with:
35
+ path: ${{ runner.temp }}/miniconda
36
+ key: miniconda-${{ runner.os }}-${{ runner.arch }}-${{ inputs.python-version }}-${{ steps.get-date.outputs.today }}
37
+ - name: Install miniconda (${{ inputs.miniconda-version }})
38
+ if: steps.miniconda-cache.outputs.cache-hit != 'true'
39
+ env:
40
+ MINICONDA_VERSION: ${{ inputs.miniconda-version }}
41
+ shell: bash -l {0}
42
+ run: |
43
+ MINICONDA_INSTALL_PATH="${RUNNER_TEMP}/miniconda"
44
+ mkdir -p "${MINICONDA_INSTALL_PATH}"
45
+ case ${RUNNER_OS}-${RUNNER_ARCH} in
46
+ Linux-X64)
47
+ MINICONDA_ARCH="Linux-x86_64"
48
+ ;;
49
+ macOS-ARM64)
50
+ MINICONDA_ARCH="MacOSX-arm64"
51
+ ;;
52
+ macOS-X64)
53
+ MINICONDA_ARCH="MacOSX-x86_64"
54
+ ;;
55
+ *)
56
+ echo "::error::Platform ${RUNNER_OS}-${RUNNER_ARCH} currently unsupported using this action"
57
+ exit 1
58
+ ;;
59
+ esac
60
+ MINICONDA_URL="https://repo.anaconda.com/miniconda/Miniconda3-py39_${MINICONDA_VERSION}-${MINICONDA_ARCH}.sh"
61
+ curl -fsSL "${MINICONDA_URL}" -o "${MINICONDA_INSTALL_PATH}/miniconda.sh"
62
+ bash "${MINICONDA_INSTALL_PATH}/miniconda.sh" -b -u -p "${MINICONDA_INSTALL_PATH}"
63
+ rm -rf "${MINICONDA_INSTALL_PATH}/miniconda.sh"
64
+ - name: Update GitHub path to include miniconda install
65
+ shell: bash
66
+ run: |
67
+ MINICONDA_INSTALL_PATH="${RUNNER_TEMP}/miniconda"
68
+ echo "${MINICONDA_INSTALL_PATH}/bin" >> $GITHUB_PATH
69
+ - name: Setup miniconda env cache (with env file)
70
+ id: miniconda-env-cache-env-file
71
+ if: ${{ runner.os }} == 'macOS' && ${{ inputs.environment-file }} != ''
72
+ uses: actions/cache@v2
73
+ with:
74
+ path: ${{ runner.temp }}/conda-python-${{ inputs.python-version }}
75
+ key: miniconda-env-${{ runner.os }}-${{ runner.arch }}-${{ inputs.python-version }}-${{ steps.get-date.outputs.today }}-${{ hashFiles(inputs.environment-file) }}
76
+ - name: Setup miniconda env cache (without env file)
77
+ id: miniconda-env-cache
78
+ if: ${{ runner.os }} == 'macOS' && ${{ inputs.environment-file }} == ''
79
+ uses: actions/cache@v2
80
+ with:
81
+ path: ${{ runner.temp }}/conda-python-${{ inputs.python-version }}
82
+ key: miniconda-env-${{ runner.os }}-${{ runner.arch }}-${{ inputs.python-version }}-${{ steps.get-date.outputs.today }}
83
+ - name: Setup conda environment with python (v${{ inputs.python-version }})
84
+ if: steps.miniconda-env-cache-env-file.outputs.cache-hit != 'true' && steps.miniconda-env-cache.outputs.cache-hit != 'true'
85
+ shell: bash
86
+ env:
87
+ PYTHON_VERSION: ${{ inputs.python-version }}
88
+ ENV_FILE: ${{ inputs.environment-file }}
89
+ run: |
90
+ CONDA_BASE_ENV="${RUNNER_TEMP}/conda-python-${PYTHON_VERSION}"
91
+ ENV_FILE_FLAG=""
92
+ if [[ -f "${ENV_FILE}" ]]; then
93
+ ENV_FILE_FLAG="--file ${ENV_FILE}"
94
+ elif [[ -n "${ENV_FILE}" ]]; then
95
+ echo "::warning::Specified env file (${ENV_FILE}) not found, not going to include it"
96
+ fi
97
+ conda create \
98
+ --yes \
99
+ --prefix "${CONDA_BASE_ENV}" \
100
+ "python=${PYTHON_VERSION}" \
101
+ ${ENV_FILE_FLAG} \
102
+ cmake=3.22 \
103
+ conda-build=3.21 \
104
+ ninja=1.10 \
105
+ pkg-config=0.29 \
106
+ wheel=0.37
107
+ - name: Clone the base conda environment and update GitHub env
108
+ shell: bash
109
+ env:
110
+ PYTHON_VERSION: ${{ inputs.python-version }}
111
+ CONDA_BASE_ENV: ${{ runner.temp }}/conda-python-${{ inputs.python-version }}
112
+ run: |
113
+ CONDA_ENV="${RUNNER_TEMP}/conda_environment_${GITHUB_RUN_ID}"
114
+ conda create \
115
+ --yes \
116
+ --prefix "${CONDA_ENV}" \
117
+ --clone "${CONDA_BASE_ENV}"
118
+ # TODO: conda-build could not be cloned because it hardcodes the path, so it
119
+ # could not be cached
120
+ conda install --yes -p ${CONDA_ENV} conda-build=3.21
121
+ echo "CONDA_ENV=${CONDA_ENV}" >> "${GITHUB_ENV}"
122
+ echo "CONDA_RUN=conda run -p ${CONDA_ENV} --no-capture-output" >> "${GITHUB_ENV}"
123
+ echo "CONDA_BUILD=conda run -p ${CONDA_ENV} conda-build" >> "${GITHUB_ENV}"
124
+ echo "CONDA_INSTALL=conda install -p ${CONDA_ENV}" >> "${GITHUB_ENV}"
125
+ - name: Get disk space usage and throw an error for low disk space
126
+ shell: bash
127
+ run: |
128
+ echo "Print the available disk space for manual inspection"
129
+ df -h
130
+ # Set the minimum requirement space to 4GB
131
+ MINIMUM_AVAILABLE_SPACE_IN_GB=4
132
+ MINIMUM_AVAILABLE_SPACE_IN_KB=$(($MINIMUM_AVAILABLE_SPACE_IN_GB * 1024 * 1024))
133
+ # Use KB to avoid floating point warning like 3.1GB
134
+ df -k | tr -s ' ' | cut -d' ' -f 4,9 | while read -r LINE;
135
+ do
136
+ AVAIL=$(echo $LINE | cut -f1 -d' ')
137
+ MOUNT=$(echo $LINE | cut -f2 -d' ')
138
+ if [ "$MOUNT" = "/" ]; then
139
+ if [ "$AVAIL" -lt "$MINIMUM_AVAILABLE_SPACE_IN_KB" ]; then
140
+ echo "There is only ${AVAIL}KB free space left in $MOUNT, which is less than the minimum requirement of ${MINIMUM_AVAILABLE_SPACE_IN_KB}KB. Please help create an issue to PyTorch Release Engineering via https://github.com/pytorch/test-infra/issues and provide the link to the workflow run."
141
+ exit 1;
142
+ else
143
+ echo "There is ${AVAIL}KB free space left in $MOUNT, continue"
144
+ fi
145
+ fi
146
+ done
rollingdepth_src/diffusers/.github/workflows/benchmark.yml ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Benchmarking tests
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ schedule:
6
+ - cron: "30 1 1,15 * *" # every 2 weeks on the 1st and the 15th of every month at 1:30 AM
7
+
8
+ env:
9
+ DIFFUSERS_IS_CI: yes
10
+ HF_HOME: /mnt/cache
11
+ OMP_NUM_THREADS: 8
12
+ MKL_NUM_THREADS: 8
13
+
14
+ jobs:
15
+ torch_pipelines_cuda_benchmark_tests:
16
+ env:
17
+ SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_BENCHMARK }}
18
+ name: Torch Core Pipelines CUDA Benchmarking Tests
19
+ strategy:
20
+ fail-fast: false
21
+ max-parallel: 1
22
+ runs-on:
23
+ group: aws-g6-4xlarge-plus
24
+ container:
25
+ image: diffusers/diffusers-pytorch-compile-cuda
26
+ options: --shm-size "16gb" --ipc host --gpus 0
27
+ steps:
28
+ - name: Checkout diffusers
29
+ uses: actions/checkout@v3
30
+ with:
31
+ fetch-depth: 2
32
+ - name: NVIDIA-SMI
33
+ run: |
34
+ nvidia-smi
35
+ - name: Install dependencies
36
+ run: |
37
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
38
+ python -m uv pip install -e [quality,test]
39
+ python -m uv pip install pandas peft
40
+ - name: Environment
41
+ run: |
42
+ python utils/print_env.py
43
+ - name: Diffusers Benchmarking
44
+ env:
45
+ HF_TOKEN: ${{ secrets.DIFFUSERS_BOT_TOKEN }}
46
+ BASE_PATH: benchmark_outputs
47
+ run: |
48
+ export TOTAL_GPU_MEMORY=$(python -c "import torch; print(torch.cuda.get_device_properties(0).total_memory / (1024**3))")
49
+ cd benchmarks && mkdir ${BASE_PATH} && python run_all.py && python push_results.py
50
+
51
+ - name: Test suite reports artifacts
52
+ if: ${{ always() }}
53
+ uses: actions/upload-artifact@v2
54
+ with:
55
+ name: benchmark_test_reports
56
+ path: benchmarks/benchmark_outputs
57
+
58
+ - name: Report success status
59
+ if: ${{ success() }}
60
+ run: |
61
+ pip install requests && python utils/notify_benchmarking_status.py --status=success
62
+
63
+ - name: Report failure status
64
+ if: ${{ failure() }}
65
+ run: |
66
+ pip install requests && python utils/notify_benchmarking_status.py --status=failure
rollingdepth_src/diffusers/.github/workflows/build_docker_images.yml ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Test, build, and push Docker images
2
+
3
+ on:
4
+ pull_request: # During PRs, we just check if the changes Dockerfiles can be successfully built
5
+ branches:
6
+ - main
7
+ paths:
8
+ - "docker/**"
9
+ workflow_dispatch:
10
+ schedule:
11
+ - cron: "0 0 * * *" # every day at midnight
12
+
13
+ concurrency:
14
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
15
+ cancel-in-progress: true
16
+
17
+ env:
18
+ REGISTRY: diffusers
19
+ CI_SLACK_CHANNEL: ${{ secrets.CI_DOCKER_CHANNEL }}
20
+
21
+ jobs:
22
+ test-build-docker-images:
23
+ runs-on:
24
+ group: aws-general-8-plus
25
+ if: github.event_name == 'pull_request'
26
+ steps:
27
+ - name: Set up Docker Buildx
28
+ uses: docker/setup-buildx-action@v1
29
+
30
+ - name: Check out code
31
+ uses: actions/checkout@v3
32
+
33
+ - name: Find Changed Dockerfiles
34
+ id: file_changes
35
+ uses: jitterbit/get-changed-files@v1
36
+ with:
37
+ format: 'space-delimited'
38
+ token: ${{ secrets.GITHUB_TOKEN }}
39
+
40
+ - name: Build Changed Docker Images
41
+ run: |
42
+ CHANGED_FILES="${{ steps.file_changes.outputs.all }}"
43
+ for FILE in $CHANGED_FILES; do
44
+ if [[ "$FILE" == docker/*Dockerfile ]]; then
45
+ DOCKER_PATH="${FILE%/Dockerfile}"
46
+ DOCKER_TAG=$(basename "$DOCKER_PATH")
47
+ echo "Building Docker image for $DOCKER_TAG"
48
+ docker build -t "$DOCKER_TAG" "$DOCKER_PATH"
49
+ fi
50
+ done
51
+ if: steps.file_changes.outputs.all != ''
52
+
53
+ build-and-push-docker-images:
54
+ runs-on:
55
+ group: aws-general-8-plus
56
+ if: github.event_name != 'pull_request'
57
+
58
+ permissions:
59
+ contents: read
60
+ packages: write
61
+
62
+ strategy:
63
+ fail-fast: false
64
+ matrix:
65
+ image-name:
66
+ - diffusers-pytorch-cpu
67
+ - diffusers-pytorch-cuda
68
+ - diffusers-pytorch-compile-cuda
69
+ - diffusers-pytorch-xformers-cuda
70
+ - diffusers-flax-cpu
71
+ - diffusers-flax-tpu
72
+ - diffusers-onnxruntime-cpu
73
+ - diffusers-onnxruntime-cuda
74
+ - diffusers-doc-builder
75
+
76
+ steps:
77
+ - name: Checkout repository
78
+ uses: actions/checkout@v3
79
+ - name: Set up Docker Buildx
80
+ uses: docker/setup-buildx-action@v1
81
+ - name: Login to Docker Hub
82
+ uses: docker/login-action@v2
83
+ with:
84
+ username: ${{ env.REGISTRY }}
85
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
86
+ - name: Build and push
87
+ uses: docker/build-push-action@v3
88
+ with:
89
+ no-cache: true
90
+ context: ./docker/${{ matrix.image-name }}
91
+ push: true
92
+ tags: ${{ env.REGISTRY }}/${{ matrix.image-name }}:latest
93
+
94
+ - name: Post to a Slack channel
95
+ id: slack
96
+ uses: huggingface/hf-workflows/.github/actions/post-slack@main
97
+ with:
98
+ # Slack channel id, channel name, or user id to post message.
99
+ # See also: https://api.slack.com/methods/chat.postMessage#channels
100
+ slack_channel: ${{ env.CI_SLACK_CHANNEL }}
101
+ title: "🤗 Results of the ${{ matrix.image-name }} Docker Image build"
102
+ status: ${{ job.status }}
103
+ slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
rollingdepth_src/diffusers/.github/workflows/build_documentation.yml ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Build documentation
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ - doc-builder*
8
+ - v*-release
9
+ - v*-patch
10
+ paths:
11
+ - "src/diffusers/**.py"
12
+ - "examples/**"
13
+ - "docs/**"
14
+
15
+ jobs:
16
+ build:
17
+ uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
18
+ with:
19
+ commit_sha: ${{ github.sha }}
20
+ install_libgl1: true
21
+ package: diffusers
22
+ notebook_folder: diffusers_doc
23
+ languages: en ko zh ja pt
24
+ custom_container: diffusers/diffusers-doc-builder
25
+ secrets:
26
+ token: ${{ secrets.HUGGINGFACE_PUSH }}
27
+ hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
rollingdepth_src/diffusers/.github/workflows/build_pr_documentation.yml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Build PR Documentation
2
+
3
+ on:
4
+ pull_request:
5
+ paths:
6
+ - "src/diffusers/**.py"
7
+ - "examples/**"
8
+ - "docs/**"
9
+
10
+ concurrency:
11
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
12
+ cancel-in-progress: true
13
+
14
+ jobs:
15
+ build:
16
+ uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
17
+ with:
18
+ commit_sha: ${{ github.event.pull_request.head.sha }}
19
+ pr_number: ${{ github.event.number }}
20
+ install_libgl1: true
21
+ package: diffusers
22
+ languages: en ko zh ja pt
23
+ custom_container: diffusers/diffusers-doc-builder
rollingdepth_src/diffusers/.github/workflows/mirror_community_pipeline.yml ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Mirror Community Pipeline
2
+
3
+ on:
4
+ # Push changes on the main branch
5
+ push:
6
+ branches:
7
+ - main
8
+ paths:
9
+ - 'examples/community/**.py'
10
+
11
+ # And on tag creation (e.g. `v0.28.1`)
12
+ tags:
13
+ - '*'
14
+
15
+ # Manual trigger with ref input
16
+ workflow_dispatch:
17
+ inputs:
18
+ ref:
19
+ description: "Either 'main' or a tag ref"
20
+ required: true
21
+ default: 'main'
22
+
23
+ jobs:
24
+ mirror_community_pipeline:
25
+ env:
26
+ SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_COMMUNITY_MIRROR }}
27
+
28
+ runs-on: ubuntu-latest
29
+ steps:
30
+ # Checkout to correct ref
31
+ # If workflow dispatch
32
+ # If ref is 'main', set:
33
+ # CHECKOUT_REF=refs/heads/main
34
+ # PATH_IN_REPO=main
35
+ # Else it must be a tag. Set:
36
+ # CHECKOUT_REF=refs/tags/{tag}
37
+ # PATH_IN_REPO={tag}
38
+ # If not workflow dispatch
39
+ # If ref is 'refs/heads/main' => set 'main'
40
+ # Else it must be a tag => set {tag}
41
+ - name: Set checkout_ref and path_in_repo
42
+ run: |
43
+ if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
44
+ if [ -z "${{ github.event.inputs.ref }}" ]; then
45
+ echo "Error: Missing ref input"
46
+ exit 1
47
+ elif [ "${{ github.event.inputs.ref }}" == "main" ]; then
48
+ echo "CHECKOUT_REF=refs/heads/main" >> $GITHUB_ENV
49
+ echo "PATH_IN_REPO=main" >> $GITHUB_ENV
50
+ else
51
+ echo "CHECKOUT_REF=refs/tags/${{ github.event.inputs.ref }}" >> $GITHUB_ENV
52
+ echo "PATH_IN_REPO=${{ github.event.inputs.ref }}" >> $GITHUB_ENV
53
+ fi
54
+ elif [ "${{ github.ref }}" == "refs/heads/main" ]; then
55
+ echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
56
+ echo "PATH_IN_REPO=main" >> $GITHUB_ENV
57
+ else
58
+ # e.g. refs/tags/v0.28.1 -> v0.28.1
59
+ echo "CHECKOUT_REF=${{ github.ref }}" >> $GITHUB_ENV
60
+ echo "PATH_IN_REPO=$(echo ${{ github.ref }} | sed 's/^refs\/tags\///')" >> $GITHUB_ENV
61
+ fi
62
+ - name: Print env vars
63
+ run: |
64
+ echo "CHECKOUT_REF: ${{ env.CHECKOUT_REF }}"
65
+ echo "PATH_IN_REPO: ${{ env.PATH_IN_REPO }}"
66
+ - uses: actions/checkout@v3
67
+ with:
68
+ ref: ${{ env.CHECKOUT_REF }}
69
+
70
+ # Setup + install dependencies
71
+ - name: Set up Python
72
+ uses: actions/setup-python@v4
73
+ with:
74
+ python-version: "3.10"
75
+ - name: Install dependencies
76
+ run: |
77
+ python -m pip install --upgrade pip
78
+ pip install --upgrade huggingface_hub
79
+
80
+ # Check secret is set
81
+ - name: whoami
82
+ run: huggingface-cli whoami
83
+ env:
84
+ HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
85
+
86
+ # Push to HF! (under subfolder based on checkout ref)
87
+ # https://huggingface.co/datasets/diffusers/community-pipelines-mirror
88
+ - name: Mirror community pipeline to HF
89
+ run: huggingface-cli upload diffusers/community-pipelines-mirror ./examples/community ${PATH_IN_REPO} --repo-type dataset
90
+ env:
91
+ PATH_IN_REPO: ${{ env.PATH_IN_REPO }}
92
+ HF_TOKEN: ${{ secrets.HF_TOKEN_MIRROR_COMMUNITY_PIPELINES }}
93
+
94
+ - name: Report success status
95
+ if: ${{ success() }}
96
+ run: |
97
+ pip install requests && python utils/notify_community_pipelines_mirror.py --status=success
98
+
99
+ - name: Report failure status
100
+ if: ${{ failure() }}
101
+ run: |
102
+ pip install requests && python utils/notify_community_pipelines_mirror.py --status=failure
rollingdepth_src/diffusers/.github/workflows/nightly_tests.yml ADDED
@@ -0,0 +1,353 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Nightly and release tests on main/release branch
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ schedule:
6
+ - cron: "0 0 * * *" # every day at midnight
7
+
8
+ env:
9
+ DIFFUSERS_IS_CI: yes
10
+ HF_HUB_ENABLE_HF_TRANSFER: 1
11
+ OMP_NUM_THREADS: 8
12
+ MKL_NUM_THREADS: 8
13
+ PYTEST_TIMEOUT: 600
14
+ RUN_SLOW: yes
15
+ RUN_NIGHTLY: yes
16
+ PIPELINE_USAGE_CUTOFF: 5000
17
+ SLACK_API_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
18
+
19
+ jobs:
20
+ setup_torch_cuda_pipeline_matrix:
21
+ name: Setup Torch Pipelines CUDA Slow Tests Matrix
22
+ runs-on:
23
+ group: aws-general-8-plus
24
+ container:
25
+ image: diffusers/diffusers-pytorch-cpu
26
+ outputs:
27
+ pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
28
+ steps:
29
+ - name: Checkout diffusers
30
+ uses: actions/checkout@v3
31
+ with:
32
+ fetch-depth: 2
33
+ - name: Install dependencies
34
+ run: |
35
+ pip install -e .[test]
36
+ pip install huggingface_hub
37
+ - name: Fetch Pipeline Matrix
38
+ id: fetch_pipeline_matrix
39
+ run: |
40
+ matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
41
+ echo $matrix
42
+ echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
43
+
44
+ - name: Pipeline Tests Artifacts
45
+ if: ${{ always() }}
46
+ uses: actions/upload-artifact@v2
47
+ with:
48
+ name: test-pipelines.json
49
+ path: reports
50
+
51
+ run_nightly_tests_for_torch_pipelines:
52
+ name: Nightly Torch Pipelines CUDA Tests
53
+ needs: setup_torch_cuda_pipeline_matrix
54
+ strategy:
55
+ fail-fast: false
56
+ max-parallel: 8
57
+ matrix:
58
+ module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
59
+ runs-on:
60
+ group: aws-g4dn-2xlarge
61
+ container:
62
+ image: diffusers/diffusers-pytorch-cuda
63
+ options: --shm-size "16gb" --ipc host --gpus 0
64
+ steps:
65
+ - name: Checkout diffusers
66
+ uses: actions/checkout@v3
67
+ with:
68
+ fetch-depth: 2
69
+ - name: NVIDIA-SMI
70
+ run: nvidia-smi
71
+ - name: Install dependencies
72
+ run: |
73
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
74
+ python -m uv pip install -e [quality,test]
75
+ python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
76
+ python -m uv pip install pytest-reportlog
77
+ - name: Environment
78
+ run: |
79
+ python utils/print_env.py
80
+ - name: Pipeline CUDA Test
81
+ env:
82
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
83
+ # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
84
+ CUBLAS_WORKSPACE_CONFIG: :16:8
85
+ run: |
86
+ python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
87
+ -s -v -k "not Flax and not Onnx" \
88
+ --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
89
+ --report-log=tests_pipeline_${{ matrix.module }}_cuda.log \
90
+ tests/pipelines/${{ matrix.module }}
91
+ - name: Failure short reports
92
+ if: ${{ failure() }}
93
+ run: |
94
+ cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
95
+ cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
96
+ - name: Test suite reports artifacts
97
+ if: ${{ always() }}
98
+ uses: actions/upload-artifact@v2
99
+ with:
100
+ name: pipeline_${{ matrix.module }}_test_reports
101
+ path: reports
102
+ - name: Generate Report and Notify Channel
103
+ if: always()
104
+ run: |
105
+ pip install slack_sdk tabulate
106
+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
107
+
108
+ run_nightly_tests_for_other_torch_modules:
109
+ name: Nightly Torch CUDA Tests
110
+ runs-on:
111
+ group: aws-g4dn-2xlarge
112
+ container:
113
+ image: diffusers/diffusers-pytorch-cuda
114
+ options: --shm-size "16gb" --ipc host --gpus 0
115
+ defaults:
116
+ run:
117
+ shell: bash
118
+ strategy:
119
+ max-parallel: 2
120
+ matrix:
121
+ module: [models, schedulers, lora, others, single_file, examples]
122
+ steps:
123
+ - name: Checkout diffusers
124
+ uses: actions/checkout@v3
125
+ with:
126
+ fetch-depth: 2
127
+
128
+ - name: Install dependencies
129
+ run: |
130
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
131
+ python -m uv pip install -e [quality,test]
132
+ python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
133
+ python -m uv pip install peft@git+https://github.com/huggingface/peft.git
134
+ python -m uv pip install pytest-reportlog
135
+ - name: Environment
136
+ run: python utils/print_env.py
137
+
138
+ - name: Run nightly PyTorch CUDA tests for non-pipeline modules
139
+ if: ${{ matrix.module != 'examples'}}
140
+ env:
141
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
142
+ # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
143
+ CUBLAS_WORKSPACE_CONFIG: :16:8
144
+ run: |
145
+ python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
146
+ -s -v -k "not Flax and not Onnx" \
147
+ --make-reports=tests_torch_${{ matrix.module }}_cuda \
148
+ --report-log=tests_torch_${{ matrix.module }}_cuda.log \
149
+ tests/${{ matrix.module }}
150
+
151
+ - name: Run nightly example tests with Torch
152
+ if: ${{ matrix.module == 'examples' }}
153
+ env:
154
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
155
+ # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
156
+ CUBLAS_WORKSPACE_CONFIG: :16:8
157
+ run: |
158
+ python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
159
+ -s -v --make-reports=examples_torch_cuda \
160
+ --report-log=examples_torch_cuda.log \
161
+ examples/
162
+
163
+ - name: Failure short reports
164
+ if: ${{ failure() }}
165
+ run: |
166
+ cat reports/tests_torch_${{ matrix.module }}_cuda_stats.txt
167
+ cat reports/tests_torch_${{ matrix.module }}_cuda_failures_short.txt
168
+
169
+ - name: Test suite reports artifacts
170
+ if: ${{ always() }}
171
+ uses: actions/upload-artifact@v2
172
+ with:
173
+ name: torch_${{ matrix.module }}_cuda_test_reports
174
+ path: reports
175
+
176
+ - name: Generate Report and Notify Channel
177
+ if: always()
178
+ run: |
179
+ pip install slack_sdk tabulate
180
+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
181
+
182
+ run_flax_tpu_tests:
183
+ name: Nightly Flax TPU Tests
184
+ runs-on: docker-tpu
185
+ if: github.event_name == 'schedule'
186
+
187
+ container:
188
+ image: diffusers/diffusers-flax-tpu
189
+ options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/ --privileged
190
+ defaults:
191
+ run:
192
+ shell: bash
193
+ steps:
194
+ - name: Checkout diffusers
195
+ uses: actions/checkout@v3
196
+ with:
197
+ fetch-depth: 2
198
+
199
+ - name: Install dependencies
200
+ run: |
201
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
202
+ python -m uv pip install -e [quality,test]
203
+ python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
204
+ python -m uv pip install pytest-reportlog
205
+
206
+ - name: Environment
207
+ run: python utils/print_env.py
208
+
209
+ - name: Run nightly Flax TPU tests
210
+ env:
211
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
212
+ run: |
213
+ python -m pytest -n 0 \
214
+ -s -v -k "Flax" \
215
+ --make-reports=tests_flax_tpu \
216
+ --report-log=tests_flax_tpu.log \
217
+ tests/
218
+
219
+ - name: Failure short reports
220
+ if: ${{ failure() }}
221
+ run: |
222
+ cat reports/tests_flax_tpu_stats.txt
223
+ cat reports/tests_flax_tpu_failures_short.txt
224
+
225
+ - name: Test suite reports artifacts
226
+ if: ${{ always() }}
227
+ uses: actions/upload-artifact@v2
228
+ with:
229
+ name: flax_tpu_test_reports
230
+ path: reports
231
+
232
+ - name: Generate Report and Notify Channel
233
+ if: always()
234
+ run: |
235
+ pip install slack_sdk tabulate
236
+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
237
+
238
+ run_nightly_onnx_tests:
239
+ name: Nightly ONNXRuntime CUDA tests on Ubuntu
240
+ runs-on:
241
+ group: aws-g4dn-2xlarge
242
+ container:
243
+ image: diffusers/diffusers-onnxruntime-cuda
244
+ options: --gpus 0 --shm-size "16gb" --ipc host
245
+
246
+ steps:
247
+ - name: Checkout diffusers
248
+ uses: actions/checkout@v3
249
+ with:
250
+ fetch-depth: 2
251
+
252
+ - name: NVIDIA-SMI
253
+ run: nvidia-smi
254
+
255
+ - name: Install dependencies
256
+ run: |
257
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
258
+ python -m uv pip install -e [quality,test]
259
+ python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
260
+ python -m uv pip install pytest-reportlog
261
+ - name: Environment
262
+ run: python utils/print_env.py
263
+
264
+ - name: Run Nightly ONNXRuntime CUDA tests
265
+ env:
266
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
267
+ run: |
268
+ python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
269
+ -s -v -k "Onnx" \
270
+ --make-reports=tests_onnx_cuda \
271
+ --report-log=tests_onnx_cuda.log \
272
+ tests/
273
+
274
+ - name: Failure short reports
275
+ if: ${{ failure() }}
276
+ run: |
277
+ cat reports/tests_onnx_cuda_stats.txt
278
+ cat reports/tests_onnx_cuda_failures_short.txt
279
+
280
+ - name: Test suite reports artifacts
281
+ if: ${{ always() }}
282
+ uses: actions/upload-artifact@v2
283
+ with:
284
+ name: tests_onnx_cuda_test_reports
285
+ path: reports
286
+
287
+ - name: Generate Report and Notify Channel
288
+ if: always()
289
+ run: |
290
+ pip install slack_sdk tabulate
291
+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
292
+
293
+ run_nightly_tests_apple_m1:
294
+ name: Nightly PyTorch MPS tests on MacOS
295
+ runs-on: [ self-hosted, apple-m1 ]
296
+ if: github.event_name == 'schedule'
297
+
298
+ steps:
299
+ - name: Checkout diffusers
300
+ uses: actions/checkout@v3
301
+ with:
302
+ fetch-depth: 2
303
+
304
+ - name: Clean checkout
305
+ shell: arch -arch arm64 bash {0}
306
+ run: |
307
+ git clean -fxd
308
+
309
+ - name: Setup miniconda
310
+ uses: ./.github/actions/setup-miniconda
311
+ with:
312
+ python-version: 3.9
313
+
314
+ - name: Install dependencies
315
+ shell: arch -arch arm64 bash {0}
316
+ run: |
317
+ ${CONDA_RUN} python -m pip install --upgrade pip uv
318
+ ${CONDA_RUN} python -m uv pip install -e [quality,test]
319
+ ${CONDA_RUN} python -m uv pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
320
+ ${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate
321
+ ${CONDA_RUN} python -m uv pip install pytest-reportlog
322
+
323
+ - name: Environment
324
+ shell: arch -arch arm64 bash {0}
325
+ run: |
326
+ ${CONDA_RUN} python utils/print_env.py
327
+
328
+ - name: Run nightly PyTorch tests on M1 (MPS)
329
+ shell: arch -arch arm64 bash {0}
330
+ env:
331
+ HF_HOME: /System/Volumes/Data/mnt/cache
332
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
333
+ run: |
334
+ ${CONDA_RUN} python -m pytest -n 1 -s -v --make-reports=tests_torch_mps \
335
+ --report-log=tests_torch_mps.log \
336
+ tests/
337
+
338
+ - name: Failure short reports
339
+ if: ${{ failure() }}
340
+ run: cat reports/tests_torch_mps_failures_short.txt
341
+
342
+ - name: Test suite reports artifacts
343
+ if: ${{ always() }}
344
+ uses: actions/upload-artifact@v2
345
+ with:
346
+ name: torch_mps_test_reports
347
+ path: reports
348
+
349
+ - name: Generate Report and Notify Channel
350
+ if: always()
351
+ run: |
352
+ pip install slack_sdk tabulate
353
+ python utils/log_reports.py >> $GITHUB_STEP_SUMMARY
rollingdepth_src/diffusers/.github/workflows/notify_slack_about_release.yml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Notify Slack about a release
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ release:
6
+ types: [published]
7
+
8
+ jobs:
9
+ build:
10
+ runs-on: ubuntu-latest
11
+
12
+ steps:
13
+ - uses: actions/checkout@v3
14
+
15
+ - name: Setup Python
16
+ uses: actions/setup-python@v4
17
+ with:
18
+ python-version: '3.8'
19
+
20
+ - name: Notify Slack about the release
21
+ env:
22
+ SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
23
+ run: pip install requests && python utils/notify_slack_about_release.py
rollingdepth_src/diffusers/.github/workflows/pr_dependency_test.yml ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Run dependency tests
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - "src/diffusers/**.py"
9
+ push:
10
+ branches:
11
+ - main
12
+
13
+ concurrency:
14
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
15
+ cancel-in-progress: true
16
+
17
+ jobs:
18
+ check_dependencies:
19
+ runs-on: ubuntu-latest
20
+ steps:
21
+ - uses: actions/checkout@v3
22
+ - name: Set up Python
23
+ uses: actions/setup-python@v4
24
+ with:
25
+ python-version: "3.8"
26
+ - name: Install dependencies
27
+ run: |
28
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
29
+ python -m pip install --upgrade pip uv
30
+ python -m uv pip install -e .
31
+ python -m uv pip install pytest
32
+ - name: Check for soft dependencies
33
+ run: |
34
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
35
+ pytest tests/others/test_dependencies.py
rollingdepth_src/diffusers/.github/workflows/pr_flax_dependency_test.yml ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Run Flax dependency tests
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - "src/diffusers/**.py"
9
+ push:
10
+ branches:
11
+ - main
12
+
13
+ concurrency:
14
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
15
+ cancel-in-progress: true
16
+
17
+ jobs:
18
+ check_flax_dependencies:
19
+ runs-on: ubuntu-latest
20
+ steps:
21
+ - uses: actions/checkout@v3
22
+ - name: Set up Python
23
+ uses: actions/setup-python@v4
24
+ with:
25
+ python-version: "3.8"
26
+ - name: Install dependencies
27
+ run: |
28
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
29
+ python -m pip install --upgrade pip uv
30
+ python -m uv pip install -e .
31
+ python -m uv pip install "jax[cpu]>=0.2.16,!=0.3.2"
32
+ python -m uv pip install "flax>=0.4.1"
33
+ python -m uv pip install "jaxlib>=0.1.65"
34
+ python -m uv pip install pytest
35
+ - name: Check for soft dependencies
36
+ run: |
37
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
38
+ pytest tests/others/test_dependencies.py
rollingdepth_src/diffusers/.github/workflows/pr_test_fetcher.yml ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Fast tests for PRs - Test Fetcher
2
+
3
+ on: workflow_dispatch
4
+
5
+ env:
6
+ DIFFUSERS_IS_CI: yes
7
+ OMP_NUM_THREADS: 4
8
+ MKL_NUM_THREADS: 4
9
+ PYTEST_TIMEOUT: 60
10
+
11
+ concurrency:
12
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
13
+ cancel-in-progress: true
14
+
15
+ jobs:
16
+ setup_pr_tests:
17
+ name: Setup PR Tests
18
+ runs-on:
19
+ group: aws-general-8-plus
20
+ container:
21
+ image: diffusers/diffusers-pytorch-cpu
22
+ options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
23
+ defaults:
24
+ run:
25
+ shell: bash
26
+ outputs:
27
+ matrix: ${{ steps.set_matrix.outputs.matrix }}
28
+ test_map: ${{ steps.set_matrix.outputs.test_map }}
29
+ steps:
30
+ - name: Checkout diffusers
31
+ uses: actions/checkout@v3
32
+ with:
33
+ fetch-depth: 0
34
+ - name: Install dependencies
35
+ run: |
36
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
37
+ python -m uv pip install -e [quality,test]
38
+ - name: Environment
39
+ run: |
40
+ python utils/print_env.py
41
+ echo $(git --version)
42
+ - name: Fetch Tests
43
+ run: |
44
+ python utils/tests_fetcher.py | tee test_preparation.txt
45
+ - name: Report fetched tests
46
+ uses: actions/upload-artifact@v3
47
+ with:
48
+ name: test_fetched
49
+ path: test_preparation.txt
50
+ - id: set_matrix
51
+ name: Create Test Matrix
52
+ # The `keys` is used as GitHub actions matrix for jobs, i.e. `models`, `pipelines`, etc.
53
+ # The `test_map` is used to get the actual identified test files under each key.
54
+ # If no test to run (so no `test_map.json` file), create a dummy map (empty matrix will fail)
55
+ run: |
56
+ if [ -f test_map.json ]; then
57
+ keys=$(python3 -c 'import json; fp = open("test_map.json"); test_map = json.load(fp); fp.close(); d = list(test_map.keys()); print(json.dumps(d))')
58
+ test_map=$(python3 -c 'import json; fp = open("test_map.json"); test_map = json.load(fp); fp.close(); print(json.dumps(test_map))')
59
+ else
60
+ keys=$(python3 -c 'import json; print(json.dumps(["dummy"]))')
61
+ test_map=$(python3 -c 'import json; print(json.dumps({"dummy": []}))')
62
+ fi
63
+ echo $keys
64
+ echo $test_map
65
+ echo "matrix=$keys" >> $GITHUB_OUTPUT
66
+ echo "test_map=$test_map" >> $GITHUB_OUTPUT
67
+
68
+ run_pr_tests:
69
+ name: Run PR Tests
70
+ needs: setup_pr_tests
71
+ if: contains(fromJson(needs.setup_pr_tests.outputs.matrix), 'dummy') != true
72
+ strategy:
73
+ fail-fast: false
74
+ max-parallel: 2
75
+ matrix:
76
+ modules: ${{ fromJson(needs.setup_pr_tests.outputs.matrix) }}
77
+ runs-on:
78
+ group: aws-general-8-plus
79
+ container:
80
+ image: diffusers/diffusers-pytorch-cpu
81
+ options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
82
+ defaults:
83
+ run:
84
+ shell: bash
85
+ steps:
86
+ - name: Checkout diffusers
87
+ uses: actions/checkout@v3
88
+ with:
89
+ fetch-depth: 2
90
+
91
+ - name: Install dependencies
92
+ run: |
93
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
94
+ python -m pip install -e [quality,test]
95
+ python -m pip install accelerate
96
+
97
+ - name: Environment
98
+ run: |
99
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
100
+ python utils/print_env.py
101
+
102
+ - name: Run all selected tests on CPU
103
+ run: |
104
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
105
+ python -m pytest -n 2 --dist=loadfile -v --make-reports=${{ matrix.modules }}_tests_cpu ${{ fromJson(needs.setup_pr_tests.outputs.test_map)[matrix.modules] }}
106
+
107
+ - name: Failure short reports
108
+ if: ${{ failure() }}
109
+ continue-on-error: true
110
+ run: |
111
+ cat reports/${{ matrix.modules }}_tests_cpu_stats.txt
112
+ cat reports/${{ matrix.modules }}_tests_cpu_failures_short.txt
113
+
114
+ - name: Test suite reports artifacts
115
+ if: ${{ always() }}
116
+ uses: actions/upload-artifact@v3
117
+ with:
118
+ name: ${{ matrix.modules }}_test_reports
119
+ path: reports
120
+
121
+ run_staging_tests:
122
+ strategy:
123
+ fail-fast: false
124
+ matrix:
125
+ config:
126
+ - name: Hub tests for models, schedulers, and pipelines
127
+ framework: hub_tests_pytorch
128
+ runner: aws-general-8-plus
129
+ image: diffusers/diffusers-pytorch-cpu
130
+ report: torch_hub
131
+
132
+ name: ${{ matrix.config.name }}
133
+ runs-on:
134
+ group: ${{ matrix.config.runner }}
135
+ container:
136
+ image: ${{ matrix.config.image }}
137
+ options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
138
+
139
+ defaults:
140
+ run:
141
+ shell: bash
142
+
143
+ steps:
144
+ - name: Checkout diffusers
145
+ uses: actions/checkout@v3
146
+ with:
147
+ fetch-depth: 2
148
+
149
+ - name: Install dependencies
150
+ run: |
151
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
152
+ python -m pip install -e [quality,test]
153
+
154
+ - name: Environment
155
+ run: |
156
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
157
+ python utils/print_env.py
158
+
159
+ - name: Run Hub tests for models, schedulers, and pipelines on a staging env
160
+ if: ${{ matrix.config.framework == 'hub_tests_pytorch' }}
161
+ run: |
162
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
163
+ HUGGINGFACE_CO_STAGING=true python -m pytest \
164
+ -m "is_staging_test" \
165
+ --make-reports=tests_${{ matrix.config.report }} \
166
+ tests
167
+
168
+ - name: Failure short reports
169
+ if: ${{ failure() }}
170
+ run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt
171
+
172
+ - name: Test suite reports artifacts
173
+ if: ${{ always() }}
174
+ uses: actions/upload-artifact@v2
175
+ with:
176
+ name: pr_${{ matrix.config.report }}_test_reports
177
+ path: reports
rollingdepth_src/diffusers/.github/workflows/pr_test_peft_backend.yml ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Fast tests for PRs - PEFT backend
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - "src/diffusers/**.py"
9
+ - "tests/**.py"
10
+
11
+ concurrency:
12
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
13
+ cancel-in-progress: true
14
+
15
+ env:
16
+ DIFFUSERS_IS_CI: yes
17
+ OMP_NUM_THREADS: 4
18
+ MKL_NUM_THREADS: 4
19
+ PYTEST_TIMEOUT: 60
20
+
21
+ jobs:
22
+ check_code_quality:
23
+ runs-on: ubuntu-latest
24
+ steps:
25
+ - uses: actions/checkout@v3
26
+ - name: Set up Python
27
+ uses: actions/setup-python@v4
28
+ with:
29
+ python-version: "3.8"
30
+ - name: Install dependencies
31
+ run: |
32
+ python -m pip install --upgrade pip
33
+ pip install .[quality]
34
+ - name: Check quality
35
+ run: make quality
36
+ - name: Check if failure
37
+ if: ${{ failure() }}
38
+ run: |
39
+ echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make style && make quality'" >> $GITHUB_STEP_SUMMARY
40
+
41
+ check_repository_consistency:
42
+ needs: check_code_quality
43
+ runs-on: ubuntu-latest
44
+ steps:
45
+ - uses: actions/checkout@v3
46
+ - name: Set up Python
47
+ uses: actions/setup-python@v4
48
+ with:
49
+ python-version: "3.8"
50
+ - name: Install dependencies
51
+ run: |
52
+ python -m pip install --upgrade pip
53
+ pip install .[quality]
54
+ - name: Check repo consistency
55
+ run: |
56
+ python utils/check_copies.py
57
+ python utils/check_dummies.py
58
+ make deps_table_check_updated
59
+ - name: Check if failure
60
+ if: ${{ failure() }}
61
+ run: |
62
+ echo "Repo consistency check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make fix-copies'" >> $GITHUB_STEP_SUMMARY
63
+
64
+ run_fast_tests:
65
+ needs: [check_code_quality, check_repository_consistency]
66
+ strategy:
67
+ fail-fast: false
68
+ matrix:
69
+ lib-versions: ["main", "latest"]
70
+
71
+
72
+ name: LoRA - ${{ matrix.lib-versions }}
73
+
74
+ runs-on:
75
+ group: aws-general-8-plus
76
+
77
+ container:
78
+ image: diffusers/diffusers-pytorch-cpu
79
+ options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
80
+
81
+ defaults:
82
+ run:
83
+ shell: bash
84
+
85
+ steps:
86
+ - name: Checkout diffusers
87
+ uses: actions/checkout@v3
88
+ with:
89
+ fetch-depth: 2
90
+
91
+ - name: Install dependencies
92
+ run: |
93
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
94
+ python -m uv pip install -e [quality,test]
95
+ if [ "${{ matrix.lib-versions }}" == "main" ]; then
96
+ python -m pip install -U peft@git+https://github.com/huggingface/peft.git
97
+ python -m uv pip install -U transformers@git+https://github.com/huggingface/transformers.git
98
+ python -m uv pip install -U accelerate@git+https://github.com/huggingface/accelerate.git
99
+ else
100
+ python -m uv pip install -U peft transformers accelerate
101
+ fi
102
+
103
+ - name: Environment
104
+ run: |
105
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
106
+ python utils/print_env.py
107
+
108
+ - name: Run fast PyTorch LoRA CPU tests with PEFT backend
109
+ run: |
110
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
111
+ python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
112
+ -s -v \
113
+ --make-reports=tests_${{ matrix.lib-versions }} \
114
+ tests/lora/
115
+ python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
116
+ -s -v \
117
+ --make-reports=tests_models_lora_${{ matrix.lib-versions }} \
118
+ tests/models/ -k "lora"
119
+
120
+
121
+ - name: Failure short reports
122
+ if: ${{ failure() }}
123
+ run: |
124
+ cat reports/tests_${{ matrix.lib-versions }}_failures_short.txt
125
+ cat reports/tests_models_lora_${{ matrix.lib-versions }}_failures_short.txt
126
+
127
+ - name: Test suite reports artifacts
128
+ if: ${{ always() }}
129
+ uses: actions/upload-artifact@v2
130
+ with:
131
+ name: pr_${{ matrix.lib-versions }}_test_reports
132
+ path: reports
rollingdepth_src/diffusers/.github/workflows/pr_tests.yml ADDED
@@ -0,0 +1,235 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Fast tests for PRs
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - "src/diffusers/**.py"
9
+ - "benchmarks/**.py"
10
+ - "examples/**.py"
11
+ - "scripts/**.py"
12
+ - "tests/**.py"
13
+ - ".github/**.yml"
14
+ - "utils/**.py"
15
+ push:
16
+ branches:
17
+ - ci-*
18
+
19
+ concurrency:
20
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
21
+ cancel-in-progress: true
22
+
23
+ env:
24
+ DIFFUSERS_IS_CI: yes
25
+ OMP_NUM_THREADS: 4
26
+ MKL_NUM_THREADS: 4
27
+ PYTEST_TIMEOUT: 60
28
+
29
+ jobs:
30
+ check_code_quality:
31
+ runs-on: ubuntu-latest
32
+ steps:
33
+ - uses: actions/checkout@v3
34
+ - name: Set up Python
35
+ uses: actions/setup-python@v4
36
+ with:
37
+ python-version: "3.8"
38
+ - name: Install dependencies
39
+ run: |
40
+ python -m pip install --upgrade pip
41
+ pip install .[quality]
42
+ - name: Check quality
43
+ run: make quality
44
+ - name: Check if failure
45
+ if: ${{ failure() }}
46
+ run: |
47
+ echo "Quality check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make style && make quality'" >> $GITHUB_STEP_SUMMARY
48
+
49
+ check_repository_consistency:
50
+ needs: check_code_quality
51
+ runs-on: ubuntu-latest
52
+ steps:
53
+ - uses: actions/checkout@v3
54
+ - name: Set up Python
55
+ uses: actions/setup-python@v4
56
+ with:
57
+ python-version: "3.8"
58
+ - name: Install dependencies
59
+ run: |
60
+ python -m pip install --upgrade pip
61
+ pip install .[quality]
62
+ - name: Check repo consistency
63
+ run: |
64
+ python utils/check_copies.py
65
+ python utils/check_dummies.py
66
+ make deps_table_check_updated
67
+ - name: Check if failure
68
+ if: ${{ failure() }}
69
+ run: |
70
+ echo "Repo consistency check failed. Please ensure the right dependency versions are installed with 'pip install -e .[quality]' and run 'make fix-copies'" >> $GITHUB_STEP_SUMMARY
71
+
72
+ run_fast_tests:
73
+ needs: [check_code_quality, check_repository_consistency]
74
+ strategy:
75
+ fail-fast: false
76
+ matrix:
77
+ config:
78
+ - name: Fast PyTorch Pipeline CPU tests
79
+ framework: pytorch_pipelines
80
+ runner: aws-highmemory-32-plus
81
+ image: diffusers/diffusers-pytorch-cpu
82
+ report: torch_cpu_pipelines
83
+ - name: Fast PyTorch Models & Schedulers CPU tests
84
+ framework: pytorch_models
85
+ runner: aws-general-8-plus
86
+ image: diffusers/diffusers-pytorch-cpu
87
+ report: torch_cpu_models_schedulers
88
+ - name: Fast Flax CPU tests
89
+ framework: flax
90
+ runner: aws-general-8-plus
91
+ image: diffusers/diffusers-flax-cpu
92
+ report: flax_cpu
93
+ - name: PyTorch Example CPU tests
94
+ framework: pytorch_examples
95
+ runner: aws-general-8-plus
96
+ image: diffusers/diffusers-pytorch-cpu
97
+ report: torch_example_cpu
98
+
99
+ name: ${{ matrix.config.name }}
100
+
101
+ runs-on:
102
+ group: ${{ matrix.config.runner }}
103
+
104
+ container:
105
+ image: ${{ matrix.config.image }}
106
+ options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
107
+
108
+ defaults:
109
+ run:
110
+ shell: bash
111
+
112
+ steps:
113
+ - name: Checkout diffusers
114
+ uses: actions/checkout@v3
115
+ with:
116
+ fetch-depth: 2
117
+
118
+ - name: Install dependencies
119
+ run: |
120
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
121
+ python -m uv pip install -e [quality,test]
122
+ python -m uv pip install accelerate
123
+
124
+ - name: Environment
125
+ run: |
126
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
127
+ python utils/print_env.py
128
+
129
+ - name: Run fast PyTorch Pipeline CPU tests
130
+ if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
131
+ run: |
132
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
133
+ python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile \
134
+ -s -v -k "not Flax and not Onnx" \
135
+ --make-reports=tests_${{ matrix.config.report }} \
136
+ tests/pipelines
137
+
138
+ - name: Run fast PyTorch Model Scheduler CPU tests
139
+ if: ${{ matrix.config.framework == 'pytorch_models' }}
140
+ run: |
141
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
142
+ python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
143
+ -s -v -k "not Flax and not Onnx and not Dependency" \
144
+ --make-reports=tests_${{ matrix.config.report }} \
145
+ tests/models tests/schedulers tests/others
146
+
147
+ - name: Run fast Flax TPU tests
148
+ if: ${{ matrix.config.framework == 'flax' }}
149
+ run: |
150
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
151
+ python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
152
+ -s -v -k "Flax" \
153
+ --make-reports=tests_${{ matrix.config.report }} \
154
+ tests
155
+
156
+ - name: Run example PyTorch CPU tests
157
+ if: ${{ matrix.config.framework == 'pytorch_examples' }}
158
+ run: |
159
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
160
+ python -m uv pip install peft timm
161
+ python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
162
+ --make-reports=tests_${{ matrix.config.report }} \
163
+ examples
164
+
165
+ - name: Failure short reports
166
+ if: ${{ failure() }}
167
+ run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt
168
+
169
+ - name: Test suite reports artifacts
170
+ if: ${{ always() }}
171
+ uses: actions/upload-artifact@v2
172
+ with:
173
+ name: pr_${{ matrix.config.report }}_test_reports
174
+ path: reports
175
+
176
+ run_staging_tests:
177
+ needs: [check_code_quality, check_repository_consistency]
178
+ strategy:
179
+ fail-fast: false
180
+ matrix:
181
+ config:
182
+ - name: Hub tests for models, schedulers, and pipelines
183
+ framework: hub_tests_pytorch
184
+ runner:
185
+ group: aws-general-8-plus
186
+ image: diffusers/diffusers-pytorch-cpu
187
+ report: torch_hub
188
+
189
+ name: ${{ matrix.config.name }}
190
+
191
+ runs-on: ${{ matrix.config.runner }}
192
+
193
+ container:
194
+ image: ${{ matrix.config.image }}
195
+ options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
196
+
197
+ defaults:
198
+ run:
199
+ shell: bash
200
+
201
+ steps:
202
+ - name: Checkout diffusers
203
+ uses: actions/checkout@v3
204
+ with:
205
+ fetch-depth: 2
206
+
207
+ - name: Install dependencies
208
+ run: |
209
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
210
+ python -m uv pip install -e [quality,test]
211
+
212
+ - name: Environment
213
+ run: |
214
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
215
+ python utils/print_env.py
216
+
217
+ - name: Run Hub tests for models, schedulers, and pipelines on a staging env
218
+ if: ${{ matrix.config.framework == 'hub_tests_pytorch' }}
219
+ run: |
220
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
221
+ HUGGINGFACE_CO_STAGING=true python -m pytest \
222
+ -m "is_staging_test" \
223
+ --make-reports=tests_${{ matrix.config.report }} \
224
+ tests
225
+
226
+ - name: Failure short reports
227
+ if: ${{ failure() }}
228
+ run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt
229
+
230
+ - name: Test suite reports artifacts
231
+ if: ${{ always() }}
232
+ uses: actions/upload-artifact@v2
233
+ with:
234
+ name: pr_${{ matrix.config.report }}_test_reports
235
+ path: reports
rollingdepth_src/diffusers/.github/workflows/pr_torch_dependency_test.yml ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Run Torch dependency tests
2
+
3
+ on:
4
+ pull_request:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - "src/diffusers/**.py"
9
+ push:
10
+ branches:
11
+ - main
12
+
13
+ concurrency:
14
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
15
+ cancel-in-progress: true
16
+
17
+ jobs:
18
+ check_torch_dependencies:
19
+ runs-on: ubuntu-latest
20
+ steps:
21
+ - uses: actions/checkout@v3
22
+ - name: Set up Python
23
+ uses: actions/setup-python@v4
24
+ with:
25
+ python-version: "3.8"
26
+ - name: Install dependencies
27
+ run: |
28
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
29
+ python -m pip install --upgrade pip uv
30
+ python -m uv pip install -e .
31
+ python -m uv pip install torch torchvision torchaudio
32
+ python -m uv pip install pytest
33
+ - name: Check for soft dependencies
34
+ run: |
35
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
36
+ pytest tests/others/test_dependencies.py
rollingdepth_src/diffusers/.github/workflows/push_tests.yml ADDED
@@ -0,0 +1,387 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Slow Tests on main
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - "src/diffusers/**.py"
9
+ - "examples/**.py"
10
+ - "tests/**.py"
11
+
12
+ env:
13
+ DIFFUSERS_IS_CI: yes
14
+ OMP_NUM_THREADS: 8
15
+ MKL_NUM_THREADS: 8
16
+ PYTEST_TIMEOUT: 600
17
+ PIPELINE_USAGE_CUTOFF: 50000
18
+
19
+ jobs:
20
+ setup_torch_cuda_pipeline_matrix:
21
+ name: Setup Torch Pipelines CUDA Slow Tests Matrix
22
+ runs-on:
23
+ group: aws-general-8-plus
24
+ container:
25
+ image: diffusers/diffusers-pytorch-cpu
26
+ outputs:
27
+ pipeline_test_matrix: ${{ steps.fetch_pipeline_matrix.outputs.pipeline_test_matrix }}
28
+ steps:
29
+ - name: Checkout diffusers
30
+ uses: actions/checkout@v3
31
+ with:
32
+ fetch-depth: 2
33
+ - name: Install dependencies
34
+ run: |
35
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
36
+ python -m uv pip install -e [quality,test]
37
+ - name: Environment
38
+ run: |
39
+ python utils/print_env.py
40
+ - name: Fetch Pipeline Matrix
41
+ id: fetch_pipeline_matrix
42
+ run: |
43
+ matrix=$(python utils/fetch_torch_cuda_pipeline_test_matrix.py)
44
+ echo $matrix
45
+ echo "pipeline_test_matrix=$matrix" >> $GITHUB_OUTPUT
46
+ - name: Pipeline Tests Artifacts
47
+ if: ${{ always() }}
48
+ uses: actions/upload-artifact@v2
49
+ with:
50
+ name: test-pipelines.json
51
+ path: reports
52
+
53
+ torch_pipelines_cuda_tests:
54
+ name: Torch Pipelines CUDA Tests
55
+ needs: setup_torch_cuda_pipeline_matrix
56
+ strategy:
57
+ fail-fast: false
58
+ max-parallel: 8
59
+ matrix:
60
+ module: ${{ fromJson(needs.setup_torch_cuda_pipeline_matrix.outputs.pipeline_test_matrix) }}
61
+ runs-on:
62
+ group: aws-g4dn-2xlarge
63
+ container:
64
+ image: diffusers/diffusers-pytorch-cuda
65
+ options: --shm-size "16gb" --ipc host --gpus 0
66
+ steps:
67
+ - name: Checkout diffusers
68
+ uses: actions/checkout@v3
69
+ with:
70
+ fetch-depth: 2
71
+ - name: NVIDIA-SMI
72
+ run: |
73
+ nvidia-smi
74
+ - name: Install dependencies
75
+ run: |
76
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
77
+ python -m uv pip install -e [quality,test]
78
+ python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
79
+ - name: Environment
80
+ run: |
81
+ python utils/print_env.py
82
+ - name: Slow PyTorch CUDA checkpoint tests on Ubuntu
83
+ env:
84
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
85
+ # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
86
+ CUBLAS_WORKSPACE_CONFIG: :16:8
87
+ run: |
88
+ python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
89
+ -s -v -k "not Flax and not Onnx" \
90
+ --make-reports=tests_pipeline_${{ matrix.module }}_cuda \
91
+ tests/pipelines/${{ matrix.module }}
92
+ - name: Failure short reports
93
+ if: ${{ failure() }}
94
+ run: |
95
+ cat reports/tests_pipeline_${{ matrix.module }}_cuda_stats.txt
96
+ cat reports/tests_pipeline_${{ matrix.module }}_cuda_failures_short.txt
97
+ - name: Test suite reports artifacts
98
+ if: ${{ always() }}
99
+ uses: actions/upload-artifact@v2
100
+ with:
101
+ name: pipeline_${{ matrix.module }}_test_reports
102
+ path: reports
103
+
104
+ torch_cuda_tests:
105
+ name: Torch CUDA Tests
106
+ runs-on:
107
+ group: aws-g4dn-2xlarge
108
+ container:
109
+ image: diffusers/diffusers-pytorch-cuda
110
+ options: --shm-size "16gb" --ipc host --gpus 0
111
+ defaults:
112
+ run:
113
+ shell: bash
114
+ strategy:
115
+ matrix:
116
+ module: [models, schedulers, lora, others, single_file]
117
+ steps:
118
+ - name: Checkout diffusers
119
+ uses: actions/checkout@v3
120
+ with:
121
+ fetch-depth: 2
122
+
123
+ - name: Install dependencies
124
+ run: |
125
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
126
+ python -m uv pip install -e [quality,test]
127
+ python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
128
+ python -m uv pip install peft@git+https://github.com/huggingface/peft.git
129
+
130
+ - name: Environment
131
+ run: |
132
+ python utils/print_env.py
133
+
134
+ - name: Run PyTorch CUDA tests
135
+ env:
136
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
137
+ # https://pytorch.org/docs/stable/notes/randomness.html#avoiding-nondeterministic-algorithms
138
+ CUBLAS_WORKSPACE_CONFIG: :16:8
139
+ run: |
140
+ python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
141
+ -s -v -k "not Flax and not Onnx" \
142
+ --make-reports=tests_torch_cuda \
143
+ tests/${{ matrix.module }}
144
+
145
+ - name: Failure short reports
146
+ if: ${{ failure() }}
147
+ run: |
148
+ cat reports/tests_torch_cuda_stats.txt
149
+ cat reports/tests_torch_cuda_failures_short.txt
150
+
151
+ - name: Test suite reports artifacts
152
+ if: ${{ always() }}
153
+ uses: actions/upload-artifact@v2
154
+ with:
155
+ name: torch_cuda_test_reports
156
+ path: reports
157
+
158
+ flax_tpu_tests:
159
+ name: Flax TPU Tests
160
+ runs-on: docker-tpu
161
+ container:
162
+ image: diffusers/diffusers-flax-tpu
163
+ options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --privileged
164
+ defaults:
165
+ run:
166
+ shell: bash
167
+ steps:
168
+ - name: Checkout diffusers
169
+ uses: actions/checkout@v3
170
+ with:
171
+ fetch-depth: 2
172
+
173
+ - name: Install dependencies
174
+ run: |
175
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
176
+ python -m uv pip install -e [quality,test]
177
+ python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
178
+
179
+ - name: Environment
180
+ run: |
181
+ python utils/print_env.py
182
+
183
+ - name: Run slow Flax TPU tests
184
+ env:
185
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
186
+ run: |
187
+ python -m pytest -n 0 \
188
+ -s -v -k "Flax" \
189
+ --make-reports=tests_flax_tpu \
190
+ tests/
191
+
192
+ - name: Failure short reports
193
+ if: ${{ failure() }}
194
+ run: |
195
+ cat reports/tests_flax_tpu_stats.txt
196
+ cat reports/tests_flax_tpu_failures_short.txt
197
+
198
+ - name: Test suite reports artifacts
199
+ if: ${{ always() }}
200
+ uses: actions/upload-artifact@v2
201
+ with:
202
+ name: flax_tpu_test_reports
203
+ path: reports
204
+
205
+ onnx_cuda_tests:
206
+ name: ONNX CUDA Tests
207
+ runs-on:
208
+ group: aws-g4dn-2xlarge
209
+ container:
210
+ image: diffusers/diffusers-onnxruntime-cuda
211
+ options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ --gpus 0
212
+ defaults:
213
+ run:
214
+ shell: bash
215
+ steps:
216
+ - name: Checkout diffusers
217
+ uses: actions/checkout@v3
218
+ with:
219
+ fetch-depth: 2
220
+
221
+ - name: Install dependencies
222
+ run: |
223
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
224
+ python -m uv pip install -e [quality,test]
225
+ python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
226
+
227
+ - name: Environment
228
+ run: |
229
+ python utils/print_env.py
230
+
231
+ - name: Run slow ONNXRuntime CUDA tests
232
+ env:
233
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
234
+ run: |
235
+ python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile \
236
+ -s -v -k "Onnx" \
237
+ --make-reports=tests_onnx_cuda \
238
+ tests/
239
+
240
+ - name: Failure short reports
241
+ if: ${{ failure() }}
242
+ run: |
243
+ cat reports/tests_onnx_cuda_stats.txt
244
+ cat reports/tests_onnx_cuda_failures_short.txt
245
+
246
+ - name: Test suite reports artifacts
247
+ if: ${{ always() }}
248
+ uses: actions/upload-artifact@v2
249
+ with:
250
+ name: onnx_cuda_test_reports
251
+ path: reports
252
+
253
+ run_torch_compile_tests:
254
+ name: PyTorch Compile CUDA tests
255
+
256
+ runs-on:
257
+ group: aws-g4dn-2xlarge
258
+
259
+ container:
260
+ image: diffusers/diffusers-pytorch-compile-cuda
261
+ options: --gpus 0 --shm-size "16gb" --ipc host
262
+
263
+ steps:
264
+ - name: Checkout diffusers
265
+ uses: actions/checkout@v3
266
+ with:
267
+ fetch-depth: 2
268
+
269
+ - name: NVIDIA-SMI
270
+ run: |
271
+ nvidia-smi
272
+ - name: Install dependencies
273
+ run: |
274
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
275
+ python -m uv pip install -e [quality,test,training]
276
+ - name: Environment
277
+ run: |
278
+ python utils/print_env.py
279
+ - name: Run example tests on GPU
280
+ env:
281
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
282
+ RUN_COMPILE: yes
283
+ run: |
284
+ python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "compile" --make-reports=tests_torch_compile_cuda tests/
285
+ - name: Failure short reports
286
+ if: ${{ failure() }}
287
+ run: cat reports/tests_torch_compile_cuda_failures_short.txt
288
+
289
+ - name: Test suite reports artifacts
290
+ if: ${{ always() }}
291
+ uses: actions/upload-artifact@v2
292
+ with:
293
+ name: torch_compile_test_reports
294
+ path: reports
295
+
296
+ run_xformers_tests:
297
+ name: PyTorch xformers CUDA tests
298
+
299
+ runs-on:
300
+ group: aws-g4dn-2xlarge
301
+
302
+ container:
303
+ image: diffusers/diffusers-pytorch-xformers-cuda
304
+ options: --gpus 0 --shm-size "16gb" --ipc host
305
+
306
+ steps:
307
+ - name: Checkout diffusers
308
+ uses: actions/checkout@v3
309
+ with:
310
+ fetch-depth: 2
311
+
312
+ - name: NVIDIA-SMI
313
+ run: |
314
+ nvidia-smi
315
+ - name: Install dependencies
316
+ run: |
317
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
318
+ python -m uv pip install -e [quality,test,training]
319
+ - name: Environment
320
+ run: |
321
+ python utils/print_env.py
322
+ - name: Run example tests on GPU
323
+ env:
324
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
325
+ run: |
326
+ python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v -k "xformers" --make-reports=tests_torch_xformers_cuda tests/
327
+ - name: Failure short reports
328
+ if: ${{ failure() }}
329
+ run: cat reports/tests_torch_xformers_cuda_failures_short.txt
330
+
331
+ - name: Test suite reports artifacts
332
+ if: ${{ always() }}
333
+ uses: actions/upload-artifact@v2
334
+ with:
335
+ name: torch_xformers_test_reports
336
+ path: reports
337
+
338
+ run_examples_tests:
339
+ name: Examples PyTorch CUDA tests on Ubuntu
340
+
341
+ runs-on:
342
+ group: aws-g4dn-2xlarge
343
+
344
+ container:
345
+ image: diffusers/diffusers-pytorch-cuda
346
+ options: --gpus 0 --shm-size "16gb" --ipc host
347
+
348
+ steps:
349
+ - name: Checkout diffusers
350
+ uses: actions/checkout@v3
351
+ with:
352
+ fetch-depth: 2
353
+
354
+ - name: NVIDIA-SMI
355
+ run: |
356
+ nvidia-smi
357
+
358
+ - name: Install dependencies
359
+ run: |
360
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
361
+ python -m uv pip install -e [quality,test,training]
362
+
363
+ - name: Environment
364
+ run: |
365
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
366
+ python utils/print_env.py
367
+
368
+ - name: Run example tests on GPU
369
+ env:
370
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
371
+ run: |
372
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
373
+ python -m uv pip install timm
374
+ python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s -v --make-reports=examples_torch_cuda examples/
375
+
376
+ - name: Failure short reports
377
+ if: ${{ failure() }}
378
+ run: |
379
+ cat reports/examples_torch_cuda_stats.txt
380
+ cat reports/examples_torch_cuda_failures_short.txt
381
+
382
+ - name: Test suite reports artifacts
383
+ if: ${{ always() }}
384
+ uses: actions/upload-artifact@v2
385
+ with:
386
+ name: examples_test_reports
387
+ path: reports
rollingdepth_src/diffusers/.github/workflows/push_tests_fast.yml ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Fast tests on main
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - "src/diffusers/**.py"
9
+ - "examples/**.py"
10
+ - "tests/**.py"
11
+
12
+ concurrency:
13
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
14
+ cancel-in-progress: true
15
+
16
+ env:
17
+ DIFFUSERS_IS_CI: yes
18
+ HF_HOME: /mnt/cache
19
+ OMP_NUM_THREADS: 8
20
+ MKL_NUM_THREADS: 8
21
+ PYTEST_TIMEOUT: 600
22
+ RUN_SLOW: no
23
+
24
+ jobs:
25
+ run_fast_tests:
26
+ strategy:
27
+ fail-fast: false
28
+ matrix:
29
+ config:
30
+ - name: Fast PyTorch CPU tests on Ubuntu
31
+ framework: pytorch
32
+ runner: aws-general-8-plus
33
+ image: diffusers/diffusers-pytorch-cpu
34
+ report: torch_cpu
35
+ - name: Fast Flax CPU tests on Ubuntu
36
+ framework: flax
37
+ runner: aws-general-8-plus
38
+ image: diffusers/diffusers-flax-cpu
39
+ report: flax_cpu
40
+ - name: Fast ONNXRuntime CPU tests on Ubuntu
41
+ framework: onnxruntime
42
+ runner: aws-general-8-plus
43
+ image: diffusers/diffusers-onnxruntime-cpu
44
+ report: onnx_cpu
45
+ - name: PyTorch Example CPU tests on Ubuntu
46
+ framework: pytorch_examples
47
+ runner: aws-general-8-plus
48
+ image: diffusers/diffusers-pytorch-cpu
49
+ report: torch_example_cpu
50
+
51
+ name: ${{ matrix.config.name }}
52
+
53
+ runs-on:
54
+ group: ${{ matrix.config.runner }}
55
+
56
+ container:
57
+ image: ${{ matrix.config.image }}
58
+ options: --shm-size "16gb" --ipc host -v /mnt/hf_cache:/mnt/cache/
59
+
60
+ defaults:
61
+ run:
62
+ shell: bash
63
+
64
+ steps:
65
+ - name: Checkout diffusers
66
+ uses: actions/checkout@v3
67
+ with:
68
+ fetch-depth: 2
69
+
70
+ - name: Install dependencies
71
+ run: |
72
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
73
+ python -m uv pip install -e [quality,test]
74
+
75
+ - name: Environment
76
+ run: |
77
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
78
+ python utils/print_env.py
79
+
80
+ - name: Run fast PyTorch CPU tests
81
+ if: ${{ matrix.config.framework == 'pytorch' }}
82
+ run: |
83
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
84
+ python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
85
+ -s -v -k "not Flax and not Onnx" \
86
+ --make-reports=tests_${{ matrix.config.report }} \
87
+ tests/
88
+
89
+ - name: Run fast Flax TPU tests
90
+ if: ${{ matrix.config.framework == 'flax' }}
91
+ run: |
92
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
93
+ python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
94
+ -s -v -k "Flax" \
95
+ --make-reports=tests_${{ matrix.config.report }} \
96
+ tests/
97
+
98
+ - name: Run fast ONNXRuntime CPU tests
99
+ if: ${{ matrix.config.framework == 'onnxruntime' }}
100
+ run: |
101
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
102
+ python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
103
+ -s -v -k "Onnx" \
104
+ --make-reports=tests_${{ matrix.config.report }} \
105
+ tests/
106
+
107
+ - name: Run example PyTorch CPU tests
108
+ if: ${{ matrix.config.framework == 'pytorch_examples' }}
109
+ run: |
110
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
111
+ python -m uv pip install peft timm
112
+ python -m pytest -n 4 --max-worker-restart=0 --dist=loadfile \
113
+ --make-reports=tests_${{ matrix.config.report }} \
114
+ examples
115
+
116
+ - name: Failure short reports
117
+ if: ${{ failure() }}
118
+ run: cat reports/tests_${{ matrix.config.report }}_failures_short.txt
119
+
120
+ - name: Test suite reports artifacts
121
+ if: ${{ always() }}
122
+ uses: actions/upload-artifact@v2
123
+ with:
124
+ name: pr_${{ matrix.config.report }}_test_reports
125
+ path: reports
rollingdepth_src/diffusers/.github/workflows/push_tests_mps.yml ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Fast mps tests on main
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main
7
+ paths:
8
+ - "src/diffusers/**.py"
9
+ - "tests/**.py"
10
+
11
+ env:
12
+ DIFFUSERS_IS_CI: yes
13
+ HF_HOME: /mnt/cache
14
+ OMP_NUM_THREADS: 8
15
+ MKL_NUM_THREADS: 8
16
+ PYTEST_TIMEOUT: 600
17
+ RUN_SLOW: no
18
+
19
+ concurrency:
20
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
21
+ cancel-in-progress: true
22
+
23
+ jobs:
24
+ run_fast_tests_apple_m1:
25
+ name: Fast PyTorch MPS tests on MacOS
26
+ runs-on: macos-13-xlarge
27
+
28
+ steps:
29
+ - name: Checkout diffusers
30
+ uses: actions/checkout@v3
31
+ with:
32
+ fetch-depth: 2
33
+
34
+ - name: Clean checkout
35
+ shell: arch -arch arm64 bash {0}
36
+ run: |
37
+ git clean -fxd
38
+
39
+ - name: Setup miniconda
40
+ uses: ./.github/actions/setup-miniconda
41
+ with:
42
+ python-version: 3.9
43
+
44
+ - name: Install dependencies
45
+ shell: arch -arch arm64 bash {0}
46
+ run: |
47
+ ${CONDA_RUN} python -m pip install --upgrade pip uv
48
+ ${CONDA_RUN} python -m uv pip install -e [quality,test]
49
+ ${CONDA_RUN} python -m uv pip install torch torchvision torchaudio
50
+ ${CONDA_RUN} python -m uv pip install accelerate@git+https://github.com/huggingface/accelerate.git
51
+ ${CONDA_RUN} python -m uv pip install transformers --upgrade
52
+
53
+ - name: Environment
54
+ shell: arch -arch arm64 bash {0}
55
+ run: |
56
+ ${CONDA_RUN} python utils/print_env.py
57
+
58
+ - name: Run fast PyTorch tests on M1 (MPS)
59
+ shell: arch -arch arm64 bash {0}
60
+ env:
61
+ HF_HOME: /System/Volumes/Data/mnt/cache
62
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
63
+ run: |
64
+ ${CONDA_RUN} python -m pytest -n 0 -s -v --make-reports=tests_torch_mps tests/
65
+
66
+ - name: Failure short reports
67
+ if: ${{ failure() }}
68
+ run: cat reports/tests_torch_mps_failures_short.txt
69
+
70
+ - name: Test suite reports artifacts
71
+ if: ${{ always() }}
72
+ uses: actions/upload-artifact@v2
73
+ with:
74
+ name: pr_torch_mps_test_reports
75
+ path: reports
rollingdepth_src/diffusers/.github/workflows/pypi_publish.yaml ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Adapted from https://blog.deepjyoti30.dev/pypi-release-github-action
2
+
3
+ name: PyPI release
4
+
5
+ on:
6
+ workflow_dispatch:
7
+ push:
8
+ tags:
9
+ - "*"
10
+
11
+ jobs:
12
+ find-and-checkout-latest-branch:
13
+ runs-on: ubuntu-latest
14
+ outputs:
15
+ latest_branch: ${{ steps.set_latest_branch.outputs.latest_branch }}
16
+ steps:
17
+ - name: Checkout Repo
18
+ uses: actions/checkout@v3
19
+
20
+ - name: Set up Python
21
+ uses: actions/setup-python@v4
22
+ with:
23
+ python-version: '3.8'
24
+
25
+ - name: Fetch latest branch
26
+ id: fetch_latest_branch
27
+ run: |
28
+ pip install -U requests packaging
29
+ LATEST_BRANCH=$(python utils/fetch_latest_release_branch.py)
30
+ echo "Latest branch: $LATEST_BRANCH"
31
+ echo "latest_branch=$LATEST_BRANCH" >> $GITHUB_ENV
32
+
33
+ - name: Set latest branch output
34
+ id: set_latest_branch
35
+ run: echo "::set-output name=latest_branch::${{ env.latest_branch }}"
36
+
37
+ release:
38
+ needs: find-and-checkout-latest-branch
39
+ runs-on: ubuntu-latest
40
+
41
+ steps:
42
+ - name: Checkout Repo
43
+ uses: actions/checkout@v3
44
+ with:
45
+ ref: ${{ needs.find-and-checkout-latest-branch.outputs.latest_branch }}
46
+
47
+ - name: Setup Python
48
+ uses: actions/setup-python@v4
49
+ with:
50
+ python-version: "3.8"
51
+
52
+ - name: Install dependencies
53
+ run: |
54
+ python -m pip install --upgrade pip
55
+ pip install -U setuptools wheel twine
56
+ pip install -U torch --index-url https://download.pytorch.org/whl/cpu
57
+ pip install -U transformers
58
+
59
+ - name: Build the dist files
60
+ run: python setup.py bdist_wheel && python setup.py sdist
61
+
62
+ - name: Publish to the test PyPI
63
+ env:
64
+ TWINE_USERNAME: ${{ secrets.TEST_PYPI_USERNAME }}
65
+ TWINE_PASSWORD: ${{ secrets.TEST_PYPI_PASSWORD }}
66
+ run: twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
67
+
68
+ - name: Test installing diffusers and importing
69
+ run: |
70
+ pip install diffusers && pip uninstall diffusers -y
71
+ pip install -i https://testpypi.python.org/pypi diffusers
72
+ python -c "from diffusers import __version__; print(__version__)"
73
+ python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('fusing/unet-ldm-dummy-update'); pipe()"
74
+ python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None); pipe('ah suh du')"
75
+ python -c "from diffusers import *"
76
+
77
+ - name: Publish to PyPI
78
+ env:
79
+ TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
80
+ TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
81
+ run: twine upload dist/* -r pypi
rollingdepth_src/diffusers/.github/workflows/run_tests_from_a_pr.yml ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Check running SLOW tests from a PR (only GPU)
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ inputs:
6
+ docker_image:
7
+ default: 'diffusers/diffusers-pytorch-cuda'
8
+ description: 'Name of the Docker image'
9
+ required: true
10
+ branch:
11
+ description: 'PR Branch to test on'
12
+ required: true
13
+ test:
14
+ description: 'Tests to run (e.g.: `tests/models`).'
15
+ required: true
16
+
17
+ env:
18
+ DIFFUSERS_IS_CI: yes
19
+ IS_GITHUB_CI: "1"
20
+ HF_HOME: /mnt/cache
21
+ OMP_NUM_THREADS: 8
22
+ MKL_NUM_THREADS: 8
23
+ PYTEST_TIMEOUT: 600
24
+ RUN_SLOW: yes
25
+
26
+ jobs:
27
+ run_tests:
28
+ name: "Run a test on our runner from a PR"
29
+ runs-on:
30
+ group: aws-g4dn-2xlarge
31
+ container:
32
+ image: ${{ github.event.inputs.docker_image }}
33
+ options: --gpus 0 --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
34
+
35
+ steps:
36
+ - name: Validate test files input
37
+ id: validate_test_files
38
+ env:
39
+ PY_TEST: ${{ github.event.inputs.test }}
40
+ run: |
41
+ if [[ ! "$PY_TEST" =~ ^tests/ ]]; then
42
+ echo "Error: The input string must start with 'tests/'."
43
+ exit 1
44
+ fi
45
+
46
+ if [[ ! "$PY_TEST" =~ ^tests/(models|pipelines) ]]; then
47
+ echo "Error: The input string must contain either 'models' or 'pipelines' after 'tests/'."
48
+ exit 1
49
+ fi
50
+
51
+ if [[ "$PY_TEST" == *";"* ]]; then
52
+ echo "Error: The input string must not contain ';'."
53
+ exit 1
54
+ fi
55
+ echo "$PY_TEST"
56
+
57
+ - name: Checkout PR branch
58
+ uses: actions/checkout@v4
59
+ with:
60
+ ref: ${{ github.event.inputs.branch }}
61
+ repository: ${{ github.event.pull_request.head.repo.full_name }}
62
+
63
+
64
+ - name: Install pytest
65
+ run: |
66
+ python -m venv /opt/venv && export PATH="/opt/venv/bin:$PATH"
67
+ python -m uv pip install -e [quality,test]
68
+ python -m uv pip install peft
69
+
70
+ - name: Run tests
71
+ env:
72
+ PY_TEST: ${{ github.event.inputs.test }}
73
+ run: |
74
+ pytest "$PY_TEST"
rollingdepth_src/diffusers/.github/workflows/ssh-pr-runner.yml ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: SSH into PR runners
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ inputs:
6
+ docker_image:
7
+ description: 'Name of the Docker image'
8
+ required: true
9
+
10
+ env:
11
+ IS_GITHUB_CI: "1"
12
+ HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
13
+ HF_HOME: /mnt/cache
14
+ DIFFUSERS_IS_CI: yes
15
+ OMP_NUM_THREADS: 8
16
+ MKL_NUM_THREADS: 8
17
+ RUN_SLOW: yes
18
+
19
+ jobs:
20
+ ssh_runner:
21
+ name: "SSH"
22
+ runs-on:
23
+ group: aws-highmemory-32-plus
24
+ container:
25
+ image: ${{ github.event.inputs.docker_image }}
26
+ options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --privileged
27
+
28
+ steps:
29
+ - name: Checkout diffusers
30
+ uses: actions/checkout@v3
31
+ with:
32
+ fetch-depth: 2
33
+
34
+ - name: Tailscale # In order to be able to SSH when a test fails
35
+ uses: huggingface/tailscale-action@main
36
+ with:
37
+ authkey: ${{ secrets.TAILSCALE_SSH_AUTHKEY }}
38
+ slackChannel: ${{ secrets.SLACK_CIFEEDBACK_CHANNEL }}
39
+ slackToken: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
40
+ waitForSSH: true
rollingdepth_src/diffusers/.github/workflows/ssh-runner.yml ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: SSH into GPU runners
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ inputs:
6
+ runner_type:
7
+ description: 'Type of runner to test (a10 or t4)'
8
+ required: true
9
+ docker_image:
10
+ description: 'Name of the Docker image'
11
+ required: true
12
+
13
+ env:
14
+ IS_GITHUB_CI: "1"
15
+ HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
16
+ HF_HOME: /mnt/cache
17
+ DIFFUSERS_IS_CI: yes
18
+ OMP_NUM_THREADS: 8
19
+ MKL_NUM_THREADS: 8
20
+ RUN_SLOW: yes
21
+
22
+ jobs:
23
+ ssh_runner:
24
+ name: "SSH"
25
+ runs-on:
26
+ group: "${{ github.event.inputs.runner_type }}"
27
+ container:
28
+ image: ${{ github.event.inputs.docker_image }}
29
+ options: --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface/diffusers:/mnt/cache/ --gpus 0 --privileged
30
+
31
+ steps:
32
+ - name: Checkout diffusers
33
+ uses: actions/checkout@v3
34
+ with:
35
+ fetch-depth: 2
36
+
37
+ - name: NVIDIA-SMI
38
+ run: |
39
+ nvidia-smi
40
+
41
+ - name: Tailscale # In order to be able to SSH when a test fails
42
+ uses: huggingface/tailscale-action@main
43
+ with:
44
+ authkey: ${{ secrets.TAILSCALE_SSH_AUTHKEY }}
45
+ slackChannel: ${{ secrets.SLACK_CIFEEDBACK_CHANNEL }}
46
+ slackToken: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
47
+ waitForSSH: true
rollingdepth_src/diffusers/.github/workflows/stale.yml ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Stale Bot
2
+
3
+ on:
4
+ schedule:
5
+ - cron: "0 15 * * *"
6
+
7
+ jobs:
8
+ close_stale_issues:
9
+ name: Close Stale Issues
10
+ if: github.repository == 'huggingface/diffusers'
11
+ runs-on: ubuntu-latest
12
+ env:
13
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
14
+ steps:
15
+ - uses: actions/checkout@v2
16
+
17
+ - name: Setup Python
18
+ uses: actions/setup-python@v1
19
+ with:
20
+ python-version: 3.8
21
+
22
+ - name: Install requirements
23
+ run: |
24
+ pip install PyGithub
25
+ - name: Close stale issues
26
+ run: |
27
+ python utils/stale.py