Baraaqasem committed on
Commit
14ee1a9
1 Parent(s): 356b53a

Upload 29 files

.gitignore ADDED
@@ -0,0 +1,192 @@
+ # Images
+ *jpg
+ *png
+ *mp4
+ .idea/
+ # Src
+ env_cfg/src
+ results/
+ checkpoints/*
+ temp/*
+
+
+
+ # Logging
+ *.out
+ *.bak
+
+ # OS generated files #
+ ######################
+ .DS_Store
+ .DS_Store?
+ ._*
+ .Spotlight-V100
+ .Trashes
+ ehthumbs.db
+ Thumbs.db
+
+ # Ignore ckpt, pt
+ *.pt
+ *.ckpt
+ *.pth
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 TIGER Lab
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
build_pypi.sh ADDED
@@ -0,0 +1,6 @@
+ rm -rf dist
+ rm -rf build
+ python setup.py sdist
+ python setup.py bdist_wheel
+ twine check dist/*
+ twine upload dist/*
conftest.py ADDED
@@ -0,0 +1,10 @@
+ import os
+ import sys
+
+ # Calculate the path to the root of the project
+ root_path = os.path.dirname(os.path.abspath(__file__))
+
+ # Add the project root to the sys.path
+ sys.path.append(root_path)
+
+ sys.path.append(os.path.join(root_path, "src"))
correct_mp4.py ADDED
@@ -0,0 +1,32 @@
+ import argparse
+ import os
+ from moviepy.editor import VideoFileClip
+
+ def reprocess_video(input_path, output_path):
+     # Load the video file
+     clip = VideoFileClip(input_path)
+
+     # Write the clip to a new file with the desired encoding.
+     clip.write_videofile(output_path, codec="libx264", audio_codec="aac")
+
+ def find_and_replace_videos(directory):
+     for root, dirs, files in os.walk(directory):
+         for file in files:
+             if file.endswith(".mp4"):
+                 full_path = os.path.join(root, file)
+                 print(f"Processing {full_path}...")
+
+                 # Define the output path, could overwrite or create a new file
+                 output_path = full_path  # This will overwrite the original file
+
+                 # To prevent overwriting, uncomment the following line and comment out the above line
+                 # output_path = os.path.splitext(full_path)[0] + "_corrected.mp4"
+                 reprocess_video(full_path, output_path)
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Reprocess MP4 files in a folder with correct encoding.")
+     parser.add_argument("directory", help="The directory to search for MP4 files")
+
+     args = parser.parse_args()
+
+     find_and_replace_videos(args.directory)
docs/Contributing/basics.md ADDED
@@ -0,0 +1,29 @@
+ # The Basics
+ We believe that everyone can contribute and make a difference. Whether it's writing code 💻, fixing bugs 🐛, or simply sharing feedback 💬, your contributions are welcome and appreciated 🙌
+
+ And if you like the project but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
+ - Star the project
+ - Tweet about it
+ - Refer to this project in your project's readme
+ - Mention the project at local meetups and tell your friends/colleagues
+
+ ## Fork and Pull Request
+
+ When contributing to the codebase of VideoGenHub, the first step is to create a fork of the repository. Then, it's best practice to create a separate branch for each new pull request (PR) you create. This can be done using:
+
+ ```shell
+ git checkout -b name_of_your_branch
+ ```
+
+ The main branch then simply has the role of being kept up to date with upstream, as shown in the example below. You can create PRs based on the main branch of your fork, but this will make things more complicated if you later want to create additional PRs.
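+
+ For example, one common way to keep your fork's `main` in sync with the upstream repository (adding the `upstream` remote is a one-time setup):
+
+ ```shell
+ git remote add upstream https://github.com/TIGER-AI-Lab/VideoGenHub.git
+ git fetch upstream
+ git checkout main
+ git merge upstream/main
+ ```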
+
+
+ ## Each Pull Request points to an issue
+
+ We suggest using one pull request for each issue. This makes changes clear and simple.
+
+ * Link each Pull Request to an issue: `- [ ] #issue_number`
+
+ ### Keep each Pull Request to a minimal set of files
+
+ Since each Pull Request links to one issue, don't change too many files at once. Keeping changes small makes it easier for others to check and understand. It also reduces mistakes and mix-ups. So, always keep it simple and focused.
docs/Contributing/coding.md ADDED
@@ -0,0 +1,112 @@
+ # Code Contribution
+
+ * We expect Type Hints in all methods.
+
+ You don't need to type hint every variable or function. Start with critical components, or where types might be confusing.
+ Type hints are not enforced at runtime. They're purely for the developer and tooling.
+ For large projects, consider gradual typing. You can add hints incrementally.
+ Type hints, when used judiciously, can make your Python code more readable and maintainable, and can catch potential bugs before runtime.
+
+ * We expect Docstrings in all methods and classes.
+
+ For modules, the docstring should list the classes, exceptions, and functions (and any other objects) that are exported by the module. For classes, document their methods and instance variables.
+
+ ## Type Hints
+
+ Type hints, introduced in Python 3.5 via PEP 484, provide a way to specify the expected data types for variables, function arguments, and return values. This can greatly improve the readability of your code, facilitate debugging, and enable better tooling (like type checkers and IDE assistance). Here's a guide to get you started with type hints.
+
+ ### Function and Method Signatures
+
+ Type hints can be added to function arguments and return values:
+ ```python
+ def greet(name: str) -> None:
+     print(f"Hello, {name}!")
+
+ def add(a: int, b: int) -> int:
+     return a + b
+ ```
+
+ ### Type Hinting Lists, Dictionaries, and Other Collections
+
+ For collections, you can use the `typing` module:
+
+ ```python
+ from typing import List, Dict
+
+ def get_names() -> List[str]:
+     return ["Alice", "Bob", "Charlie"]
+
+ def get_age_mapping() -> Dict[str, int]:
+     return {"Alice": 30, "Bob": 25, "Charlie": 28}
+ ```
+
+ ### Optional Types
+
+ If a variable might be of a certain type or None, use `Optional`:
+
+ ```python
+ from typing import Optional
+
+ def find_user(username: str) -> Optional[Dict[str, str]]:
+     # Return user dict if found, else None
+     pass
+ ```
+
+ ### Union Types
+
+ If a variable can be one of several types, use `Union`:
+
+ ```python
+ from typing import Union
+
+ def process_data(data: Union[str, bytes]) -> None:
+     pass
+ ```
+
+ ### Classes and Type Hints
+
+ You can also use type hints in class definitions:
+
+ ```python
+ class Person:
+     def __init__(self, name: str, age: int) -> None:
+         self.name: str = name
+         self.age: int = age
+ ```
+
+ ## Docstrings
+
+ Docstrings, or documentation strings, are an essential aspect of Python programming. They provide concise descriptions of how a module, class, method, or function works. PEP-8 recommends writing docstrings for all public modules, functions, classes, and methods, and the specific format shown below is commonly associated with Google's Python style guide. We'll walk through the process of adding such docstrings to your Python code.
+
+ ### What is PEP-8
+
+ PEP-8 is the Python Enhancement Proposal that provides coding conventions for the Python code comprising the standard library in the main Python distribution. These conventions help in making the code readable and maintainable.
+
+ ### Why Docstrings
+
+ Docstrings provide a built-in system for associating blocks of documentation with modules, classes, methods, and functions. This documentation can be accessed at runtime using the `help()` function or outside the runtime using tools like Sphinx.
+
+ ### PEP-8 Docstring Format
+
+ Here's the basic structure:
+
+ ```python
+ def function_name(arg1, arg2):
+     """Brief description.
+
+     More detailed description.
+
+     Args:
+         arg1 (type): Description of arg1.
+         arg2 (type): Description of arg2.
+
+     Returns:
+         type: Description of the return value.
+
+     Raises:
+         ExceptionType: Description of the circumstances under which the exception is raised.
+     """
+     pass
+
+ ```
+
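+ Class docstrings follow the same layout. A brief illustrative sketch (the class and its attributes are hypothetical, not part of VideoGenHub) is:
+
+ ```python
+ class VideoClip:
+     """Container for a generated video.
+
+     Attributes:
+         frames (torch.Tensor): Video frames with shape (num_frames, channels, height, width).
+         fps (int): Playback frame rate.
+     """
+
+     def __init__(self, frames, fps: int = 8) -> None:
+         self.frames = frames
+         self.fps = fps
+ ```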
docs/Contributing/docs.md ADDED
@@ -0,0 +1,15 @@
+ # Improving The Documentation
+
+ We are looking for help in improving the documentation.
+
+ ## Building the Docs locally
+
+ The easiest way to build the docs is to use the `docs/make_docs.sh` script.
+
+ Make sure that you are in the correct conda environment:
+ ```shell
+ cd docs
+ ./make_docs.sh
+ ```
+
+ This script will build the docs and store them in `docs/_build`.
docs/Guidelines/custommodel.md ADDED
@@ -0,0 +1,75 @@
+ # Adding new models
+
+ * You developed a new model or framework that achieves very good results. Now you want to benchmark it against other models. How can you do it?
+
+ In this guide we will add a new model to the codebase and extend the code.
+
+ ## Integrating your model into VideoGenHub
+
+
+ To add your model's codebase into the VideoGenHub codebase, you must modify the following folders:
+
+ * `src/videogen_hub/infermodels` : where you create a class interface for the model inference.
+ * `src/videogen_hub/pipelines` : where you move your codebase into, without much tidy-up work.
+
+ ### How to write the infermodel class
+ The infermodel class is designed to have minimal methods. However, it must contain the following methods:
+
+ * `__init__(args)` for class initialization.
+ * `infer_one_video(args)` to produce 1 video output. Please set the default seed to 42.
+
+ In that case, you will add a new file in the `infermodels` folder.
+ `infermodels/awesome_model.py`
+ ```python
+ import torch
+ from videogen_hub.pipelines.awesome_model import AwesomeModelPipeline
+ class AwesomeModelClass():
+     """
+     A wrapper ...
+     """
+     def __init__(self, device="cuda"):
+         """
+         docstring
+         """
+         self.pipe = AwesomeModelPipeline(device=device)
+
+     def infer_one_video(self, prompt, seed=42):
+         """
+         docstring
+         """
+         self.pipe.set_seed(seed)
+         video = self.pipe(prompt=prompt)
+         return video
+ ```
+ Then you can add a line in `infermodels/__init__.py`:
+ ```python
+ from .awesome_model import AwesomeModelClass
+ ```
+
+ ### Writing your pipeline
+ Regarding `AwesomeModelPipeline`: you need to write a pipeline file that wraps the functionality of your codebase, such that the infermodel class can call it with ease. A minimal sketch is shown after the folder layout below.
+
+ We recommend structuring code in the `pipelines` folder in this way:
+
+ ```shell
+ └── awesome_model
+     ├── pipeline_awesome_model.py
+     ├── awesome_model_src
+     │   └── ...
+     └── __init__.py
+ ```
+
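+ As a rough sketch (the names, methods, and tensor shape here are illustrative assumptions, not part of the actual codebase), `pipeline_awesome_model.py` could look like:
+
+ ```python
+ import torch
+
+ class AwesomeModelPipeline:
+     """Thin wrapper exposing your codebase to the infermodel class."""
+
+     def __init__(self, device: str = "cuda"):
+         self.device = device
+         # Load your model weights here, e.g. self.model = ...
+
+     def set_seed(self, seed: int) -> None:
+         torch.manual_seed(seed)
+
+     def __call__(self, prompt: str) -> torch.Tensor:
+         # Run your generation code and return a video tensor,
+         # e.g. of shape (num_frames, channels, height, width).
+         raise NotImplementedError
+ ```
+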
+ ## Running experiments with the new model
+ After finishing, reinstall the package through
+ ```shell
+ pip install -e .
+ ```
+ You should then be able to use the new model.
+
+
+ ### Matching environment
+ Make sure the code can be run with the VideoGenHub environment. If new dependencies are added, please add them to the env_cfg file.
+
+ ## Submitting your model through a PR
+
+ Finally, you can submit this new model by submitting a Pull Request! Make sure it matches the code style in our contribution guide.
docs/Guidelines/install.md ADDED
@@ -0,0 +1,33 @@
+ # Installation
+
+ ## Installing the project
+
+ To install from PyPI:
+ ```shell
+ pip install videogen-hub
+ ```
+
+ To install from GitHub:
+ ```shell
+ git clone https://github.com/TIGER-AI-Lab/VideoGenHub.git
+ cd VideoGenHub
+ cd env_cfg
+ pip install -r requirements.txt
+ cd ..
+ pip install -e .
+ ```
+ The requirements for OpenSora are in `env_cfg/opensora.txt`
+
+ For some models, like Show 1, you need to log in through `huggingface-cli`, for example:
+
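+ ```shell
+ huggingface-cli login
+ ```
+ This will prompt you for an access token from your Hugging Face account settings.
+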
+ ## Verify the installation
+ ```python
+ import videogen_hub
+
+ print(videogen_hub.__version__) # should print a string
+ ```
+
+ ## **Downloading weights** into `checkpoints` folder
+ ```shell
+ ./download_models.sh
+ ```
docs/Guidelines/quickstart.md ADDED
@@ -0,0 +1,11 @@
+ # Quickstart
+
+ ## Running a single model
+ ```python
+ import videogen_hub
+
+ model = videogen_hub.load('VideoCrafter2')
+ video = model.infer_one_video(prompt="A child excitedly swings on a rusty swing set, laughter filling the air.")
+
+ # Here video is a torch tensor of shape torch.Size([16, 3, 320, 512])
+ ```
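+
+ To inspect the result, you can write the tensor out as an MP4. A minimal sketch (assuming the tensor is laid out as frames × channels × height × width with float values in [0, 1]; the frame rate is an arbitrary choice):
+
+ ```python
+ import torch
+ from torchvision.io import write_video
+
+ # Convert (frames, channels, height, width) floats in [0, 1]
+ # to (frames, height, width, channels) uint8, as expected by write_video.
+ frames = (video.permute(0, 2, 3, 1).clamp(0, 1) * 255).to(torch.uint8)
+ write_video("sample.mp4", frames, fps=8)
+ ```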
docs/Makefile ADDED
@@ -0,0 +1,20 @@
+ # Minimal makefile for Sphinx documentation
+ #
+
+ # You can set these variables from the command line, and also
+ # from the environment for the first two.
+ SPHINXOPTS ?=
+ SPHINXBUILD ?= sphinx-build
+ SOURCEDIR = .
+ BUILDDIR = build
+
+ # Put it first so that "make" without argument is like "make help".
+ help:
+ 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+ .PHONY: help Makefile
+
+ # Catch-all target: route all unknown targets to Sphinx using the new
+ # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+ %: Makefile
+ 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
docs/Overview/intro.md ADDED
@@ -0,0 +1,20 @@
+ # Introduction
+
+ As the video generation counterpart of ImagenHub, VideoGenHub is a centralized framework that standardizes the evaluation of conditional video generation models by curating unified datasets and building an inference library and a benchmark that align with real-life applications. This is a continuous effort to publish a leaderboard that helps everyone track the progress in the field.
+
+ ## Why VideoGenHub?
+
+
+ ### What sets #VideoGenHub apart?
+ 1) Unified Datasets: We've meticulously curated evaluation datasets for 2 video generation tasks. This ensures comprehensive testing of models across diverse scenarios.
+ 2) Inference Library: Say goodbye to inconsistent comparisons. Our unified inference pipeline ensures that every model is evaluated on a level playing field with full transparency.
+ 3) Human-centric Evaluation: Beyond traditional metrics, we've innovated with human evaluation scores that measure Semantic Consistency & Perceptual Quality. This aligns evaluations closer to human perception and improves on existing human-preference evaluation methods.
+
+ ### Why should you use #VideoGenHub?
+ 1) Streamlined Research: We've taken the guesswork out of research by defining clear tasks and providing curated datasets.
+ 2) Objective Evaluation: Our framework ensures a bias-free, standardized evaluation, giving a true measure of a model's capabilities.
+ 3) Experiment Transparency: By standardizing the human-evaluation dataset, human evaluation results become far more convincing thanks to the experiment transparency.
+ 4) Collaborative Spirit: We believe in the power of community. Our platform is designed to foster collaboration, idea exchange, and innovation in the realm of video generation.
+ 5) Comprehensive Functionality: From common GenAI metrics to visualization tools, we've got you covered. Also stay tuned for our upcoming Amazon Mechanical Turk Templates!
+ 6) Engineering Excellence: We emphasize good engineering practice. Documentation, type hints, and (coming soon!) extensive code coverage.
+
docs/Overview/models.md ADDED
@@ -0,0 +1,20 @@
+ # Model Zoo
+
+ We include about 10 video generation models.
+ Please see https://github.com/TIGER-AI-Lab/VideoGenHub for more information.
+
+ ## Text-to-Video Generation Models
+
+ * [LaVie](https://vchitect.github.io/LaVie-project/)
+ * [VideoCrafter2](https://ailab-cvc.github.io/videocrafter2/)
+ * [ModelScope](https://modelscope.cn/models/iic/text-to-video-synthesis/summary)
+ * [StreamingT2V](https://streamingt2v.github.io/)
+ * [Show 1](https://showlab.github.io/Show-1/)
+ * [OpenSora](https://hpcaitech.github.io/Open-Sora/)
+
+ ## Image-to-Video Generation Models
+
+ * [DynamiCrafter2](https://doubiiu.github.io/projects/DynamiCrafter/)
+ * [SEINE](https://vchitect.github.io/SEINE-project/)
+ * [Consisti2v](https://tiger-ai-lab.github.io/ConsistI2V/)
+ * [I2VGenXL](https://i2vgen-xl.github.io/)
docs/Overview/philosophy.rst ADDED
@@ -0,0 +1,17 @@
+ Philosophy
+ +++++++++++++++++++++
+
+ As the video counterpart of ImagenHub, VideoGenHub plays a pivotal role in propelling the field of video generation
+ by streamlining research and collaboration.
+
+ Purity of Evaluation
+ --------------------
+ We ensure a fair and consistent evaluation for all models, eliminating biases.
+
+ Research Roadmap
+ ----------------
+ By defining tasks and curating datasets, we provide clear direction for researchers.
+
+ Open Collaboration
+ ------------------
+ Our platform fosters the exchange and cooperation of related technologies, bringing together minds and innovations.
docs/conf.py ADDED
@@ -0,0 +1,57 @@
+ # Configuration file for the Sphinx documentation builder.
+ #
+ # This file only contains a selection of the most common options. For a full
+ # list see the documentation:
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+ # -- Path setup --------------------------------------------------------------
+
+ # If extensions (or modules to document with autodoc) are in another directory,
+ # add these directories to sys.path here. If the directory is relative to the
+ # documentation root, use os.path.abspath to make it absolute, like shown here.
+ #
+ # import os
+ # import sys
+ # sys.path.insert(0, os.path.abspath('.'))
+
+
+ # -- Project information -----------------------------------------------------
+
+ project = 'VideoGenHub'
+ copyright = '2024, TIGER Lab'
+ author = 'TIGER Lab'
+
+ # -- General configuration ---------------------------------------------------
+
+ # Add any Sphinx extension module names here, as strings. They can be
+ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+ # ones.
+ extensions = [
+     'sphinx.ext.napoleon',
+     'sphinx.ext.duration',
+     'sphinx.ext.doctest',
+     'sphinx.ext.autodoc',
+     'myst_parser'
+ ]
+
+
+ # Add any paths that contain templates here, relative to this directory.
+ templates_path = ['_templates']
+
+ # List of patterns, relative to source directory, that match files and
+ # directories to ignore when looking for source files.
+ # This pattern also affects html_static_path and html_extra_path.
+ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+
+ # -- Options for HTML output -------------------------------------------------
+
+ # The theme to use for HTML and HTML Help pages. See the documentation for
+ # a list of builtin themes.
+ #
+ html_theme = 'sphinx_rtd_theme'
+
+ # Add any paths that contain custom static files (such as style sheets) here,
+ # relative to this directory. They are copied after the builtin static files,
+ # so a file named "default.css" will overwrite the builtin "default.css".
+ html_static_path = ['_static']
docs/index.rst ADDED
@@ -0,0 +1,66 @@
+ .. VideoGenHub documentation master file, created by
+    sphinx-quickstart on Wed May 22 16:32:32 2024.
+    You can adapt this file completely to your liking, but it should at least
+    contain the root `toctree` directive.
+
+ Welcome to VideoGenHub's documentation!
+ =======================================
+
+ VideoGenHub is a centralized framework to standardize the evaluation of conditional video generation models by curating unified datasets, building an inference library and a benchmark that align with real-life applications.
+
+ .. _GitHub Code: https://github.com/TIGER-AI-Lab/VideoGenHub
+ .. _Pypi Page: https://pypi.org/project/videogen-hub/
+
+ * `GitHub Code`_
+ * `Pypi Page`_
+
+ .. toctree::
+    :maxdepth: 2
+    :caption: Overview
+
+
+ .. toctree::
+    :maxdepth: 2
+    :caption: Overview
+
+    Overview/intro
+    Overview/philosophy
+    Overview/models
+    Overview/datasets
+
+ .. toctree::
+    :maxdepth: 2
+    :caption: Guidelines
+
+    Guidelines/install
+    Guidelines/quickstart
+    Guidelines/custombenchmark
+    Guidelines/deepdive
+
+ .. toctree::
+    :maxdepth: 2
+    :hidden:
+    :caption: Researcher Guidelines
+
+    Guidelines/custommodel
+    Guidelines/humaneval
+    Guidelines/imagenmuseum
+
+ .. toctree::
+    :maxdepth: 2
+    :hidden:
+    :caption: Contributing
+
+    Contributing/basics
+    Contributing/coding
+    Contributing/docs
+
+ .. toctree::
+    :maxdepth: 2
+    :hidden:
+    :caption: API Reference
+
+    source/modules
+    source/videogen_hub.infermodels
+    source/videogen_hub.benchmark
+    source/videogen_hub.pipelines
docs/make.bat ADDED
@@ -0,0 +1,35 @@
+ @ECHO OFF
+
+ pushd %~dp0
+
+ REM Command file for Sphinx documentation
+
+ if "%SPHINXBUILD%" == "" (
+ 	set SPHINXBUILD=sphinx-build
+ )
+ set SOURCEDIR=.
+ set BUILDDIR=_build
+
+ %SPHINXBUILD% >NUL 2>NUL
+ if errorlevel 9009 (
+ 	echo.
+ 	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ 	echo.installed, then set the SPHINXBUILD environment variable to point
+ 	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ 	echo.may add the Sphinx directory to PATH.
+ 	echo.
+ 	echo.If you don't have Sphinx installed, grab it from
+ 	echo.https://www.sphinx-doc.org/
+ 	exit /b 1
+ )
+
+ if "%1" == "" goto help
+
+ %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+ goto end
+
+ :help
+ %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+ :end
+ popd
docs/make_docs.sh ADDED
@@ -0,0 +1,16 @@
+ #!/bin/bash
+
+ # Get the directory of the current script.
+ SCRIPT_DIR=$(dirname "$0")
+
+ # Change to the directory where the script is located.
+ cd "$SCRIPT_DIR"
+
+ cd ..
+ ## rm -rf docs/source/
+ pip install -e .
+ ## sphinx-apidoc -o docs/source/ src/videogen_hub
+ cd docs
+
+ make clean
+ make html
download_models.sh ADDED
@@ -0,0 +1 @@
+ python load_models.py
env_cfg/opensora.txt ADDED
@@ -0,0 +1,17 @@
+ colossalai; platform_system != "Windows"
+ accelerate
+ diffusers
+ ftfy
+ gdown
+ mmengine
+ pandas
+ pre-commit
+ pyarrow
+ pyav
+ tensorboard
+ timm
+ tqdm
+ transformers
+ wandb
+ rotary_embedding_torch
+ pandarallel
env_cfg/requirements.txt ADDED
@@ -0,0 +1,64 @@
+ deepspeed; platform_system != "Windows"
+ bitsandbytes
+ flask
+ flask_restful
+ flask_cors
+ faiss-cpu
+ fire
+ h5py
+ numpy>=1.23.5
+ pandas<2.0.0
+ peft
+ torch
+ torchvision
+ torchaudio
+ xformers~=0.0.20
+ jupyterlab>=4.0.2
+ notebook>=6.5.4
+ albumentations>=1.1.0
+ opencv-python>=4.2.0
+ pudb~=2019.2
+ imageio>=2.14.1
+ imageio-ffmpeg>=0.4.7
+ pytorch-lightning>=1.5.9
+ omegaconf~=2.1.1
+ gradio==5.0.0
+ pillow~=9.5.0
+ einops>=0.4.1
+ torch-fidelity>=0.3.0
+ setuptools>=59.5.0
+ transformers==4.37.2
+ torchmetrics>=0.6.0
+ lpips
+ dreamsim
+ image-reward
+ kornia>=0.6
+ diffusers>=0.18.0
+ accelerate>=0.20.3
+ safetensors
+ datasets
+ tqdm>=4.64.1
+ matplotlib>=3.7.1
+ taming-transformers-rom1504~=0.0.6
+ madgrad>=1.1
+ -e git+https://github.com/openai/CLIP.git@main#egg=clip
+ dominate>=2.8.0
+ -e git+https://github.com/CompVis/latent-diffusion.git#egg=latent-diffusion #ldm
+ jsonargparse
+ openai
+ nltk~=3.8.1
+ krippendorff
+ statsmodels
+ plotly
+ fal_client
+ open_clip_torch
+ decord
+ huggingface_hub
+ open-clip-torch-any-py3
+ modelscope
+ protobuf==3.20.*
+ rotary_embedding_torch
+ av
+ natsort
+ tensorboard
+ wandb
load_models.py ADDED
@@ -0,0 +1,132 @@
+ import os
+ import sys
+ import traceback
+
+ import torch
+
+
+ def load_all_models():
+     """
+     Download all supported models (ConsistI2V, DynamiCrafter, I2VGenXL, LaVie, ModelScope, SEINE,
+     ShowOne, StreamingT2V, T2VTurbo, VideoCrafter2, OpenSora, OpenSoraPlan) into the directory
+     defined by MODEL_PATH, emptying the CUDA cache after each download attempt.
+     Returns: None
+     """
+     sys.path.insert(0, './src/')
+     # from src.videogen_hub.infermodels import CogVideo
+     from src.videogen_hub.infermodels import ConsistI2V
+     from src.videogen_hub.infermodels import DynamiCrafter
+     from src.videogen_hub.infermodels import I2VGenXL
+     from src.videogen_hub.infermodels import LaVie
+     from src.videogen_hub.infermodels import ModelScope
+     from src.videogen_hub.infermodels import OpenSora
+     from src.videogen_hub.infermodels import OpenSoraPlan
+     from src.videogen_hub.infermodels import SEINE
+     from src.videogen_hub.infermodels import ShowOne
+     from src.videogen_hub.infermodels import StreamingT2V
+     from src.videogen_hub.infermodels import T2VTurbo
+     from src.videogen_hub.infermodels import VideoCrafter2
+
+     from src.videogen_hub import MODEL_PATH
+
+     try:
+         ConsistI2V()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'ConsistI2V'))
+     print("ConsistI2V has been downloaded!")
+
+     try:
+         DynamiCrafter()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'dynamicrafter_256_v1'))
+     print("DynamiCrafter has been downloaded!")
+
+     try:
+         I2VGenXL()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'i2vgen-xl'))
+     print("I2VGenXL has been downloaded!")
+
+     try:
+         LaVie()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'lavie'))
+     print("LaVie has been downloaded!")
+
+     try:
+         ModelScope()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'modelscope'))
+     print("ModelScope has been downloaded!")
+
+     try:
+         SEINE()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'SEINE'))
+     print("SEINE has been downloaded!")
+
+     try:
+         ShowOne()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'showlab'))
+     print("ShowOne has been downloaded!")
+
+     try:
+         StreamingT2V()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'streamingtv2'))
+     print("StreamingT2V has been downloaded!")
+
+     try:
+         T2VTurbo()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'T2V-Turbo-VC2'))
+     print("T2VTurbo has been downloaded!")
+
+     try:
+         VideoCrafter2()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'videocrafter2'))
+     print("VideoCrafter2 has been downloaded!")
+
+     # Do these last, as they're linux-only...
+     try:
+         OpenSora()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'STDiT2-XL_2'))
+     print("OpenSora has been downloaded!")
+
+     try:
+         OpenSoraPlan()
+     except Exception:
+         pass
+     torch.cuda.empty_cache()
+     assert os.path.exists(os.path.join(MODEL_PATH, 'Open-Sora-Plan-v1.1.0'))
+     print("OpenSoraPlan has been downloaded!")
+
+
+
+ if __name__ == '__main__':
+     load_all_models()
pyproject.toml ADDED
@@ -0,0 +1,2 @@
+ [build-system]
+ requires = ["setuptools", "wheel"]
setup.py ADDED
@@ -0,0 +1,38 @@
+ from setuptools import setup, find_packages
+ import os
+
+ version_ns = {}
+
+ with open('README.md') as f:
+     readme = f.read()
+
+ with open('LICENSE') as f:
+     license = f.read()
+
+ with open(os.path.join("src", "videogen_hub", "_version.py")) as f:
+     exec(f.read(), version_ns)  # defines __version__ inside version_ns
+
+ setup(
+     name='videogen_hub',
+     version=version_ns["__version__"],
+     packages=find_packages(where='src'),
+     package_dir={'': 'src'},
+     description='VideoGenHub is a one-stop library to standardize the inference and evaluation of all the conditional '
+                 'video generation models.',
+     long_description=readme,
+     long_description_content_type='text/markdown',
+     author='Max Ku',
+     author_email='m3ku@uwaterloo.ca',
+     url='https://github.com/TIGER-AI-Lab/VideoGenHub',
+     license=license,
+     classifiers=[
+         "Development Status :: 4 - Beta",
+         "Environment :: GPU :: NVIDIA CUDA",
+         "Intended Audience :: Developers",
+         "Intended Audience :: Education",
+         "Intended Audience :: Science/Research",
+         "Operating System :: OS Independent",
+         "Topic :: Scientific/Engineering :: Artificial Intelligence",
+         "Programming Language :: Python :: 3",
+     ]
+ )
t2v_inference.sh ADDED
@@ -0,0 +1,13 @@
+ #!/bin/bash
+
+ # Check if model and device parameters are provided
+ if [ "$#" -lt 2 ]; then
+     echo "Usage: $0 <model> <device>"
+     exit 1
+ fi
+
+ # The first command line argument is the model, the second is the device
+ model="$1"
+ device="$2"
+
+ CUDA_VISIBLE_DEVICES="$device" python3 ./src/videogen_hub/benchmark/text_guided_t2v.py --model_name "$model"
tests/cluster_label2.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b87880fdbe89670f12844377b9cf97a9733b1f54e3a9b73cbb9835084c4e02ec
+ size 160128
tests/test_i2v.py ADDED
@@ -0,0 +1,47 @@
+ import os, sys
+ import torch
+ from diffusers.utils import load_image
+ # Directly run `python -m pytest` or
+ # Directly run `python -m pytest -v -s --disable-warnings` for Debugging
+
+ # To test single function:
+ # pytest tests/test_i2v.py::test_function_name
+
+ dummy_prompt = "A tiger in a lab coat with a 1980s Miami vibe, turning a well oiled science content machine."
+ dummy_image = load_image("https://chromaica.github.io/Museum/ImagenHub_Text-Guided_IG/DALLE3/sample_69.jpg")
+
+ import sys
+ sys.path.append("src")
+
+ def test_SEINE():
+     from videogen_hub.infermodels import SEINE
+
+     model = SEINE()
+     assert model is not None
+     out_video = model.infer_one_video(dummy_image, dummy_prompt)
+     assert out_video is not None
+     # check if out_video is a tensor or not
+     assert isinstance(out_video, torch.Tensor)
+     print(out_video.shape)
+
+ def test_ConsistI2V():
+     from videogen_hub.infermodels import ConsistI2V
+
+     model = ConsistI2V()
+     assert model is not None
+     out_video = model.infer_one_video(dummy_image, dummy_prompt)
+     assert out_video is not None
+     # check if out_video is a tensor or not
+     assert isinstance(out_video, torch.Tensor)
+     print(out_video.shape)
+
+ def test_DynamiCrafter():
+     from videogen_hub.infermodels import DynamiCrafter
+
+     model = DynamiCrafter()
+     assert model is not None
+     out_video = model.infer_one_video(dummy_image, dummy_prompt)
+     assert out_video is not None
+     # check if out_video is a tensor or not
+     assert isinstance(out_video, torch.Tensor)
+     print(out_video.shape)
tests/test_t2v.py ADDED
@@ -0,0 +1,95 @@
+ import os, sys
+ import torch
+
+ # Directly run `python -m pytest` or
+ # Directly run `python -m pytest -v -s --disable-warnings` for Debugging
+
+ # To test single function:
+ # pytest tests/test_t2v.py::test_function_name
+
+ dummy_prompts = [
+     "a teddy bear walking on the street, 2k, high quality",
+     "a panda taking a selfie, 2k, high quality",
+     "a polar bear playing drum kit in NYC Times Square, 4k, high resolution",
+     "jungle river at sunset, ultra quality",
+     "a shark swimming in clear Caribbean ocean, 2k, high quality",
+     "a Corgi walking in the park at sunrise, oil painting style",
+ ]
+
+ import sys
+ sys.path.append("src")
+
+ def test_LaVie():
+     from videogen_hub.infermodels import LaVie
+
+     model = LaVie()
+     assert model is not None
+     out_video = model.infer_one_video(dummy_prompts[0])
+     assert out_video is not None
+     # check if out_video is a tensor or not
+     assert isinstance(out_video, torch.Tensor)
+     print(out_video.shape)
+
+
+ def test_VideoCrafter2():
+     from videogen_hub.infermodels import VideoCrafter2
+
+     model = VideoCrafter2()
+     assert model is not None
+     out_video = model.infer_one_video(dummy_prompts[0])
+     assert out_video is not None
+     # check if out_video is a tensor or not
+     assert isinstance(out_video, torch.Tensor)
+     print(out_video.shape)
+
+ def test_ModelScope():
+     from videogen_hub.infermodels import ModelScope
+     model = ModelScope()
+     assert model is not None
+     out_video = model.infer_one_video(dummy_prompts[0])
+     print("video outputted")
+     assert out_video is not None
+     # check if out_video is a tensor or not
+     assert isinstance(out_video, torch.Tensor)
+     print(out_video.shape)
+
+ def test_StreamingT2V():
+     from videogen_hub.infermodels import StreamingT2V
+
+     model = StreamingT2V()
+     assert model is not None
+     out_video = model.infer_one_video(dummy_prompts[0])
+     print("video outputted")
+     assert out_video is not None
+     # check if out_video is a tensor or not
+     assert isinstance(out_video, torch.Tensor)
+     print(out_video.shape)
+
+ def test_OpenSora():
+     from videogen_hub.infermodels import OpenSora
+
+     model = OpenSora()
+     assert model is not None
+     out_video = model.infer_one_video(dummy_prompts[0])
+     assert out_video is not None
+     # check if out_video is a tensor or not
+     assert isinstance(out_video, torch.Tensor)
+     print(out_video.shape)
+
+
+ def test_ShowOne():
+     from videogen_hub.infermodels import ShowOne
+
+     model = ShowOne()
+     assert model is not None
+     out_video = model.infer_one_video(dummy_prompts[0])
+     assert out_video is not None
+     # check if out_video is a tensor or not
+     assert isinstance(out_video, torch.Tensor)
+     print(out_video.shape)
+
+
+ if __name__ == "__main__":
+     test_ShowOne()
+     print("Everything passed")
+     pass