swimcyclecode committed (co-authored by hysts, HF staff)
Commit 7c7f271
0 Parent(s):

Duplicate from damo-vilab/modelscope-text-to-video-synthesis

Co-authored-by: hysts <hysts@users.noreply.huggingface.co>

Files changed (8)
  1. .gitattributes +34 -0
  2. .gitignore +162 -0
  3. .pre-commit-config.yaml +37 -0
  4. .style.yapf +5 -0
  5. README.md +13 -0
  6. app.py +140 -0
  7. requirements.txt +8 -0
  8. style.css +191 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,162 @@
+ weights/
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,37 @@
+ exclude: patch
+ repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+   rev: v4.2.0
+   hooks:
+   - id: check-executables-have-shebangs
+   - id: check-json
+   - id: check-merge-conflict
+   - id: check-shebang-scripts-are-executable
+   - id: check-toml
+   - id: check-yaml
+   - id: double-quote-string-fixer
+   - id: end-of-file-fixer
+   - id: mixed-line-ending
+     args: ['--fix=lf']
+   - id: requirements-txt-fixer
+   - id: trailing-whitespace
+ - repo: https://github.com/myint/docformatter
+   rev: v1.4
+   hooks:
+   - id: docformatter
+     args: ['--in-place']
+ - repo: https://github.com/pycqa/isort
+   rev: 5.12.0
+   hooks:
+   - id: isort
+ - repo: https://github.com/pre-commit/mirrors-mypy
+   rev: v0.991
+   hooks:
+   - id: mypy
+     args: ['--ignore-missing-imports']
+     additional_dependencies: ['types-python-slugify']
+ - repo: https://github.com/google/yapf
+   rev: v0.32.0
+   hooks:
+   - id: yapf
+     args: ['--parallel', '--in-place']
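
For reference, a hedged sketch (not part of the commit) of how these hooks are typically exercised locally, assuming the pre-commit package is installed in the environment:

import subprocess

# Install the git hook so the checks run on every commit, then run all
# configured hooks once against the full working tree.
subprocess.run(['pre-commit', 'install'], check=True)
subprocess.run(['pre-commit', 'run', '--all-files'], check=True)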
.style.yapf ADDED
@@ -0,0 +1,5 @@
+ [style]
+ based_on_style = pep8
+ blank_line_before_nested_class_or_def = false
+ spaces_before_comment = 2
+ split_before_logical_operator = true
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: ModelScope Text To Video Synthesis
+ emoji: 🚀
+ colorFrom: pink
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 3.23.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: damo-vilab/modelscope-text-to-video-synthesis
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,140 @@
+ #!/usr/bin/env python
+
+ from __future__ import annotations
+
+ import os
+ import random
+ import tempfile
+
+ import gradio as gr
+ import imageio
+ import numpy as np
+ import torch
+ from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
+
+ DESCRIPTION = '# [ModelScope Text to Video Synthesis](https://modelscope.cn/models/damo/text-to-video-synthesis/summary)'
+ DESCRIPTION += '\n<p>For Colab usage, you can view <a href="https://colab.research.google.com/drive/1uW1ZqswkQ9Z9bp5Nbo5z59cAn7I0hE6R?usp=sharing" style="text-decoration: underline;" target="_blank">this webpage</a>. (Last updated on 2023.03.21.)</p>'
+ DESCRIPTION += '\n<p>This model can only be used for non-commercial purposes. To learn more about the model, take a look at the <a href="https://huggingface.co/damo-vilab/modelscope-damo-text-to-video-synthesis" style="text-decoration: underline;" target="_blank">model card</a>.</p>'
+ if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
+     DESCRIPTION += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
+
+ MAX_NUM_FRAMES = int(os.getenv('MAX_NUM_FRAMES', '200'))
+ DEFAULT_NUM_FRAMES = min(MAX_NUM_FRAMES,
+                          int(os.getenv('DEFAULT_NUM_FRAMES', '16')))
+
+ pipe = DiffusionPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b',
+                                          torch_dtype=torch.float16,
+                                          variant='fp16')
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
+ pipe.enable_model_cpu_offload()
+ pipe.enable_vae_slicing()
+
+
+ def to_video(frames: list[np.ndarray], fps: int) -> str:
+     out_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False)
+     writer = imageio.get_writer(out_file.name, format='FFMPEG', fps=fps)
+     for frame in frames:
+         writer.append_data(frame)
+     writer.close()
+     return out_file.name
+
+
+ def generate(prompt: str, seed: int, num_frames: int,
+              num_inference_steps: int) -> str:
+     if seed == -1:
+         seed = random.randint(0, 1000000)
+     generator = torch.Generator().manual_seed(seed)
+     frames = pipe(prompt,
+                   num_inference_steps=num_inference_steps,
+                   num_frames=num_frames,
+                   generator=generator).frames
+     return to_video(frames, 8)
+
+
+ examples = [
+     ['An astronaut riding a horse.', 0, 16, 25],
+     ['A panda eating bamboo on a rock.', 0, 16, 25],
+     ['Spiderman is surfing.', 0, 16, 25],
+ ]
+
+ with gr.Blocks(css='style.css') as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Group():
+         with gr.Box():
+             with gr.Row(elem_id='prompt-container').style(equal_height=True):
+                 prompt = gr.Text(
+                     label='Prompt',
+                     show_label=False,
+                     max_lines=1,
+                     placeholder='Enter your prompt',
+                     elem_id='prompt-text-input').style(container=False)
+                 run_button = gr.Button('Generate video').style(
+                     full_width=False)
+         result = gr.Video(label='Result', show_label=False, elem_id='gallery')
+     with gr.Accordion('Advanced options', open=False):
+         seed = gr.Slider(
+             label='Seed',
+             minimum=-1,
+             maximum=1000000,
+             step=1,
+             value=-1,
+             info='If set to -1, a different seed will be used each time.')
+         num_frames = gr.Slider(
+             label='Number of frames',
+             minimum=16,
+             maximum=MAX_NUM_FRAMES,
+             step=1,
+             value=16,
+             info=
+             'Note that the content of the video also changes when you change the number of frames.'
+         )
+         num_inference_steps = gr.Slider(label='Number of inference steps',
+                                         minimum=10,
+                                         maximum=50,
+                                         step=1,
+                                         value=25)
+
+     inputs = [
+         prompt,
+         seed,
+         num_frames,
+         num_inference_steps,
+     ]
+     gr.Examples(examples=examples,
+                 inputs=inputs,
+                 outputs=result,
+                 fn=generate,
+                 cache_examples=os.getenv('SYSTEM') == 'spaces')
+
+     prompt.submit(fn=generate, inputs=inputs, outputs=result)
+     run_button.click(fn=generate, inputs=inputs, outputs=result)
+
+
+     with gr.Accordion(label='We are hiring (based in Beijing / Hangzhou, China)', open=False):
+         gr.HTML("""<div class="acknowledgments">
+         <p>
+         If you're looking for an exciting challenge and the opportunity to work with cutting-edge technologies in AIGC and large-scale pretraining, then we are the place for you. We are looking for talented, motivated and creative individuals to join our team. If you are interested, please send your CV to us.
+         </p>
+         <p>
+         <b>EMAIL: yingya.zyy@alibaba-inc.com</b>
+         </p>
+         </div>
+         """)
+
+     with gr.Accordion(label='Biases and content acknowledgment', open=False):
+         gr.HTML("""<div class="acknowledgments">
+         <h4>Biases and content acknowledgment</h4>
+         <p>
+         As impressive as turning text into video is, be aware that this model may output content that reinforces or exacerbates societal biases. The training data includes LAION5B, ImageNet, Webvid and other public datasets. The model was not trained to realistically represent people or events, so using it to generate such content is beyond its capabilities.
+         </p>
+         <p>
+         It is not intended to generate content that is demeaning or harmful to people or their environment, culture, religion, etc. Likewise, generating pornographic, violent, or gory content is not allowed. <b>The model is meant for research purposes</b>.
+         </p>
+         <p>
+         To learn more about the model, head to its <a href="https://huggingface.co/damo-vilab/modelscope-damo-text-to-video-synthesis" style="text-decoration: underline;" target="_blank">model card</a>.
+         </p>
+         </div>
+         """)
+
+
+ demo.queue(api_open=False, max_size=15).launch()
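
For context, a minimal standalone sketch of the same generation path that app.py wires into the Gradio UI (pipeline setup, generate, to_video), without the web interface. It assumes the pinned diffusers and imageio versions from requirements.txt and enough GPU memory for the fp16 weights; the output file name is illustrative.

import imageio
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

# Same model and scheduler configuration as app.py; CPU offload and VAE
# slicing keep peak GPU memory low.
pipe = DiffusionPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b',
                                         torch_dtype=torch.float16,
                                         variant='fp16')
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.enable_vae_slicing()

# Fixed seed for reproducibility; app.py draws a random seed when the slider is -1.
generator = torch.Generator().manual_seed(0)
frames = pipe('An astronaut riding a horse.',
              num_inference_steps=25,
              num_frames=16,
              generator=generator).frames

# Write the frames to an mp4 at 8 fps, mirroring to_video() in app.py.
writer = imageio.get_writer('output.mp4', format='FFMPEG', fps=8)
for frame in frames:
    writer.append_data(frame)
writer.close()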
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ accelerate==0.17.1
+ git+https://github.com/huggingface/diffusers@9dc8444
+ gradio==3.23.0
+ huggingface-hub==0.13.3
+ imageio[ffmpeg]==2.26.1
+ torch==2.0.0
+ torchvision==0.15.1
+ transformers==4.27.2
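
A small, hedged sketch (not part of the commit) for checking that the pinned requirements above match what is installed in the running environment; diffusers is installed from a git commit, so it is skipped here.

import importlib.metadata

# Pinned versions copied from requirements.txt.
EXPECTED = {
    'accelerate': '0.17.1',
    'gradio': '3.23.0',
    'huggingface-hub': '0.13.3',
    'imageio': '2.26.1',
    'torch': '2.0.0',
    'torchvision': '0.15.1',
    'transformers': '4.27.2',
}

for package, pinned in EXPECTED.items():
    installed = importlib.metadata.version(package)
    status = 'OK' if installed == pinned else f'expected {pinned}'
    print(f'{package}: {installed} ({status})')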
style.css ADDED
@@ -0,0 +1,191 @@
+ /*
+ This CSS file is copied from here:
+ https://huggingface.co/spaces/stabilityai/stable-diffusion/blob/2794a3c3ba66115c307075098e713f572b08bf80/app.py
+ */
+
+ h1 {
+   text-align: center;
+ }
+
+ .gradio-container {
+   font-family: 'IBM Plex Sans', sans-serif;
+ }
+
+ .gr-button {
+   color: white;
+   border-color: black;
+   background: black;
+ }
+
+ input[type='range'] {
+   accent-color: black;
+ }
+
+ .dark input[type='range'] {
+   accent-color: #dfdfdf;
+ }
+
+ .container {
+   max-width: 730px;
+   margin: auto;
+   padding-top: 1.5rem;
+ }
+
+ #gallery {
+   min-height: 22rem;
+   margin-bottom: 15px;
+   margin-left: auto;
+   margin-right: auto;
+   border-bottom-right-radius: .5rem !important;
+   border-bottom-left-radius: .5rem !important;
+ }
+
+ #gallery>div>.h-full {
+   min-height: 20rem;
+ }
+
+ .details:hover {
+   text-decoration: underline;
+ }
+
+ .gr-button {
+   white-space: nowrap;
+ }
+
+ .gr-button:focus {
+   border-color: rgb(147 197 253 / var(--tw-border-opacity));
+   outline: none;
+   box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
+   --tw-border-opacity: 1;
+   --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
+   --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
+   --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
+   --tw-ring-opacity: .5;
+ }
+
+ #advanced-btn {
+   font-size: .7rem !important;
+   line-height: 19px;
+   margin-top: 12px;
+   margin-bottom: 12px;
+   padding: 2px 8px;
+   border-radius: 14px !important;
+ }
+
+ #advanced-options {
+   display: none;
+   margin-bottom: 20px;
+ }
+
+ .footer {
+   margin-bottom: 45px;
+   margin-top: 35px;
+   text-align: center;
+   border-bottom: 1px solid #e5e5e5;
+ }
+
+ .footer>p {
+   font-size: .8rem;
+   display: inline-block;
+   padding: 0 10px;
+   transform: translateY(10px);
+   background: white;
+ }
+
+ .dark .footer {
+   border-color: #303030;
+ }
+
+ .dark .footer>p {
+   background: #0b0f19;
+ }
+
+ .acknowledgments h4 {
+   margin: 1.25em 0 .25em 0;
+   font-weight: bold;
+   font-size: 115%;
+ }
+
+ .animate-spin {
+   animation: spin 1s linear infinite;
+ }
+
+ @keyframes spin {
+   from {
+     transform: rotate(0deg);
+   }
+
+   to {
+     transform: rotate(360deg);
+   }
+ }
+
+ #share-btn-container {
+   display: flex;
+   padding-left: 0.5rem !important;
+   padding-right: 0.5rem !important;
+   background-color: #000000;
+   justify-content: center;
+   align-items: center;
+   border-radius: 9999px !important;
+   width: 13rem;
+   margin-top: 10px;
+   margin-left: auto;
+ }
+
+ #share-btn {
+   all: initial;
+   color: #ffffff;
+   font-weight: 600;
+   cursor: pointer;
+   font-family: 'IBM Plex Sans', sans-serif;
+   margin-left: 0.5rem !important;
+   padding-top: 0.25rem !important;
+   padding-bottom: 0.25rem !important;
+   right: 0;
+ }
+
+ #share-btn * {
+   all: unset;
+ }
+
+ #share-btn-container div:nth-child(-n+2) {
+   width: auto !important;
+   min-height: 0px !important;
+ }
+
+ #share-btn-container .wrap {
+   display: none !important;
+ }
+
+ .gr-form {
+   flex: 1 1 50%;
+   border-top-right-radius: 0;
+   border-bottom-right-radius: 0;
+ }
+
+ #prompt-container {
+   gap: 0;
+ }
+
+ #prompt-text-input,
+ #negative-prompt-text-input {
+   padding: .45rem 0.625rem
+ }
+
+ #component-16 {
+   border-top-width: 1px !important;
+   margin-top: 1em
+ }
+
+ .image_duplication {
+   position: absolute;
+   width: 100px;
+   left: 50px
+ }
+
+ #component-0 {
+   max-width: 730px;
+   margin: auto;
+   padding-top: 1.5rem;
+ }