# Source: Hugging Face Space file view — commit 113ecf3 ("Add stage2 support" by lnyan), 3.33 kB
#!/usr/bin/env python
from __future__ import annotations
import gradio as gr
from model import AppModel
# Markdown rendered at the top of the page: project link, pipeline notes,
# and an explanation of the optional English-to-Chinese translation step.
DESCRIPTION = '''# <a href="https://github.com/THUDM/CogVideo">CogVideo</a>
This Space supports the first stage and the second stage (better quality) of the CogVideo pipeline.
Note that the second stage of CogVideo is **much slower**.
The model accepts only Chinese as input.
By checking the "Translate to Chinese" checkbox, the results of English to Chinese translation with [this Space](https://huggingface.co/spaces/chinhon/translation_eng2ch) will be used as input.
Since the translation model may mistranslate, you may want to use the translation results from other translation services.
'''
# Attribution note rendered below the examples, crediting the upstream demo repo.
NOTES = 'This app is adapted from <a href="https://github.com/hysts/CogVideo_demo">https://github.com/hysts/CogVideo_demo</a>. It would be recommended to use the repo if you want to run the app yourself.'
# Page footer: visitor-counter badge image.
FOOTER = '<img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.glitch.me/badge?page_id=THUDM.CogVideo" />'
def main(only_first_stage: bool = False) -> None:
    """Build and launch the Gradio demo UI for CogVideo.

    Args:
        only_first_stage: When True, the model is loaded without second-stage
            support and the per-run "Only First Stage" checkbox is hidden
            (the choice would be meaningless). Defaults to False, matching
            the original hard-coded behavior.
    """
    model = AppModel(only_first_stage)

    with gr.Blocks(css='style.css') as demo:
        gr.Markdown(DESCRIPTION)

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    text = gr.Textbox(label='Input Text')
                    translate = gr.Checkbox(label='Translate to Chinese',
                                            value=False)
                    seed = gr.Slider(0,
                                     100000,
                                     step=1,
                                     value=1234,
                                     label='Seed')
                    # Hidden entirely when the loaded model only supports
                    # stage 1, since toggling it would have no effect.
                    run_only_first_stage = gr.Checkbox(
                        label='Only First Stage',
                        value=True,
                        visible=not only_first_stage)
                    image_prompt = gr.Image(type="filepath",
                                            label="Image Prompt",
                                            value=None)
                    run_button = gr.Button('Run')
            with gr.Column():
                with gr.Group():
                    translated_text = gr.Textbox(label='Translated Text')
                    with gr.Tabs():
                        with gr.TabItem('Output (Video)'):
                            result_video = gr.Video(show_label=False)

        # cache_examples=True precomputes outputs for both examples at
        # startup so the (slow) model does not re-run on example clicks.
        # The return value of gr.Examples is not needed, so it is not bound.
        gr.Examples(
            examples=[['骑滑板的皮卡丘', False, 1234, True, None],
                      ['a cat playing chess', True, 1253, False, None]],
            fn=model.run_with_translation,
            inputs=[text, translate, seed, run_only_first_stage, image_prompt],
            outputs=[translated_text, result_video],
            cache_examples=True)

        gr.Markdown(NOTES)
        gr.Markdown(FOOTER)

        run_button.click(fn=model.run_with_translation,
                         inputs=[
                             text,
                             translate,
                             seed,
                             run_only_first_stage,
                             image_prompt
                         ],
                         outputs=[translated_text, result_video])

    demo.launch()
# Script entry point: only launch the demo when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()