#!/usr/bin/env python
from __future__ import annotations

import json

import gradio as gr
import imageio.v2 as iio
import numpy as np
import requests

# from model import AppModel

MAINTENANCE_NOTICE = 'Sorry, due to computing resource issues, this space is under maintenance and will be restored as soon as possible.'
DESCRIPTION = '''# <a href="https://ic66.ml"></a><br>
'''
NOTES = "<p>This project is based on Tsinghua's CogVideo with modifications.</p>"
FOOTER = ''
def post(
    text,
    translate,
    seed,
    only_first_stage,
    image_prompt,
):
    # Forward the request to the remote inference endpoint that runs CogVideo.
    url = 'https://ccb8is4fqtofrtdsfjebg.ml-platform-cn-beijing.volces.com/devinstance/di-20221130120908-bhpxq/proxy/6201'
    headers = {
        "Content-Type": "application/json; charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36",
    }
    data = json.dumps({
        'text': text,
        'translate': translate,
        'seed': seed,
        'only_first_stage': only_first_stage,
        'image_prompt': image_prompt,
    })
    r = requests.post(url, data=data, headers=headers)

    # Decode the response once instead of re-parsing the JSON for every field.
    payload = r.json()['data']
    translated_text = payload['translated_text']  # unused: the translated-text box in the UI is commented out
    result_video = payload['result_video']  # four output video file paths
    frames = payload['frames']  # four lists of RGB frames

    # Write each of the four returned frame sequences to its video file.
    for i in range(4):
        writer = iio.get_writer(result_video[i], fps=4)
        for frame in frames[i]:
            writer.append_data(np.array(frame))
        writer.close()
    print('finish')
    return result_video[0], result_video[1], result_video[2], result_video[3]
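
# The remote endpoint above is expected to return a JSON body roughly shaped
# like {"data": {"translated_text": ..., "result_video": [<4 file paths>],
# "frames": [<4 lists of H x W x 3 RGB frames>]}}. This shape is inferred from
# how post() parses the response; it is an assumption about the backend
# contract, not a documented API.
#
# A minimal local stand-in for post(), handy for exercising the UI without the
# remote backend. The clip length and 64x64 frame size here are illustrative
# assumptions, not values the real backend uses.
def post_stub(text, translate, seed, only_first_stage, image_prompt):
    rng = np.random.default_rng(seed)
    paths = [f'stub_{i}.mp4' for i in range(4)]
    for path in paths:
        writer = iio.get_writer(path, fps=4)
        for _ in range(8):  # eight random RGB frames per clip
            writer.append_data(rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8))
        writer.close()
    return tuple(paths)
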

def main():
    only_first_stage = True
    # model = AppModel(only_first_stage)

    with gr.Blocks(css='style.css') as demo:
        # gr.Markdown(MAINTENANCE_NOTICE)
        gr.Markdown(DESCRIPTION)

        with gr.Row():
            with gr.Column():
                with gr.Group():
                    text = gr.Textbox(label='Input Text')
                    translate = gr.Checkbox(label='Translate to Chinese',
                                            value=False)
                    seed = gr.Slider(0,
                                     100000,
                                     step=1,
                                     value=1234,
                                     label='Seed')
                    only_first_stage = gr.Checkbox(
                        label='Only First Stage',
                        value=only_first_stage,
                        # Hidden, since only_first_stage is forced to True above.
                        visible=not only_first_stage)
                    image_prompt = gr.Image(type="filepath",
                                            label="Image Prompt",
                                            value=None)
                    run_button = gr.Button('Run')
            with gr.Column():
                with gr.Group():
                    # translated_text = gr.Textbox(label='Translated Text')
                    with gr.Tabs():
                        with gr.TabItem('Output (Video)'):
                            result_video1 = gr.Video(show_label=False)
                            result_video2 = gr.Video(show_label=False)
                            result_video3 = gr.Video(show_label=False)
                            result_video4 = gr.Video(show_label=False)

        # examples = gr.Examples(
        #     examples=[['骑滑板的皮卡丘', False, 1234, True, None],  # "Pikachu riding a skateboard"
        #               ['a cat playing chess', True, 1253, True, None]],
        #     fn=model.run_with_translation,
        #     inputs=[text, translate, seed, only_first_stage, image_prompt],
        #     outputs=[translated_text, result_video],
        #     cache_examples=True)

        gr.Markdown(NOTES)
        gr.Markdown(FOOTER)

        print(gr.__version__)
        run_button.click(fn=post,
                         inputs=[
                             text,
                             translate,
                             seed,
                             only_first_stage,
                             image_prompt
                         ],
                         outputs=[result_video1, result_video2, result_video3, result_video4])

    demo.launch()


if __name__ == '__main__':
    main()
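
# To exercise the app without the remote backend, the click handler above can
# be pointed at post_stub instead of post (an illustrative swap, not part of
# the original app), or post() can be called directly with arguments like the
# commented-out examples, e.g.:
#
#     post('a cat playing chess', translate=True, seed=1253,
#          only_first_stage=True, image_prompt=None)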