fffiloni committed
Commit 984a212
1 Parent(s): 3f1fd23

Update app.py

Files changed (1)
  1. app.py +14 -13
app.py CHANGED
@@ -1,8 +1,7 @@
 import gradio as gr
-from share_btn import community_icon_html, loading_icon_html, share_js
+
 import torch
 from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
-from diffusers.utils import export_to_video
 
 pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
 pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
@@ -15,12 +14,21 @@ def create_image_caption(image_init):
     print("cap: " + cap)
     return cap
 
+def export_to_video(frames: np.ndarray, fps: int) -> str:
+    frames = np.clip((frames * 255), 0, 255).astype(np.uint8)
+    out_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
+    writer = imageio.get_writer(out_file.name, format="FFMPEG", fps=fps)
+    for frame in frames:
+        writer.append_data(frame)
+    writer.close()
+    return out_file.name
+
 def infer(image_init):
     prompt = create_image_caption(image_init)
-    video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
-    video_path = export_to_video(video_frames)
+    video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames[0]
+    video_path = export_to_video(video_frames, 12)
     print(video_path)
-    return prompt, video_path, gr.Group.update(visible=True)
+    return prompt, video_path
 
 css = """
 #col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
@@ -108,16 +116,9 @@ with gr.Blocks(css=css) as demo:
         coca_cap = gr.Textbox(label="Caption", placeholder="CoCa Caption will be displayed here", elem_id="coca-cap-in")
         video_result = gr.Video(label="Video Output", elem_id="video-output")
 
-        with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
-            community_icon = gr.HTML(community_icon_html)
-            loading_icon = gr.HTML(loading_icon_html)
-            share_button = gr.Button("Share to community", elem_id="share-btn")
-
         submit_btn.click(fn=infer,
                          inputs=[image_init],
-                         outputs=[coca_cap, video_result, share_group])
-
-        share_button.click(None, [], [], _js=share_js)
+                         outputs=[coca_cap, video_result])
 
 demo.queue(max_size=12).launch()
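
Note on the inlined helper: the commit drops the diffusers.utils.export_to_video import and adds a local export_to_video instead. That helper references np, tempfile, and imageio, none of which are imported in the hunks shown, so it presumably relies on imports that already exist elsewhere in app.py (or that would still need to be added). Below is a minimal standalone sketch of the helper with those assumed imports and a hypothetical smoke test; it is an illustration under these assumptions, not part of the commit itself.

# Standalone sketch of the inlined export_to_video helper. Assumes numpy,
# imageio, and imageio-ffmpeg are installed; these imports do not appear in
# the diff hunks above and may live elsewhere in app.py.
import tempfile

import imageio
import numpy as np


def export_to_video(frames: np.ndarray, fps: int) -> str:
    # Pipeline frames are floats in [0, 1]; scale to 8-bit RGB for encoding.
    frames = np.clip(frames * 255, 0, 255).astype(np.uint8)
    out_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    writer = imageio.get_writer(out_file.name, format="FFMPEG", fps=fps)
    for frame in frames:
        writer.append_data(frame)
    writer.close()
    return out_file.name


if __name__ == "__main__":
    # Hypothetical smoke test: 24 random 320x576 RGB frames at 12 fps,
    # matching the sizes used by the updated infer().
    dummy = np.random.rand(24, 320, 576, 3).astype(np.float32)
    print(export_to_video(dummy, 12))

Using NamedTemporaryFile with delete=False keeps the encoded .mp4 on disk after the handler returns, so Gradio can serve it to the gr.Video output.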
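The other functional change in infer() is the switch from .frames to .frames[0] together with an explicit fps argument. In recent diffusers releases the video pipeline output carries a leading batch dimension, so indexing [0] selects the single generated clip before it is encoded at 12 fps. A hedged sketch of the assumed shapes, with dummy data standing in for the pipeline call:

import numpy as np

# Stand-in for pipe(prompt, num_inference_steps=40, height=320, width=576,
# num_frames=24).frames: assumed shape (batch, num_frames, height, width, 3)
# with float values in [0, 1].
batched_frames = np.random.rand(1, 24, 320, 576, 3).astype(np.float32)

video_frames = batched_frames[0]   # drop the batch dimension
assert video_frames.shape == (24, 320, 576, 3)
# video_path = export_to_video(video_frames, 12)   # encode at 12 fps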