Files changed (3)
  1. README.md +1 -1
  2. app.py +14 -19
  3. requirements.txt +1 -2
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: red
 colorTo: gray
 sdk: gradio
 python_version: 3.10.12
-sdk_version: 4.36.1
+sdk_version: 3.50.2
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -1,31 +1,19 @@
 import gradio as gr
-import cv2
-import numpy as np
-import tempfile
-import imageio
+from share_btn import community_icon_html, loading_icon_html, share_js
 import torch
 from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
+from diffusers.utils import export_to_video
 
 pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
 pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
 pipe.enable_model_cpu_offload()
 
-def export_to_video(frames: np.ndarray, fps: int) -> str:
-    frames = np.clip((frames * 255), 0, 255).astype(np.uint8)
-    out_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
-    writer = imageio.get_writer(out_file.name, format="FFMPEG", fps=fps)
-    for frame in frames:
-        writer.append_data(frame)
-    writer.close()
-    return out_file.name
-
 def infer(prompt):
     negative_prompt = "text, watermark, copyright, blurry, nsfw"
-    video_frames = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames[0]
-
-    video_path = export_to_video(video_frames, 12)
+    video_frames = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
+    video_path = export_to_video(video_frames)
     print(video_path)
-    return video_path
+    return video_path, gr.Group.update(visible=True)
 
 css = """
 #col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
@@ -138,6 +126,11 @@ with gr.Blocks(css=css) as demo:
     video_result = gr.Video(label="Video Output", elem_id="video-output")
 
     with gr.Row():
+        with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
+            community_icon = gr.HTML(community_icon_html)
+            loading_icon = gr.HTML(loading_icon_html)
+            share_button = gr.Button("Share with Community", elem_id="share-btn")
+
         gr.Markdown("""
         [![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-lg.svg#center)](https://huggingface.co/spaces/fffiloni/zeroscope-cloning?duplicate=true)
         """)
@@ -163,8 +156,10 @@
 
     submit_btn.click(fn=infer,
                      inputs=[prompt_in],
-                     outputs=[video_result],
+                     outputs=[video_result, share_group],
                      api_name="zrscp")
+
+    share_button.click(None, [], [], _js=share_js)
 
-demo.queue(max_size=12).launch(show_api=False)
+demo.queue(max_size=12).launch(show_api=True)
 
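Side note on the swap: the hand-rolled imageio/NumPy export helper is dropped in favour of diffusers' own exporter. Below is a minimal standalone sketch of the resulting generation path, assuming a diffusers release contemporary with zeroscope (roughly 0.18–0.20) whose `export_to_video` takes the list of frames returned by the pipeline and returns the path of a temporary .mp4 (it relies on OpenCV internally, which is presumably why opencv-python stays in requirements.txt). The prompt string is only an example value.

```python
# Hedged sketch, not part of the diff: reproduces the updated infer() flow end to end.
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video  # replaces the removed imageio-based helper

pipe = DiffusionPipeline.from_pretrained(
    "cerspense/zeroscope_v2_576w", torch_dtype=torch.float16
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()  # offload idle submodules to CPU to keep VRAM low

result = pipe(
    "a panda surfing a wave",  # example prompt
    negative_prompt="text, watermark, copyright, blurry, nsfw",
    num_inference_steps=40,
    height=320,
    width=576,
    num_frames=24,
)
video_path = export_to_video(result.frames)  # writes a temporary .mp4, returns its path
print(video_path)
```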
requirements.txt CHANGED
@@ -3,5 +3,4 @@ transformers
 accelerate
 torch
 opencv-python
-imageio[ffmpeg]==2.34.1
-numpy
+
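
With `launch(show_api=True)`, the click handler registered under `api_name="zrscp"` is also reachable programmatically. A hedged sketch using gradio_client; the Space id is a placeholder, and since infer() now returns two outputs (the video path plus the share-group visibility update), the exact shape of the returned value may differ:

```python
# Hedged sketch: the Space id below is a placeholder, not the real deployment.
from gradio_client import Client

client = Client("your-username/your-zeroscope-space")  # placeholder Space id
result = client.predict("a panda surfing a wave", api_name="/zrscp")
print(result)  # expected to include the path of the generated .mp4
```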