test #3
opened by ayazii2
- README.md +1 -1
- app.py +25 -54
- requirements.txt +0 -3
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🐠
 colorFrom: blue
 colorTo: gray
 sdk: gradio
-sdk_version:
+sdk_version: 3.35.2
 app_file: app.py
 pinned: false
 duplicated_from: fffiloni/zeroscope
app.py CHANGED
@@ -1,59 +1,26 @@
 import gradio as gr
-import
-from gradio_client import Client, handle_file
-import numpy as np
-import tempfile
-import imageio
-
+from share_btn import community_icon_html, loading_icon_html, share_js
 import torch
 from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
+from diffusers.utils import export_to_video
 
 pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
 pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
 pipe.enable_model_cpu_offload()
 
-
-
-def get_caption(image_in):
-    kosmos2_client = Client("fffiloni/Kosmos-2-API", hf_token=hf_token)
-    kosmos2_result = kosmos2_client.predict(
-        image_input=handle_file(image_in),
-        text_input="Detailed",
-        api_name="/generate_predictions"
-    )
-    print(f"KOSMOS2 RETURNS: {kosmos2_result}")
-
-    data = kosmos2_result[1]
-
-    # Extract and combine tokens starting from the second element
-    sentence = ''.join(item['token'] for item in data[1:])
+caption = gr.load(name="spaces/fffiloni/CoCa-clone")
 
-
-
+def create_image_caption(image_init):
+    cap = caption(image_init, "Nucleus sampling", 1.2, 0.5, 5, 20, fn_index=0)
+    print("cap: " + cap)
+    return cap
 
-
-
-
-
-    #print(f"\n—\nIMAGE CAPTION: {truncated_caption}")
-
-    return sentence
-
-def export_to_video(frames: np.ndarray, fps: int) -> str:
-    frames = np.clip((frames * 255), 0, 255).astype(np.uint8)
-    out_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
-    writer = imageio.get_writer(out_file.name, format="FFMPEG", fps=fps)
-    for frame in frames:
-        writer.append_data(frame)
-    writer.close()
-    return out_file.name
-
-def infer(image_init, progress=gr.Progress(track_tqdm=True)):
-    prompt = get_caption(image_init)
-    video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames[0]
-    video_path = export_to_video(video_frames, 12)
+def infer(image_init):
+    prompt = create_image_caption(image_init)
+    video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
+    video_path = export_to_video(video_frames)
     print(video_path)
-    return prompt, video_path
+    return prompt, video_path, gr.Group.update(visible=True)
 
 css = """
 #col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
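Reviewer note: the export_to_video helper removed in this hunk is self-contained and easy to verify outside the Space. A minimal sketch of it as removed, assuming imageio[ffmpeg] is installed; the random frames in the smoke test are a stand-in for real pipeline output, not part of the diff:

```python
import tempfile

import imageio
import numpy as np

def export_to_video(frames: np.ndarray, fps: int) -> str:
    # Pipeline frames arrive as floats in [0, 1]; scale and clip to uint8.
    frames = np.clip(frames * 255, 0, 255).astype(np.uint8)
    # delete=False keeps the file on disk so Gradio can serve the path later.
    out_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    writer = imageio.get_writer(out_file.name, format="FFMPEG", fps=fps)
    for frame in frames:
        writer.append_data(frame)
    writer.close()
    return out_file.name

# Smoke test with dummy data: 24 random 320x576 RGB frames at 12 fps,
# matching the shapes produced by the pipe call above.
print(export_to_video(np.random.rand(24, 320, 576, 3).astype(np.float32), fps=12))
```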
@@ -135,18 +102,22 @@ with gr.Blocks(css=css) as demo:
         """
         )
 
-        image_init = gr.Image(label="Image Init",
+        image_init = gr.Image(label="Image Init", type="filepath", source="upload", elem_id="image-init")
         #inference_steps = gr.Slider(label="Inference Steps", minimum=10, maximum=100, step=1, value=40, interactive=False)
         submit_btn = gr.Button("Submit")
-        coca_cap = gr.Textbox(label="Caption", placeholder="
+        coca_cap = gr.Textbox(label="Caption", placeholder="CoCa Caption will be displayed here", elem_id="coca-cap-in")
         video_result = gr.Video(label="Video Output", elem_id="video-output")
 
-
-
-
-
-
-
+        with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
+            community_icon = gr.HTML(community_icon_html)
+            loading_icon = gr.HTML(loading_icon_html)
+            share_button = gr.Button("Share to community", elem_id="share-btn")
+
+        submit_btn.click(fn=infer,
+                        inputs=[image_init],
+                        outputs=[coca_cap, video_result, share_group])
+
+        share_button.click(None, [], [], _js=share_js)
 
-demo.queue(max_size=12).launch(
+demo.queue(max_size=12).launch()
 
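Reviewer note: the added side relies on the Gradio 3.x update idiom (gr.Group.update(visible=True) returned from the handler, _js= for client-side JavaScript); both were replaced in Gradio 4, which is why the sdk_version pin in README.md matters. A stripped-down sketch of the show-on-first-result pattern, with the handler body and HTML placeholder invented for illustration:

```python
import gradio as gr

with gr.Blocks() as demo:
    submit_btn = gr.Button("Submit")
    caption_box = gr.Textbox(label="Caption")
    # Hidden until the first result arrives, like share_group in this diff.
    with gr.Group(visible=False) as share_group:
        gr.HTML("<p>share widgets go here</p>")
        share_button = gr.Button("Share to community")

    def run():
        # Gradio 3.x: returning Component.update(...) mutates its properties.
        return "a caption", gr.Group.update(visible=True)

    submit_btn.click(fn=run, inputs=[], outputs=[caption_box, share_group])
    # fn=None with _js runs JavaScript in the browser instead of Python.
    share_button.click(None, [], [], _js="() => alert('shared')")

demo.queue(max_size=12).launch()
```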
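Reviewer note: the two sides also appear to target different diffusers releases. The added code passes .frames straight to diffusers' own export_to_video, which matches older releases where a single array came back; the removed code indexes .frames[0] and writes the file itself, which matches newer releases that return one frame sequence per prompt. A sketch of the newer call path, with a made-up prompt (in the Space the prompt comes from the image caption):

```python
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

pipe = DiffusionPipeline.from_pretrained(
    "cerspense/zeroscope_v2_576w", torch_dtype=torch.float16
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
# Page submodules between CPU and GPU so the 576w model fits in modest VRAM.
pipe.enable_model_cpu_offload()

result = pipe(
    "a goldfish swimming in a bowl",  # illustrative prompt only
    num_inference_steps=40,
    height=320,
    width=576,
    num_frames=24,
)
# Recent diffusers batch the output per prompt, hence the [0]; older
# releases returned a single array, which is what bare .frames assumed.
video_frames = result.frames[0]
```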
requirements.txt CHANGED
@@ -1,9 +1,6 @@
-numpy==1.26.4
 diffusers
 transformers
 accelerate
 torch==2.0.1
 opencv-python
-imageio[ffmpeg]==2.34.1
-huggingface_hub==0.25.2
 