Update app.py
app.py CHANGED
@@ -26,6 +26,7 @@ try:
     st.write("Cache migration completed successfully.")
 except Exception as e:
     st.error(f"Cache migration failed: {e}")
+    st.write("Proceeding without cache migration...")
 
 if uploaded_file and prompt:
     try:
@@ -41,15 +42,21 @@
 
         # Load the image
         st.write("Loading image...")
-
-
+        try:
+            image = load_image("uploaded_image.jpg")
+            st.write("Image loaded successfully.")
+        except Exception as e:
+            st.error("Error loading the image.")
+            st.write(f"Debug info: {e}")
+            raise e
 
         # Initialize the CogVideoX pipeline
         st.write("Initializing the pipeline...")
         try:
             pipe = CogVideoXImageToVideoPipeline.from_pretrained(
                 "THUDM/CogVideoX1.5-5B-I2V",
-                torch_dtype=torch.bfloat16
+                torch_dtype=torch.bfloat16,
+                cache_dir="./huggingface_cache"  # Adjust path if necessary
             )
             st.write("Pipeline initialized successfully.")
         except Exception as e:
@@ -67,22 +74,32 @@ if uploaded_file and prompt:
 
         # Generate the video
         st.write("Generating video... This may take a while.")
-
-
-
-
-
-
-
-
-
-
+        try:
+            video_frames = pipe(
+                prompt=prompt,
+                image=image,
+                num_videos_per_prompt=1,
+                num_inference_steps=50,
+                num_frames=81,
+                guidance_scale=6,
+                generator=torch.Generator(device="cuda").manual_seed(42),
+            ).frames[0]
+            st.write("Video generated successfully.")
+        except Exception as e:
+            st.error("Error during video generation.")
+            st.write(f"Debug info: {e}")
+            raise e
 
         # Export video
         st.write("Exporting video...")
-
-
-
+        try:
+            video_path = "output.mp4"
+            export_to_video(video_frames, video_path, fps=8)
+            st.write("Video exported successfully.")
+        except Exception as e:
+            st.error("Error exporting video.")
+            st.write(f"Debug info: {e}")
+            raise e
 
         # Display video in Streamlit
         st.video(video_path)
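For reference, below is a minimal sketch of how the changed section of app.py reads once this commit is applied. Everything inside the "if uploaded_file and prompt:" block is taken from the added lines of the diff; the imports, the uploader and prompt widgets, the step that writes the upload to uploaded_image.jpg, and the pipe.to("cuda") call are assumptions, since those lines sit outside the hunks shown here.

import streamlit as st
import torch
from diffusers import CogVideoXImageToVideoPipeline
from diffusers.utils import export_to_video, load_image

# Assumed widgets: the real app.py defines its own UI outside the diff hunks.
uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
prompt = st.text_input("Prompt")

if uploaded_file and prompt:
    try:
        # Assumed step: persist the upload under the filename the diff loads from.
        with open("uploaded_image.jpg", "wb") as f:
            f.write(uploaded_file.getbuffer())

        # Load the image (the commit wraps this in its own try/except; flattened here).
        st.write("Loading image...")
        image = load_image("uploaded_image.jpg")

        # Initialize the CogVideoX pipeline; cache_dir is the argument this commit adds.
        st.write("Initializing the pipeline...")
        pipe = CogVideoXImageToVideoPipeline.from_pretrained(
            "THUDM/CogVideoX1.5-5B-I2V",
            torch_dtype=torch.bfloat16,
            cache_dir="./huggingface_cache",  # adjust path if necessary
        )
        pipe.to("cuda")  # assumed; the lines between the diff hunks are not shown

        # Generate the video with the parameters from the added lines.
        st.write("Generating video... This may take a while.")
        video_frames = pipe(
            prompt=prompt,
            image=image,
            num_videos_per_prompt=1,
            num_inference_steps=50,
            num_frames=81,
            guidance_scale=6,
            generator=torch.Generator(device="cuda").manual_seed(42),
        ).frames[0]

        # Export the frames to MP4 and display the result in the app.
        st.write("Exporting video...")
        video_path = "output.mp4"
        export_to_video(video_frames, video_path, fps=8)
        st.video(video_path)
    except Exception as e:
        st.error(f"Something went wrong: {e}")

Run locally with streamlit run app.py; the 81-frame, 50-step generation with the CogVideoX1.5-5B-I2V checkpoint in bfloat16 assumes a CUDA GPU with enough memory for the weights.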