kikopubisher committed
Commit f3adba4 · verified · 1 Parent(s): ca6690d

Update app.py

Files changed (1): app.py (+15 -5)
app.py CHANGED
```diff
@@ -2,15 +2,17 @@ import streamlit as st
 from PIL import Image
 import torch
 from diffusers import StableDiffusionImg2VidPipeline
+import numpy as np
+import io
 
 # Page setup
-st.set_page_config(page_title="Image to Video (SVD)", page_icon="🎥")
+st.set_page_config(page_title="Image to Video with Editing", page_icon="🎥")
 
 # Page title
 st.title("Stable Video Diffusion - Image to Video")
 
 # Instructions
-st.write("Upload an image, and the model will generate a short video clip from it.")
+st.write("Upload an image to generate a video. You can also adjust settings for video generation.")
 
 # Load the model
 @st.cache_resource
@@ -24,6 +26,9 @@ pipe = load_model()
 # User input
 uploaded_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
 
+# Options for adjusting the generated video
+frame_count = st.slider("Number of frames", min_value=10, max_value=50, value=25, step=5)
+
 if uploaded_image is not None:
     image = Image.open(uploaded_image)
     st.image(image, caption='Uploaded Image', use_column_width=True)
@@ -31,17 +36,22 @@ if uploaded_image is not None:
     if st.button('Generate Video'):
         # Convert the image to a video
         with st.spinner("Generating video..."):
-            video_frames = pipe(image)
+            video_frames = pipe(image, num_frames=frame_count)
 
         st.success("Video generated successfully!")
-
+
         # Display the video
         st.video(video_frames[0], format="video/mp4")
 
+        # Download the video
+        video_bytes = io.BytesIO()
+        video_frames[0].save(video_bytes, format="mp4")
+        st.download_button(label="Download Video", data=video_bytes.getvalue(), file_name="generated_video.mp4", mime="video/mp4")
+
 # Provide some information about the model
 st.write("""
 ### About the Model:
 SVD Image-to-Video is a latent diffusion model trained to generate short video clips from an image conditioning.
-This model generates 25 frames at a resolution of 576x1024 given a context frame of the same size, fine-tuned from the SVD Image-to-Video [14 frames] model.
+This model generates frames at a resolution of 576x1024 given a context frame of the same size, fine-tuned from the SVD Image-to-Video [14 frames] model.
 The widely used f8-decoder is also fine-tuned for temporal consistency, making the output videos more stable and coherent.
 """)
```