grbell committed on
Commit
d3a2dac
·
verified ·
1 Parent(s): 726a8d9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -1
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import streamlit as st
2
  from transformers import pipeline
3
  from PIL import Image
@@ -17,4 +18,47 @@ if file_name is not None:
17
 
18
  col2.header("Probabilities")
19
  for p in predictions:
20
- col2.subheader(f"{ p['label'] }: { round(p['score'] * 100, 1)}%")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
  import streamlit as st
3
  from transformers import pipeline
4
  from PIL import Image
 
18
 
19
  col2.header("Probabilities")
20
  for p in predictions:
21
+ col2.subheader(f"{ p['label'] }: { round(p['score'] * 100, 1)}%")
22
+ """
23
+
24
+ import streamlit as st
25
+ from transformers import pipeline
26
+ from PIL import Image
27
+ import torch
28
+ from diffusers import CogVideoXImageToVideoPipeline
29
+ from diffusers.utils import export_to_video, load_image
30
+
31
+ prompt = "A vast, shimmering ocean flows gracefully under a twilight sky, its waves undulating in a mesmerizing dance of blues and greens. The surface glints with the last rays of the setting sun, casting golden highlights that ripple across the water. Seagulls soar above, their cries blending with the gentle roar of the waves. The horizon stretches infinitely, where the ocean meets the sky in a seamless blend of hues. Close-ups reveal the intricate patterns of the waves, capturing the fluidity and dynamic beauty of the sea in motion."
32
+
33
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
34
+ "THUDM/CogVideoX-5b-I2V",
35
+ torch_dtype=torch.bfloat16
36
+ )
37
+
38
+ pipe.vae.enable_tiling()
39
+ pipe.vae.enable_slicing()
40
+
41
+ st.title("Image to Video")
42
+
43
+ file_name = st.file_uploader("Upload an image")
44
+
45
+ if file_name is None:
46
+ return
47
+
48
+ col1, col2 = st.columns(2)
49
+ image = load_image(image=file_name)
50
+ col1.image(image, use_container_width=True)
51
+
52
+ video = pipe(
53
+ prompt=prompt,
54
+ image=image,
55
+ num_videos_per_prompt=1,
56
+ num_inference_steps=50,
57
+ num_frames=49,
58
+ guidance_scale=6,
59
+ generator=torch.Generator(device="cuda").manual_seed(42),
60
+ ).frames[0]
61
+
62
+ col2.video(video, use_container_width=True)
63
+
64
+ export_to_video(video, "output.mp4", fps=8)