Upload 3 files
- app.py +1 -8
- img_gen_v2.py +9 -13
app.py
CHANGED

@@ -1,7 +1,4 @@
 import streamlit as st
-import requests
-from PIL import Image
-from io import BytesIO
 from gtts import gTTS
 
 from img_gen_v2 import generate_story
@@ -44,12 +41,8 @@ def page_navigation(current_page):
 def get_pipeline_data(page_number):
     pipeline_response = st.session_state.pipeline_response
     text_output = pipeline_response.get("steps")[page_number - 1]
-
-    # random_img = f"https://picsum.photos/800/600?random={page_number}"
-    # response = requests.get(random_img)
-    # image = Image.open(BytesIO(response.content))
     img_dict = st.session_state.img_dict
-    img = img_dict[page_number-1]
+    img = img_dict[page_number-1].get("image")
 
     return {"text_output": text_output, "image_obj": img}
 
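For context, the updated line in get_pipeline_data assumes that st.session_state.img_dict now holds one {"image", "prompt"} entry per page, matching what generate_story builds in img_gen_v2.py. A minimal sketch of that assumed shape follows; the page_number and placeholder PIL image are illustrative only.

# Minimal sketch of the session-state shape the updated line relies on.
# The placeholder image and page_number are illustrative, not part of the app.
import streamlit as st
from PIL import Image

page_number = 1

if "img_dict" not in st.session_state:
    st.session_state.img_dict = [
        {"image": Image.new("RGB", (800, 600), "gray"), "prompt": "placeholder page"},
    ]

entry = st.session_state.img_dict[page_number - 1]
img = entry.get("image")  # what get_pipeline_data now returns as "image_obj"
st.image(img, caption=entry.get("prompt"))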
img_gen_v2.py
CHANGED

@@ -6,14 +6,12 @@ from diffusers import StableDiffusionImg2ImgPipeline, \
 
 def check_cuda_device():
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    print(device)
     return device
 
 
 def get_the_model(device=None):
     model_id = "stabilityai/stable-diffusion-2"
-    # if path:
-    #     pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
-    # else:
     pipe = StableDiffusionPipeline.from_pretrained(model_id,
                                                    torch_dtype=torch.float16)
     if device:
@@ -53,20 +51,18 @@ def gen_initial_img(int_prompt):
     return image
 
 
-def generate_story(
+def generate_story(pipe, original_image, steps, iterations=10):
     image_dic = {}
-
-    img2img_model = get_image_to_image_model()
-
-    img = init_img
-
+    img = original_image
     for idx, step in enumerate(steps):
-
-
-
+        print(idx)
+        image = pipe(prompt=step, image=img, strength=0.75, guidance_scale=7.5,
+                     num_inference_steps=iterations).images[0]
+        image_dic[f"step_{idx}"] = {
             "image": image,
             "prompt": step
         }
         img = image
+        break
 
-    return
+    return image_dic
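The new generate_story signature takes a ready img2img pipeline instead of building one internally. A minimal sketch of how it could be driven, assuming the same stabilityai/stable-diffusion-2 checkpoint, a CUDA device, and that gen_initial_img returns a PIL image; the story prompts are illustrative.

# Minimal sketch of calling the updated generate_story (illustrative prompts;
# assumes a CUDA device, matching the float16 weights loaded above).
import torch
from diffusers import StableDiffusionImg2ImgPipeline

from img_gen_v2 import gen_initial_img, generate_story

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2", torch_dtype=torch.float16
).to("cuda")

steps = [
    "a knight rides out of a misty castle at dawn",
    "the knight crosses a stormy mountain pass",
]

init_img = gen_initial_img(steps[0])          # assumed to return a PIL image
img_dict = generate_story(pipe, init_img, steps, iterations=10)

# Each value pairs a generated frame with the prompt that produced it,
# e.g. img_dict["step_0"]["image"] and img_dict["step_0"]["prompt"].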