keplersj committed
Commit a472fc4 · Parent(s): be608b3

Spinners and Columns

Files changed (1): app.py (+23 -13)
app.py CHANGED
@@ -7,26 +7,36 @@ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
 model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
 pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 
-descs = []
+captions = []
 
 with st.sidebar:
     image_gen_guidance = st.slider("Stable Diffusion: Guidance Scale", value=7.5)
     image_gen_steps = st.slider("stable Diffusion: Inference Steps", value=50)
 
-files = st.file_uploader("Upload images to blend", accept_multiple_files=True)
+col1, col2 = st.columns(2)
 
-for file_name in files:
-    image = Image.open(file_name)
+with col1:
+    files = st.file_uploader("Upload images to blend", accept_multiple_files=True)
 
-    inputs = processor(image, return_tensors="pt")
+    for file_name in files:
+        image = Image.open(file_name)
 
-    out = model.generate(**inputs)
-    description = processor.decode(out[0], skip_special_tokens=True)
-    descs.append(description)
-    st.image(image, caption=description)
+        with st.spinner('Captioning Provided Image'):
+            inputs = processor(image, return_tensors="pt")
+            out = model.generate(**inputs)
+            description = processor.decode(out[0], skip_special_tokens=True)
+            captions.append(description)
 
-if len(descs) > 0:
-    description = ' '.join(descs)
-    images = pipe(description, guidance_scale=image_gen_guidance, num_inference_steps=image_gen_steps).images
-    for image in images:
+        st.success("Image Captioned")
         st.image(image, caption=description)
+
+with col2:
+    if len(captions) > 0:
+        description = ' '.join(captions)
+
+        with st.spinner('Generating Photo from Caption'):
+            images = pipe(description, guidance_scale=image_gen_guidance, num_inference_steps=image_gen_steps).images
+
+        st.success("Image Generated")
+        for image in images:
+            st.image(image, caption=description)
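
The pattern this commit introduces, in isolation: st.columns(2) splits the page into side-by-side panes, and st.spinner(...) shows a busy indicator only while the body of its with-block runs, with st.success(...) confirming completion afterward. A minimal standalone sketch of that pattern, assuming only streamlit; fake_model_call is a hypothetical stand-in for the slow BLIP and Stable Diffusion calls and is not part of this app:

import time

import streamlit as st

def fake_model_call(label: str) -> str:
    # Hypothetical placeholder for an expensive model invocation.
    time.sleep(2)
    return f"finished: {label}"

# Two side-by-side panes, as in the commit: inputs on the left, output on the right.
col1, col2 = st.columns(2)

with col1:
    # The spinner is visible only while this block executes.
    with st.spinner('Captioning Provided Image'):
        caption = fake_model_call("captioning")
    st.success("Image Captioned")
    st.write(caption)

with col2:
    with st.spinner('Generating Photo from Caption'):
        result = fake_model_call("generation")
    st.success("Image Generated")
    st.write(result)

Because Streamlit re-executes the whole script on every widget interaction, the spinners re-run on each rerun as well, which is why the commit wraps only the expensive model calls, not the layout code.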