fffiloni committed
Commit b879745 · 1 Parent(s): ee7720a

Update app.py

Files changed (1)
  1. app.py  +46 −46
app.py CHANGED
@@ -1,6 +1,4 @@
  import gradio as gr
- import spaces
-
  from diffusers import StableDiffusionXLPipeline, DDIMScheduler
  import torch
  import sa_handler
@@ -27,69 +25,71 @@ from diffusers.utils import load_image
  import inversion
  import numpy as np

- src_style = "medieval painting"
- src_prompt = f'Man laying in a bed, {src_style}.'
- image_path = './example_image/medieval-bed.jpeg'
-
- num_inference_steps = 50
- x0 = np.array(load_image(image_path).resize((1024, 1024)))
- zts = inversion.ddim_inversion(pipeline, x0, src_prompt, num_inference_steps, 2)
- # mediapy.show_image(x0, title="input reference image", height=256)
-
- # run StyleAligned
- prompts = [
-     src_prompt,
-     "A man working on a laptop",
-     "A man eats pizza",
-     "A woman playing on saxophone",
- ]
-
- # some parameters you can adjust to control fidelity to reference
- shared_score_shift = np.log(2)  # higher value induces higher fidelity, set 0 for no shift
- shared_score_scale = 1.0  # higher value induces higher fidelity, set 1 for no rescale
-
- # for very famous images consider suppressing attention to the reference, here is a configuration example:
- # shared_score_shift = np.log(1)
- # shared_score_scale = 0.5
-
- for i in range(1, len(prompts)):
-     prompts[i] = f'{prompts[i]}, {src_style}.'
-
- handler = sa_handler.Handler(pipeline)
- sa_args = sa_handler.StyleAlignedArgs(
-     share_group_norm=True, share_layer_norm=True, share_attention=True,
-     adain_queries=True, adain_keys=True, adain_values=False,
-     shared_score_shift=shared_score_shift, shared_score_scale=shared_score_scale,)
- handler.register(sa_args)
-
- zT, inversion_callback = inversion.make_inversion_callback(zts, offset=5)
-
- g_cpu = torch.Generator(device='cpu')
- g_cpu.manual_seed(10)
-
- latents = torch.randn(len(prompts), 4, 128, 128, device='cpu', generator=g_cpu,
-                       dtype=pipeline.unet.dtype,).to('cuda:0')
- latents[0] = zT
-
- images_a = pipeline(prompts, latents=latents,
-                     callback_on_step_end=inversion_callback,
-                     num_inference_steps=num_inference_steps, guidance_scale=10.0).images
-
- handler.remove()
- mediapy.show_images(images_a, titles=[p[:-(len(src_style) + 3)] for p in prompts])
-
-
- @spaces.GPU
- def infer(prompts):
-
-     images = pipeline(sets_of_prompts,).images
-     return images_a
+ def run(ref_path, ref_style, ref_prompt, prompt1, prompt2, prompt3):
+
+     src_style = f"{ref_style}"
+     src_prompt = f"{ref_prompt}, {src_style}."
+     image_path = f"{ref_path}"
+
+     num_inference_steps = 50
+     x0 = np.array(load_image(image_path).resize((1024, 1024)))
+     zts = inversion.ddim_inversion(pipeline, x0, src_prompt, num_inference_steps, 2)
+     # mediapy.show_image(x0, title="input reference image", height=256)
+
+     # run StyleAligned
+     prompts = [
+         src_prompt,
+         prompt1,
+         prompt2,
+         prompt3
+     ]
+
+     # some parameters you can adjust to control fidelity to reference
+     shared_score_shift = np.log(2)  # higher value induces higher fidelity, set 0 for no shift
+     shared_score_scale = 1.0  # higher value induces higher fidelity, set 1 for no rescale
+
+     # for very famous images consider suppressing attention to the reference, here is a configuration example:
+     # shared_score_shift = np.log(1)
+     # shared_score_scale = 0.5
+
+     for i in range(1, len(prompts)):
+         prompts[i] = f'{prompts[i]}, {src_style}.'
+
+     handler = sa_handler.Handler(pipeline)
+     sa_args = sa_handler.StyleAlignedArgs(
+         share_group_norm=True, share_layer_norm=True, share_attention=True,
+         adain_queries=True, adain_keys=True, adain_values=False,
+         shared_score_shift=shared_score_shift, shared_score_scale=shared_score_scale,)
+     handler.register(sa_args)
+
+     zT, inversion_callback = inversion.make_inversion_callback(zts, offset=5)
+
+     g_cpu = torch.Generator(device='cpu')
+     g_cpu.manual_seed(10)
+
+     latents = torch.randn(len(prompts), 4, 128, 128, device='cpu', generator=g_cpu,
+                           dtype=pipeline.unet.dtype,).to('cuda:0')
+     latents[0] = zT
+
+     images_a = pipeline(prompts, latents=latents,
+                         callback_on_step_end=inversion_callback,
+                         num_inference_steps=num_inference_steps, guidance_scale=10.0).images
+
+     handler.remove()
+     # mediapy.show_images(images_a, titles=[p[:-(len(src_style) + 3)] for p in prompts])
+
+     return images_a
+
  gr.Interface(
-     fn=infer,
+     fn=run,
      inputs=[
-         gr.Textbox(value="Hit submit button to test")
+         gr.Image(type="filepath", value="./example_image/medieval-bed.jpeg"),
+         gr.Textbox(value="medieval painting"),
+         gr.Textbox(value="Man laying on bed"),
+         gr.Textbox(value="A man working on a laptop"),
+         gr.Textbox(value="A man eating pizza"),
+         gr.Textbox(value="A woman playing on saxophone")
      ],
      outputs=[
          gr.Gallery()
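
Note: both hunks call into a module-level pipeline object that this commit never touches; it is built in the unchanged lines between the two hunks (near the "from diffusers.utils import load_image" line named in the second hunk header). For readers reproducing the Space, here is a minimal sketch of a compatible setup; the checkpoint ID, the scheduler parameters, and the closing .launch() are assumptions drawn from the upstream StyleAligned SDXL demo, not part of this diff:

    import torch
    from diffusers import StableDiffusionXLPipeline, DDIMScheduler

    # Hypothetical reconstruction of the unchanged setup lines (not in this commit).
    # DDIM with these betas matches the inversion-friendly configuration used by
    # the StyleAligned examples.
    scheduler = DDIMScheduler(
        beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
        clip_sample=False, set_alpha_to_one=False)

    # Assumed checkpoint: the public SDXL base weights; the Space may pin another model.
    pipeline = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        torch_dtype=torch.float16, variant="fp16", use_safetensors=True,
        scheduler=scheduler,
    ).to("cuda")

    # The gr.Interface(...) call would then presumably close with:
    # ).launch()

With that in place, the new run(...) signature maps one-to-one onto the six Interface inputs, so each request re-runs DDIM inversion on the uploaded reference image rather than relying on the single example the old module-level script computed once at import time.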