fffiloni committed on
Commit
54703ae
1 Parent(s): e769f83

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -22
app.py CHANGED
@@ -22,12 +22,9 @@ pipeline = StableDiffusionXLPipeline.from_pretrained(
22
  pipeline.enable_model_cpu_offload()
23
  pipeline.enable_vae_slicing()
24
 
25
- # DDIM inversion
26
-
27
- global zts, src_style, src_prompt
28
-
29
- def load_refs(ref_path, ref_style, ref_prompt, prompt1, prompt2, prompt3):
30
-
31
  src_style = f"{ref_style}"
32
  src_prompt = f"{ref_prompt}, {src_style}."
33
  image_path = f"{ref_path}"
@@ -36,9 +33,7 @@ def load_refs(ref_path, ref_style, ref_prompt, prompt1, prompt2, prompt3):
36
  x0 = np.array(load_image(image_path).resize((1024, 1024)))
37
  zts = inversion.ddim_inversion(pipeline, x0, src_prompt, num_inference_steps, 2)
38
  #mediapy.show_image(x0, title="innput reference image", height=256)
39
- return "ready"
40
 
41
- def run(prompt1, prompt2, prompt3):
42
  # run StyleAligned
43
  prompts = [
44
  src_prompt,
@@ -94,27 +89,16 @@ with gr.Blocks() as demo:
94
  ref_path = gr.Image(type="filepath", value="./example_image/medieval-bed.jpeg")
95
  ref_style = gr.Textbox(label="Reference style", value="medieval painting")
96
  ref_prompt = gr.Textbox(label="Referéce prompt", value="Man laying on bed")
97
- load_ref_btn = gr.Button("Load Reference")
98
  with gr.Column():
99
- is_ready = gr.Textbox(label = "Ref status")
100
  prompt1 = gr.Textbox(label="Prompt1", value="A man working on a laptop")
101
  prompt2 = gr.Textbox(label="Prompt2", value="A man eating pizza")
102
  prompt3 = gr.Textbox(label="Prompt3", value="A woman playing on saxophone")
103
  run_button = gr.Button("Submit")
104
  with gr.Column():
105
  results = gr.Gallery()
106
- load_ref_btn.click(
107
- fn = load_refs,
108
- inputs = [
109
- ref_path,
110
- ref_style,
111
- ref_prompt
112
- ],
113
- outputs = [
114
- is_ready
115
- ],
116
- queue = False
117
- )
118
  run_button.click(
119
  fn = run,
120
  inputs = [
 
22
  pipeline.enable_model_cpu_offload()
23
  pipeline.enable_vae_slicing()
24
 
25
+ def run(ref_path, ref_style, ref_prompt, prompt1, prompt2, prompt3):
26
+
27
+ # DDIM inversion
 
 
 
28
  src_style = f"{ref_style}"
29
  src_prompt = f"{ref_prompt}, {src_style}."
30
  image_path = f"{ref_path}"
 
33
  x0 = np.array(load_image(image_path).resize((1024, 1024)))
34
  zts = inversion.ddim_inversion(pipeline, x0, src_prompt, num_inference_steps, 2)
35
  #mediapy.show_image(x0, title="innput reference image", height=256)
 
36
 
 
37
  # run StyleAligned
38
  prompts = [
39
  src_prompt,
 
89
  ref_path = gr.Image(type="filepath", value="./example_image/medieval-bed.jpeg")
90
  ref_style = gr.Textbox(label="Reference style", value="medieval painting")
91
  ref_prompt = gr.Textbox(label="Referéce prompt", value="Man laying on bed")
92
+
93
  with gr.Column():
94
+
95
  prompt1 = gr.Textbox(label="Prompt1", value="A man working on a laptop")
96
  prompt2 = gr.Textbox(label="Prompt2", value="A man eating pizza")
97
  prompt3 = gr.Textbox(label="Prompt3", value="A woman playing on saxophone")
98
  run_button = gr.Button("Submit")
99
  with gr.Column():
100
  results = gr.Gallery()
101
+
 
 
 
 
 
 
 
 
 
 
 
102
  run_button.click(
103
  fn = run,
104
  inputs = [