Akjava committed on
Commit
6b2baab
·
1 Parent(s): abba001
Files changed (1) hide show
  1. app.py +25 -23
app.py CHANGED
@@ -23,26 +23,11 @@ def sanitize_prompt(prompt):
23
  sanitized_prompt = allowed_chars.sub("", prompt)
24
  return sanitized_prompt
25
 
26
- #@spaces.GPU
27
- def process_img2img(image,mask_image,prompt="a person",strength=0.75,seed=0,num_inference_steps=4):
28
- print("start process image process_image")
29
- if image == None:
30
- print("empty input image returned")
31
- return None
32
-
33
- generators = []
34
- generator = torch.Generator(device).manual_seed(seed)
35
- generators.append(generator)
36
- # more parameter see https://huggingface.co/docs/diffusers/api/pipelines/flux#diffusers.FluxInpaintPipeline
37
- print(prompt)
38
- output = pipe(prompt=prompt, image=image,generator=generator,strength=strength
39
- ,guidance_scale=0,num_inference_steps=num_inference_steps,max_sequence_length=256)
40
-
41
- # TODO support mask
42
- return output.images[0]
43
-
44
-
45
- @spaces.GPU(duration=180)
46
  def process_images(image, image2=None,prompt="a girl",strength=0.75,seed=0,progress=gr.Progress(track_tqdm=True)):
47
  print("start process_images")
48
 
@@ -65,6 +50,23 @@ def process_images(image, image2=None,prompt="a girl",strength=0.75,seed=0,progr
65
  mask = image['layers'][0]
66
 
67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  output = process_img2img(image["background"],mask,prompt,strength,seed)
69
 
70
  print("end process_images")
@@ -125,7 +127,7 @@ with gr.Blocks(css=css, elem_id="demo-container") as demo:
125
 
126
 
127
 
128
- #btn.click(fn=process_images, inputs=[image, image_mask,prompt,strength,seed], outputs =image_out, api_name='infer')
129
  gr.Examples(
130
  examples=[
131
  #["images/00547245_99.jpg", "images/00547245_99_mask.jpg","a beautiful girl,eyes closed",0.8,"images/00547245.jpg"],
@@ -143,12 +145,12 @@ with gr.Blocks(css=css, elem_id="demo-container") as demo:
143
 
144
  """
145
  )
146
- gr.on(
147
  triggers=[btn.click, prompt.submit],
148
  fn = process_images,
149
  inputs = [image, image_mask, prompt, strength, seed],
150
  outputs = [image_out]
151
- )
152
 
153
  demo.launch()
154
 
 
23
  sanitized_prompt = allowed_chars.sub("", prompt)
24
  return sanitized_prompt
25
 
26
+
27
+
28
+
29
+
30
+ @spaces.GPU(duration=160)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  def process_images(image, image2=None,prompt="a girl",strength=0.75,seed=0,progress=gr.Progress(track_tqdm=True)):
32
  print("start process_images")
33
 
 
50
  mask = image['layers'][0]
51
 
52
 
53
+ def process_img2img(image,mask_image,prompt="a person",strength=0.75,seed=0,num_inference_steps=4):
54
+ print("start process_img2img")
55
+ if image == None:
56
+ print("empty input image returned")
57
+ return None
58
+
59
+ generators = []
60
+ generator = torch.Generator(device).manual_seed(seed)
61
+ generators.append(generator)
62
+ # more parameter see https://huggingface.co/docs/diffusers/api/pipelines/flux#diffusers.FluxInpaintPipeline
63
+ print(prompt)
64
+ output = pipe(prompt=prompt, image=image,generator=generator,strength=strength
65
+ ,guidance_scale=0,num_inference_steps=num_inference_steps,max_sequence_length=512)
66
+
67
+ # TODO support mask
68
+ return output.images[0]
69
+
70
  output = process_img2img(image["background"],mask,prompt,strength,seed)
71
 
72
  print("end process_images")
 
127
 
128
 
129
 
130
+ btn.click(fn=process_images, inputs=[image, image_mask,prompt,strength,seed], outputs =image_out, api_name='infer')
131
  gr.Examples(
132
  examples=[
133
  #["images/00547245_99.jpg", "images/00547245_99_mask.jpg","a beautiful girl,eyes closed",0.8,"images/00547245.jpg"],
 
145
 
146
  """
147
  )
148
+ """gr.on(
149
  triggers=[btn.click, prompt.submit],
150
  fn = process_images,
151
  inputs = [image, image_mask, prompt, strength, seed],
152
  outputs = [image_out]
153
+ )"""
154
 
155
  demo.launch()
156