sab commited on
Commit
429d253
·
1 Parent(s): 7f0de90
Files changed (2) hide show
  1. app.py +52 -95
  2. requirements.txt +2 -7
app.py CHANGED
@@ -1,108 +1,66 @@
1
  import os
2
- import torch
3
  import uuid
 
 
 
 
4
  from pathlib import Path
5
  import gradio as gr
6
  from gradio_imageslider import ImageSlider # Ensure this library is installed
 
7
 
8
- if os.environ.get("SPACES_ZERO_GPU") is not None:
9
- import spaces
10
- else:
11
- class spaces:
12
- @staticmethod
13
- def GPU(func):
14
- def wrapper(*args, **kwargs):
15
- return func(*args, **kwargs)
16
 
17
- return wrapper
 
18
 
19
- from diffusers.utils import check_min_version
20
- from flux.controlnet_flux import FluxControlNetModel
21
- from flux.transformer_flux import FluxTransformer2DModel
22
- from flux.pipeline_flux_controlnet_inpaint import FluxControlNetInpaintingPipeline
23
 
24
- # Import configuration
25
- import config
 
 
26
 
27
- # Define the output folder
28
- output_folder = Path('output_images')
29
- output_folder.mkdir(exist_ok=True)
30
 
31
- # Login to Hugging Face Hub
32
- # huggingface_hub.login(os.getenv('HF_TOKEN_FLUX'))
33
- check_min_version("0.30.2")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- # Load models
36
- transformer = FluxTransformer2DModel.from_pretrained(
37
- "black-forest-labs/FLUX.1-dev", subfolder='transformer', torch_dtype=torch.bfloat16
38
- )
39
- controlnet = FluxControlNetModel.from_pretrained(
40
- "alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta", torch_dtype=torch.bfloat16
41
- )
42
 
43
- # Build pipeline
44
- pipe = FluxControlNetInpaintingPipeline.from_pretrained(
45
- "black-forest-labs/FLUX.1-dev",
46
- controlnet=controlnet,
47
- transformer=transformer,
48
- torch_dtype=torch.bfloat16
49
- ).to("cuda")
50
- pipe.transformer.to(torch.bfloat16)
51
- pipe.controlnet.to(torch.bfloat16)
52
-
53
-
54
- @spaces.GPU()
55
- def process(input_image_editor):
56
- # Use default values from config
57
- negative_prompt = config.DEFAULT_NEGATIVE_PROMPT
58
- controlnet_conditioning_scale = config.DEFAULT_CONTROLNET_CONDITIONING_SCALE
59
- guidance_scale = config.DEFAULT_GUIDANCE_SCALE
60
- seed = config.DEFAULT_SEED
61
- num_inference_steps = config.DEFAULT_NUM_INFERENCE_STEPS
62
- true_guidance_scale = config.DEFAULT_TRUE_GUIDANCE_SCALE
63
-
64
- # Process image and mask
65
- image = input_image_editor['background']
66
- mask = input_image_editor['layers'][0]
67
- #print("TYPE=", type(image))
68
- _h, _w = image.size
69
- # Calculate the new dimensions
70
- h = (_h // 8) * 8 + (8 if _h % 8 != 0 else 0)
71
- w = (_w // 8) * 8 + (8 if _w % 8 != 0 else 0)
72
-
73
- size = (h, w)
74
-
75
- image_or = image.copy()
76
- image = image.convert("RGB").resize(size)
77
- mask = mask.convert("RGB").resize(size)
78
- generator = torch.Generator(device="cuda").manual_seed(seed)
79
-
80
- # Generate result
81
- result = pipe(
82
- prompt="nothing",#os.getenv('MAGIC_PROMPT'),
83
- height=size[1],
84
- width=size[0],
85
- control_image=image,
86
- control_mask=mask,
87
- num_inference_steps=num_inference_steps,
88
- generator=generator,
89
- controlnet_conditioning_scale=controlnet_conditioning_scale,
90
- guidance_scale=guidance_scale,
91
- negative_prompt=negative_prompt,
92
- true_guidance_scale=true_guidance_scale
93
- ).images[0]
94
-
95
- processed_image = result.resize(image_or.size[:2])
96
-
97
- # Save the processed image
98
- output_folder = Path("output") # Make sure this folder exists or create it
99
- output_folder.mkdir(parents=True, exist_ok=True)
100
- image_path = output_folder / f"no_bg_image_{uuid.uuid4().hex}.png"
101
- processed_image.save(image_path)
102
-
103
- return (processed_image, image), str(image_path)
104
-
105
- #################################################################
106
 
107
  # Define inputs and outputs for the Gradio interface
108
  image = gr.ImageEditor(
@@ -116,12 +74,11 @@ image = gr.ImageEditor(
116
  output_slider = ImageSlider(label="Processed photo", type="pil")
117
 
118
  demo = gr.Interface(
119
- fn=process,
120
  inputs=image,
121
  outputs=[output_slider, gr.File(label="output png file")],
122
  #title="🫧 Snap Clean 🧽",
123
- description=config.DESCRIPTION
124
  )
125
 
126
-
127
  demo.launch(debug=False, show_error=True, share=True)
 
1
  import os
 
2
  import uuid
3
+ import base64
4
+ import requests
5
+ from PIL import Image
6
+ from io import BytesIO
7
  from pathlib import Path
8
  import gradio as gr
9
  from gradio_imageslider import ImageSlider # Ensure this library is installed
10
+ from dotenv import load_dotenv
11
 
12
# Load environment variables from the .env file (e.g. API_KEY, BACKEND_URL).
load_dotenv()

# API key forwarded to the backend as the "access_token" header.
# NOTE(review): will be None if API_KEY is unset — the request is still sent.
api_key = os.getenv('API_KEY')
17
 
18
# Call the FastAPI prediction endpoint to process the masked image region.
def process_image(input_image_editor):
    """Send the edited image and its mask to the backend ``/predict/`` endpoint.

    Args:
        input_image_editor: dict produced by ``gr.ImageEditor`` with a
            ``'background'`` PIL image and a ``'layers'`` list whose first
            entry is the painted mask.

    Returns:
        ``((processed_image, input_image), image_path_str)`` — an image pair
        for the ImageSlider output plus the saved PNG path for ``gr.File``.

    Raises:
        RuntimeError: if ``BACKEND_URL`` is unset or the backend responds
            with a non-200 status code.
    """
    input_image = input_image_editor['background']
    mask_image = input_image_editor['layers'][0]

    def _to_base64_png(img):
        # Encode a PIL image as a base64 PNG string for the JSON payload.
        buf = BytesIO()
        img.save(buf, format="PNG")
        return base64.b64encode(buf.getvalue()).decode()

    # Build the POST payload mirroring the ImageEditor structure.
    payload = {
        "input_image_editor": {
            "background": _to_base64_png(input_image),
            "layers": [_to_base64_png(mask_image)]
        }
    }

    backend_url = os.getenv('BACKEND_URL')
    if not backend_url:
        # Fail with a clear message instead of `TypeError: NoneType + str`.
        raise RuntimeError("BACKEND_URL environment variable is not set")

    # POST to the FastAPI backend; the timeout prevents the Gradio worker
    # from hanging forever if the backend is unreachable.
    response = requests.post(
        backend_url + "/predict/",
        headers={"access_token": api_key},
        json=payload,
        timeout=300,
    )

    if response.status_code == 200:
        result = response.json()
        processed_image_base64 = result['processed_image']
        processed_image = Image.open(BytesIO(base64.b64decode(processed_image_base64)))

        # Persist the result so it can be offered as a downloadable file.
        output_folder = Path("output")  # Make sure this folder exists or create it
        output_folder.mkdir(parents=True, exist_ok=True)
        image_path = output_folder / f"no_bg_image_{uuid.uuid4().hex}.png"
        processed_image.save(image_path)

        return (processed_image, input_image), str(image_path)

    # Surface a truncated response body so backend failures are debuggable.
    raise RuntimeError(
        f"Request failed with status code {response.status_code}: "
        f"{response.text[:500]}"
    )
62
 
 
 
 
 
 
 
 
63
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
  # Define inputs and outputs for the Gradio interface
66
  image = gr.ImageEditor(
 
74
# Before/after comparison slider used as the primary output widget.
output_slider = ImageSlider(label="Processed photo", type="pil")

# Wire the image-editor input to the backend-backed processing function;
# outputs are the slider pair plus the saved PNG as a downloadable file.
demo = gr.Interface(
    fn=process_image,
    inputs=image,
    outputs=[output_slider, gr.File(label="output png file")],
    #title="🫧 Snap Clean 🧽",
    description="Upload an image and a mask to remove unwanted objects."
)

# show_error surfaces exceptions raised in process_image to the UI;
# share=True exposes a temporary public gradio.live URL.
demo.launch(debug=False, show_error=True, share=True)
requirements.txt CHANGED
@@ -1,8 +1,3 @@
1
- diffusers==0.30.2
2
- torch
3
- transformers
4
- accelerate
5
- huggingface_hub
6
- sentencepiece
7
  gradio
8
- gradio_imageslider
 
 
 
 
 
 
 
 
1
  gradio
2
+ gradio_imageslider
3
+ python-dotenv
4
+ requests
5
+ Pillow