import os
import requests
import logging
from PIL import Image, ImageEnhance
import cv2
import numpy as np
from preprocess.humanparsing.run_parsing import Parsing
from src.image_format_convertor import ImageFormatConvertor

REMOVE_BG_KEY = os.getenv('REMOVE_BG_KEY')

parsing_model = Parsing(0)

class BackgroundProcessor:
    """Utilities for compositing a segmented person onto a new background."""

    # Deprecated: created only for testing; not in use.
    @classmethod
    def add_background(cls, human_img: Image.Image, background_img: Image.Image):
        
        human_img = human_img.convert("RGB")
        width = human_img.width
        height = human_img.height
        
        # Create mask image
        parsed_img, _ = parsing_model(human_img)
        mask_img = parsed_img.convert("L")
        mask_img = mask_img.resize((width, height))
        
        background_img = background_img.convert("RGB")
        background_img = background_img.resize((width, height))

        # Convert to numpy arrays
        human_np = np.array(human_img)
        mask_np = np.array(mask_img)
        background_np = np.array(background_img)

        # Ensure mask is 3-channel (RGB) for compatibility
        mask_np = np.stack((mask_np,) * 3, axis=-1)

        # Apply the mask to human_img
        human_with_background = np.where(mask_np > 0, human_np, background_np)

        # Convert back to PIL Image
        result_img = Image.fromarray(human_with_background.astype('uint8'))

        # Return or save the result
        return result_img

    # Deprecated: created only for testing; not in use.
    @classmethod
    def add_background_v3(cls, foreground_pil: Image.Image, background_pil: Image.Image):
        foreground_pil = foreground_pil.convert("RGB")
        width = foreground_pil.width
        height = foreground_pil.height

        # Create mask image
        parsed_img, _ = parsing_model(foreground_pil)
        mask_pil = parsed_img.convert("L")
        # Apply a threshold to convert to binary image
        # mask_pil = mask_pil.point(lambda p: 1 if p > 127 else 0, mode='1')
        mask_pil = mask_pil.resize((width, height))
        
        # Resize background image
        background_pil = background_pil.convert("RGB")
        background_pil = background_pil.resize((width, height))
        
        # Load the images using PIL
        #foreground_pil = Image.open(human_img_path).convert("RGB")  # The segmented person image
        #background_pil = Image.open(background_img_path).convert("RGB")  # The new background image
        #mask_pil = Image.open(mask_img_path).convert('L')  # The mask image from the human parser model

        # Resize the background to match the size of the foreground
        #background_pil = background_pil.resize(foreground_pil.size)

        # Resize mask
        #mask_pil = mask_pil.resize(foreground_pil.size)

        # Convert PIL images to OpenCV format
        foreground_cv2 = ImageFormatConvertor.pil_to_cv2(foreground_pil)
        background_cv2 = ImageFormatConvertor.pil_to_cv2(background_pil)
        #mask_cv2 = pil_to_cv2(mask_pil)
        mask_cv2 = np.array(mask_pil)  # Directly convert to NumPy array without color conversion

        # Ensure the mask is a single channel image
        if len(mask_cv2.shape) == 3:
            mask_cv2 = cv2.cvtColor(mask_cv2, cv2.COLOR_BGR2GRAY)

        # Threshold the mask to convert it to pure black and white
        _, mask_cv2 = cv2.threshold(mask_cv2, 0, 255, cv2.THRESH_BINARY)

        # Create an inverted mask
        mask_inv_cv2 = cv2.bitwise_not(mask_cv2)

        # Convert mask to 3 channels
        mask_3ch_cv2 = cv2.cvtColor(mask_cv2, cv2.COLOR_GRAY2BGR)
        mask_inv_3ch_cv2 = cv2.cvtColor(mask_inv_cv2, cv2.COLOR_GRAY2BGR)

        # Extract the person from the foreground image using the mask
        person_cv2 = cv2.bitwise_and(foreground_cv2, mask_3ch_cv2)

        # Extract the background where the person is not present
        background_extracted_cv2 = cv2.bitwise_and(background_cv2, mask_inv_3ch_cv2)

        # Combine the person and the new background
        combined_cv2 = cv2.add(person_cv2, background_extracted_cv2)

        # Soften the composite with a light Gaussian blur to feather the mask edges
        # (note: this blurs the entire image, not just the boundary)
        blurred_combined_cv2 = cv2.GaussianBlur(combined_cv2, (5, 5), 0)

        # Convert the result back to PIL format
        combined_pil = ImageFormatConvertor.cv2_to_pil(blurred_combined_cv2)
        

        """
        # Post-processing: Adjust brightness, contrast, etc. (optional)
        enhancer = ImageEnhance.Contrast(combined_pil)
        post_processed_pil = enhancer.enhance(1.2)  # Adjust contrast
        enhancer = ImageEnhance.Brightness(post_processed_pil)
        post_processed_pil = enhancer.enhance(1.2)  # Adjust brightness
        """


        # Save the final image
        # post_processed_pil.save('path_to_save_final_image_1.png')

        # Display the images (optional)
        #foreground_pil.show(title="Foreground")
        #background_pil.show(title="Background")
        #mask_pil.show(title="Mask")
        #combined_pil.show(title="Combined")
        # post_processed_pil.show(title="Post Processed")

        return combined_pil
    
    # Deprecated: created only for testing; not in use.
    @classmethod
    def replace_background(cls, foreground_img_path: str, background_img_path: str):
        # Load the input image (with alpha channel) and the background image
        #input_image = cv2.imread(foreground_img_path, cv2.IMREAD_UNCHANGED)        
        # background_image = cv2.imread(background_img_path)
        foreground_img_pil = Image.open(foreground_img_path)
        width = foreground_img_pil.width
        height = foreground_img_pil.height
        background_image_pil = Image.open(background_img_path)
        background_image_pil = background_image_pil.resize((width, height))
        input_image = ImageFormatConvertor.pil_to_cv2(foreground_img_pil)
        background_image = ImageFormatConvertor.pil_to_cv2(background_image_pil)
        

        # Ensure the input image has an alpha channel
        if input_image.shape[2] != 4:
            raise ValueError("Input image must have an alpha channel")

        # Extract the alpha channel
        alpha_channel = input_image[:, :, 3]

        # Resize the background image to match the input image dimensions
        background_image = cv2.resize(background_image, (input_image.shape[1], input_image.shape[0]))

        # Convert alpha channel to 3 channels
        alpha_channel_3ch = cv2.cvtColor(alpha_channel, cv2.COLOR_GRAY2BGR)
        alpha_channel_3ch = alpha_channel_3ch / 255.0  # Normalize to 0-1

        # Extract the BGR channels of the input image
        input_bgr = input_image[:, :, :3]
        background_bgr = background_image[:,:,:3]
        # Blend the images using the alpha channel
        foreground = cv2.multiply(alpha_channel_3ch, input_bgr.astype(float))
        background = cv2.multiply(1.0 - alpha_channel_3ch, background_bgr.astype(float))
        combined_image = cv2.add(foreground, background).astype(np.uint8)

        # Save and display the result
        cv2.imwrite('path_to_save_combined_image.png', combined_image)
        cv2.imshow('Combined Image', combined_image)
        cv2.waitKey(0)

        cv2.destroyAllWindows()
    
    @classmethod
    def replace_background_with_removebg(cls, foreground_img_pil: Image.Image, background_image_pil: Image.Image):
        foreground_img_pil = foreground_img_pil.convert("RGB")
        width = foreground_img_pil.width
        height = foreground_img_pil.height

        # Resize background image
        background_image_pil = background_image_pil.convert("RGB")
        background_image_pil = background_image_pil.resize((width, height))

        #foreground_img_pil = Image.open(foreground_img_path)
        #width = foreground_img_pil.width
        #height = foreground_img_pil.height
        #background_image_pil = Image.open(background_img_path)
        #background_image_pil = background_image_pil.resize((width, height)) 

        foreground_binary = ImageFormatConvertor.pil_image_to_binary_data(foreground_img_pil)
        background_binary = ImageFormatConvertor.pil_image_to_binary_data(background_image_pil)
        combined_img_pil = cls.remove_bg(foreground_binary, background_binary)
        combined_img_pil.show()
        return combined_img_pil


    @classmethod
    def remove_bg(cls, foreground_binary: bytes, background_binary: bytes):
        # ref: https://www.remove.bg/api#api-reference
        url = "https://api.remove.bg/v1.0/removebg"        
        
        # using form-data as passing binary data is not supported in application/json
        files = {
            "image_file": ('foreground.png', foreground_binary, 'image/png'),
            "bg_image_file": ('background.png', background_binary,  'image/png')
        }

        # get output image in same resolution as input
        payload = {
            "size": "full",
            "shadow_type": "3D"
        }        
        headers = {
            "accept": "image/*",
            'X-Api-Key': REMOVE_BG_KEY
        } 
        remove_bg_request = requests.post(url, files=files, data=payload, headers=headers, timeout=20)
        if remove_bg_request.status_code == 200:
            image_content = remove_bg_request.content
            pil_image = ImageFormatConvertor.binary_data_to_pil_image(image_content)
            return pil_image
        logging.error(f"failed to use remove bg. Status: {remove_bg_request.status_code}. Resp: {remove_bg_request.content}")
        return None
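

# --- Usage sketch (illustrative only) ---
# A minimal example of the non-deprecated remove.bg flow. The file paths below
# are hypothetical placeholders, and REMOVE_BG_KEY must be set in the
# environment for the API call to succeed; this is not a production entry point.
if __name__ == "__main__":
    person_img = Image.open("person.png")        # hypothetical input path
    backdrop_img = Image.open("backdrop.png")    # hypothetical background path
    composited = BackgroundProcessor.replace_background_with_removebg(person_img, backdrop_img)
    if composited is not None:
        composited.save("person_with_backdrop.png")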