samcoding5854 committed on
Commit
b2cb3e9
1 Parent(s): 163abe0

added Save image code

Browse files
Assets/output/images/overlay_image.png DELETED
Binary file (71.3 kB)
 
Assets/saved_masks/inverted_mask_0.png CHANGED
Pages/bgImages.py CHANGED
@@ -43,7 +43,7 @@ def BGIMAGES():
43
  st.image(image, caption=f"Generated image for: {prompt}", width = 300)
44
 
45
  else:
46
- st.write("Please enter a movie title to generate an image.")
47
 
48
  # Path to the folder containing images
49
  image_folder = "Assets/bgImages"
 
43
  st.image(image, caption=f"Generated image for: {prompt}", width = 300)
44
 
45
  else:
46
+ st.write("Please enter prompt for background to generate an image.")
47
 
48
  # Path to the folder containing images
49
  image_folder = "Assets/bgImages"
Pages/createdVisuals.py CHANGED
@@ -8,6 +8,8 @@ def load_images_from_folder(folder):
8
  images.append(os.path.join(folder, filename))
9
  return images
10
 
 
 
11
 
12
  # Main function
13
  def CREATEDIMAGES():
@@ -20,12 +22,12 @@ def CREATEDIMAGES():
20
  images = load_images_from_folder(image_folder)
21
 
22
  # Display images and information in a grid layout with three images per row
23
- col_width = 350 # Adjust this value according to your preference
24
  num_images = len(images)
25
  images_per_row = 3
26
  num_rows = (num_images + images_per_row - 1) // images_per_row
27
 
28
- st.header('Available Templates', divider = 'red')
29
 
30
  # Display images and information in a grid layout with three images per row
31
  for i in range(num_rows):
@@ -33,11 +35,9 @@ def CREATEDIMAGES():
33
  for j in range(images_per_row):
34
  idx = i * images_per_row + j
35
  if idx < num_images:
 
36
  cols[j].image(images[idx], width=col_width)
37
- cols[j].write(images[idx])
38
-
39
 
40
  if __name__ == "__main__":
41
  CREATEDIMAGES()
42
-
43
-
 
8
  images.append(os.path.join(folder, filename))
9
  return images
10
 
11
+ def get_image_name(image_path):
12
+ return os.path.splitext(os.path.basename(image_path))[0]
13
 
14
  # Main function
15
  def CREATEDIMAGES():
 
22
  images = load_images_from_folder(image_folder)
23
 
24
  # Display images and information in a grid layout with three images per row
25
+ col_width = 350 # Adjust this value according to your preference
26
  num_images = len(images)
27
  images_per_row = 3
28
  num_rows = (num_images + images_per_row - 1) // images_per_row
29
 
30
+ st.header('Available Templates', divider='red')
31
 
32
  # Display images and information in a grid layout with three images per row
33
  for i in range(num_rows):
 
35
  for j in range(images_per_row):
36
  idx = i * images_per_row + j
37
  if idx < num_images:
38
+ image_name = get_image_name(images[idx])
39
  cols[j].image(images[idx], width=col_width)
40
+ cols[j].write(image_name)
 
41
 
42
  if __name__ == "__main__":
43
  CREATEDIMAGES()
 
 
Pages/imageBB.py CHANGED
@@ -9,8 +9,7 @@ import cv2
9
  import numpy as np
10
 
11
  @st.cache_data
12
- def get_masks(rect,img_path):
13
-
14
  CHECKPOINT_PATH = os.path.join("weights", "sam_vit_h_4b8939.pth")
15
  DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
16
  MODEL_TYPE = "vit_h"
@@ -34,8 +33,6 @@ def get_masks(rect,img_path):
34
  )
35
  return masks
36
 
37
-
38
-
39
  def run(img_path):
40
  st.set_option("deprecation.showfileUploaderEncoding", False)
41
 
@@ -51,14 +48,11 @@ def run(img_path):
51
  rects = st_img_label(resized_img, box_color="red", rects=st.session_state.rects)
52
  st.session_state.rects = rects
53
  else:
54
- st.image(resized_img, caption="Uploaded Image",width=300, use_column_width=True)
55
 
56
  for rect in st.session_state.rects:
57
- # st.write(f"Rectangle: {rect}")
58
-
59
  with st.spinner('Please wait while the product image is being extracted...'):
60
-
61
- masks = get_masks(rect,img_path)
62
 
63
  save_dir = "Assets/saved_masks"
64
  if not os.path.exists(save_dir):
@@ -84,81 +78,85 @@ def run(img_path):
84
  st.image(image, width=300, caption=f"Selected image: {selected_image}")
85
 
86
  if st.button("Create Image"):
87
- # Read the base image and background image
88
- image_bgr = cv2.imread(img_path)
89
- background_bgr = cv2.imread(image_pathBG)
90
-
91
- # Resize the background image to match the size of image_bgr
92
- background_bgr = cv2.resize(background_bgr, (image_bgr.shape[1], image_bgr.shape[0]))
93
-
94
- # Convert the base image to RGB format for mask prediction if it's not already in RGB
95
- if image_bgr.shape[2] == 3: # No alpha channel, standard BGR image
96
- image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
97
- else:
98
- image_rgb = image_bgr[:, :, :3] # Drop alpha channel if it exists
99
-
100
- # Assuming masks is a binary mask, convert it to uint8 format
101
- mask = (masks[0] > 0).astype(np.uint8) * 255
102
-
103
- # Apply a Gaussian blur to the mask to smooth the edges
104
- mask = cv2.GaussianBlur(mask, (3,3), 0)
105
-
106
- # Ensure the image has an alpha channel
107
- if image_bgr.shape[2] == 3: # If no alpha channel, add one
108
- b, g, r = cv2.split(image_bgr)
109
- alpha_channel = mask # Use the blurred mask as the alpha channel
110
- image_bgra = cv2.merge((b, g, r, alpha_channel))
111
- else:
112
- image_bgra = image_bgr
113
-
114
- # Get the dimensions of the images
115
- masked_height, masked_width = image_bgra.shape[:2]
116
- background_height, background_width = background_bgr.shape[:2]
117
-
118
- # Calculate the coordinates to place the masked image in the center of the background image
119
- x_offset = (background_width - masked_width) // 2
120
- y_offset = (background_height - masked_height) // 2
121
-
122
- # Resize the masked image if it is larger than the background area
123
- if masked_width > background_width or masked_height > background_height:
124
- scaling_factor = min(background_width / masked_width, background_height / masked_height)
125
- new_size = (int(masked_width * scaling_factor), int(masked_height * scaling_factor))
126
- image_bgra = cv2.resize(image_bgra, new_size, interpolation=cv2.INTER_AREA)
127
- masked_height, masked_width = image_bgra.shape[:2]
128
- x_offset = (background_width - masked_width) // 2
129
- y_offset = (background_height - masked_height) // 2
130
-
131
- # Create a copy of the background image and convert it to BGRA
132
- background_bgra = cv2.cvtColor(background_bgr, cv2.COLOR_BGR2BGRA)
133
-
134
- # Overlay the masked image onto the center of the background image
135
- overlay_image = background_bgra.copy()
136
-
137
- # Only update the region where the segmented image will be placed
138
- overlay = np.zeros_like(background_bgra)
139
- overlay[y_offset:y_offset+masked_height, x_offset:x_offset+masked_width] = image_bgra
140
-
141
- # Create the alpha mask for blending
142
- alpha_mask = overlay[:, :, 3] / 255.0
143
- alpha_inv = 1.0 - alpha_mask
144
-
145
- # Modify alpha channel for smoother blending
146
- alpha_mask = alpha_mask ** 0.5 # Applying square root for smoother blending
147
-
148
- # Blend the images
149
- for c in range(0, 3):
150
- overlay_image[:, :, c] = (alpha_mask * overlay[:, :, c] + alpha_inv * overlay_image[:, :, c])
151
-
152
- # Set the alpha channel
153
- overlay_image[:, :, 3] = np.clip(overlay[:, :, 3] + background_bgra[:, :, 3], 0, 255)
154
-
155
- # Save the result
156
- output_path = 'Assets/output/images/overlay_image.png'
157
- cv2.imwrite(output_path, overlay_image)
158
-
159
- # Display the overlay image
160
- st.image(output_path, caption="Overlay Image", use_column_width=True, width=300)
161
-
 
 
 
 
162
 
163
  def annotate():
164
  st.session_state.saved = True
@@ -166,3 +164,5 @@ def run(img_path):
166
  if st.session_state.rects:
167
  st.button(label="Save", on_click=annotate)
168
 
 
 
 
9
  import numpy as np
10
 
11
  @st.cache_data
12
+ def get_masks(rect, img_path):
 
13
  CHECKPOINT_PATH = os.path.join("weights", "sam_vit_h_4b8939.pth")
14
  DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
15
  MODEL_TYPE = "vit_h"
 
33
  )
34
  return masks
35
 
 
 
36
  def run(img_path):
37
  st.set_option("deprecation.showfileUploaderEncoding", False)
38
 
 
48
  rects = st_img_label(resized_img, box_color="red", rects=st.session_state.rects)
49
  st.session_state.rects = rects
50
  else:
51
+ st.image(resized_img, caption="Uploaded Image", width=300, use_column_width=True)
52
 
53
  for rect in st.session_state.rects:
 
 
54
  with st.spinner('Please wait while the product image is being extracted...'):
55
+ masks = get_masks(rect, img_path)
 
56
 
57
  save_dir = "Assets/saved_masks"
58
  if not os.path.exists(save_dir):
 
78
  st.image(image, width=300, caption=f"Selected image: {selected_image}")
79
 
80
  if st.button("Create Image"):
81
+ st.session_state.create_image = True
82
+
83
+ if st.session_state.get("create_image"):
84
+ # Read the base image and background image
85
+ image_bgr = cv2.imread(img_path)
86
+ background_bgr = cv2.imread(image_pathBG)
87
+
88
+ # Resize the background image to match the size of image_bgr
89
+ background_bgr = cv2.resize(background_bgr, (image_bgr.shape[1], image_bgr.shape[0]))
90
+
91
+ # Convert the base image to RGB format for mask prediction if it's not already in RGB
92
+ if image_bgr.shape[2] == 3: # No alpha channel, standard BGR image
93
+ image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
94
+ else:
95
+ image_rgb = image_bgr[:, :, :3] # Drop alpha channel if it exists
96
+
97
+ # Assuming masks is a binary mask, convert it to uint8 format
98
+ mask = (masks[0] > 0).astype(np.uint8) * 255
99
+
100
+ # Apply a Gaussian blur to the mask to smooth the edges
101
+ mask = cv2.GaussianBlur(mask, (3, 3), 0)
102
+
103
+ # Ensure the image has an alpha channel
104
+ if image_bgr.shape[2] == 3: # If no alpha channel, add one
105
+ b, g, r = cv2.split(image_bgr)
106
+ alpha_channel = mask # Use the blurred mask as the alpha channel
107
+ image_bgra = cv2.merge((b, g, r, alpha_channel))
108
+ else:
109
+ image_bgra = image_bgr
110
+
111
+ # Get the dimensions of the images
112
+ masked_height, masked_width = image_bgra.shape[:2]
113
+ background_height, background_width = background_bgr.shape[:2]
114
+
115
+ # Calculate the coordinates to place the masked image in the center of the background image
116
+ x_offset = (background_width - masked_width) // 2
117
+ y_offset = (background_height - masked_height) // 2
118
+
119
+ # Resize the masked image if it is larger than the background area
120
+ if masked_width > background_width or masked_height > background_height:
121
+ scaling_factor = min(background_width / masked_width, background_height / masked_height)
122
+ new_size = (int(masked_width * scaling_factor), int(masked_height * scaling_factor))
123
+ image_bgra = cv2.resize(image_bgra, new_size, interpolation=cv2.INTER_AREA)
124
+ masked_height, masked_width = image_bgra.shape[:2]
125
+ x_offset = (background_width - masked_width) // 2
126
+ y_offset = (background_height - masked_height) // 2
127
+
128
+ # Create a copy of the background image and convert it to BGRA
129
+ background_bgra = cv2.cvtColor(background_bgr, cv2.COLOR_BGR2BGRA)
130
+
131
+ # Overlay the masked image onto the center of the background image
132
+ overlay_image = background_bgra.copy()
133
+
134
+ # Only update the region where the segmented image will be placed
135
+ overlay = np.zeros_like(background_bgra)
136
+ overlay[y_offset:y_offset + masked_height, x_offset:x_offset + masked_width] = image_bgra
137
+
138
+ # Create the alpha mask for blending
139
+ alpha_mask = overlay[:, :, 3] / 255.0
140
+ alpha_inv = 1.0 - alpha_mask
141
+
142
+ # Modify alpha channel for smoother blending
143
+ alpha_mask = alpha_mask ** 0.5 # Applying square root for smoother blending
144
+
145
+ # Blend the images
146
+ for c in range(0, 3):
147
+ overlay_image[:, :, c] = (alpha_mask * overlay[:, :, c] + alpha_inv * overlay_image[:, :, c])
148
+
149
+ # Set the alpha channel
150
+ overlay_image[:, :, 3] = np.clip(overlay[:, :, 3] + background_bgra[:, :, 3], 0, 255)
151
+
152
+ # Prompt user for the filename
153
+ filename = st.text_input("Enter a name to save the image:")
154
+ if filename and st.button("Save Image"):
155
+ output_path = f'Assets/output/images/{filename}.png'
156
+ cv2.imwrite(output_path, overlay_image)
157
+
158
+ # Display the overlay image
159
+ st.image(output_path, caption="Created Image", use_column_width=True, width=300)
160
 
161
  def annotate():
162
  st.session_state.saved = True
 
164
  if st.session_state.rects:
165
  st.button(label="Save", on_click=annotate)
166
 
167
+ # Example of calling the function
168
+ # run("path/to/your/image.jpg")