raj999 committed
Commit 5156d56 · 1 Parent(s): 66c254b

one commit to rule them all

Files changed (9)
  1. app.py +265 -0
  2. identified_species/test.py +0 -0
  3. inference.py +103 -0
  4. predict.py +80 -0
  5. predict_copy.py +110 -0
  6. predict_vit.py +66 -0
  7. rag.py +84 -0
  8. requirements.txt +16 -0
  9. setup.sh +12 -0
app.py ADDED
@@ -0,0 +1,265 @@
+ import os
+ import re
+ import glob
+ import tempfile
+ import urllib.request
+ from datetime import datetime
+
+ import cv2
+ import numpy as np
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ import streamlit as st
+ from PIL import Image
+
+ from deepforest import main
+ from deepforest import get_data
+ from inference import split_image_from_dataframe
+ from predict import extract_features, predict_similarity, compare_features, extract_features_cp
+
+ # intake library and plugin
+ # import intake
+ # from intake_zenodo_fetcher import download_zenodo_files_for_entry
+
+ # geospatial libraries
+ # import geopandas as gpd
+ # from rasterio.transform import from_origin
+ # import rasterio.features
+ # import fiona
+ # from shapely.geometry import shape, mapping, box
+ # from shapely.geometry.multipolygon import MultiPolygon
+
+ # # machine learning libraries (disabled detectron2 path)
+ # from detectron2 import model_zoo
+ # from detectron2.engine import DefaultPredictor
+ # from detectron2.utils.visualizer import Visualizer, ColorMode
+ # from detectron2.config import get_cfg
+ # from detectron2.engine import DefaultTrainer
+
+ # # define the URL to retrieve the model
+ # fn = 'model_final.pth'
+ # url = f'https://zenodo.org/record/5515408/files/{fn}?download=1'
+ # urllib.request.urlretrieve(url, config['model'] + '/' + fn)
+
+ # import geoviews.tile_sources as gts
+ # import hvplot.pandas
+ # import hvplot.xarray
+ # # hv.extension('bokeh', width=100)
+
+ # cfg = get_cfg()
+ # # to predict on a CPU, set cfg.MODEL.DEVICE = 'cpu'; leave 'cuda' for GPU
+ # cfg.MODEL.DEVICE = 'cuda'
+
+ # # model and hyperparameter selection
+ # cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml"))
+ # cfg.DATALOADER.NUM_WORKERS = 2
+ # cfg.SOLVER.IMS_PER_BATCH = 2
+ # cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
+
+ # # path to the saved pre-trained model weights
+ # cfg.MODEL.WEIGHTS = config['model'] + '/model_final.pth'
+
+ # # confidence threshold at which we predict
+ # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.15
+
+ # # settings for predictions using the detectron config
+ # predictor = DefaultPredictor(cfg)
+ # outputs = predictor(im)
+ # v = Visualizer(im[:, :, ::-1], scale=1.5, instance_mode=ColorMode.IMAGE_BW)  # remove the colors of unsegmented pixels
+ # v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
+ # image = cv2.cvtColor(v.get_image()[:, :, :], cv2.COLOR_BGR2RGB)
+ # st.image(image, caption='Segmented Panoramic Image (detectree)', channels='RGB', use_column_width=True)
+
+ # Load the pre-trained DeepForest tree-crown detector
+ model = main.deepforest()
+ model.use_release()
+
+ # Set the page configuration
+ st.set_page_config(page_title="Wise-Vision", page_icon=":deciduous_tree:")
+
+ # Title and description
+ st.title("🌳 Wise-Vision")
+ st.subheader("AI + Environment Hackathon 2024")
+
+ # Sidebar information
+ st.sidebar.title("About")
+ st.sidebar.info(
+     """
+     This app was built for the AI + Environment Hackathon 2024.
+     Upload a panoramic image and specify a folder path to detect tree species in the image.
+     Upload a Word file to integrate species knowledge into the image.
+     The output is a panoramic image with identified trees and knowledge symbols.
+     """
+ )
+
+ st.sidebar.title("Contact")
+ st.sidebar.info(
+     """
+     For more information, contact us at:
+     [rajbhalwankar@protonmail.com]
+     """
+ )
+
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+
+ # Create a new folder within the script directory for storing cropped images
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+ output_folder_name = f"output_{timestamp}"
+ output_image_folder = os.path.join(script_dir, output_folder_name)
+ os.makedirs(output_image_folder, exist_ok=True)
+ output_image_folder = os.path.abspath(output_image_folder)
+
+ # Paths for the annotated image and the Excel file within the new folder
+ cropped_image_path = os.path.join(output_image_folder, f"panoramic_{timestamp}.png")
+ excel_output_path = os.path.join(output_image_folder, f"results_{timestamp}.xlsx")
+
+ # Input: upload panoramic image
+ uploaded_image = st.file_uploader("Upload a panoramic image", type=['png', 'jpg', 'jpeg'])
+
+
+ def extract_treespecies_features(folder_path):
+     """Extract a VGG16 feature vector for every reference image in folder_path."""
+     image_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path)
+                    if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
+     species_feature_list = [{"feature": extract_features(file), "file_name": file} for file in image_files]
+     return species_feature_list
+
+
+ def perform_inference(cropped_images, species_feature_list, img_df):
+     """Match every cropped tree image against the reference species features."""
+     for img_idx, item in enumerate(cropped_images):
+         image = item["image"]
+         feature_cp = extract_features_cp(image)
+         row_results = []
+         species_result = []
+         emoji = []
+         species_context = []
+         for idx, species in enumerate(species_feature_list):
+             # euclidean_dist, cos_sim = compare_features(feature_cp, species["feature"])
+
+             # Predict similarity
+             is_similar = predict_similarity(feature_cp, species["feature"], threshold=0.92)
+             result = "Yes" if is_similar else "No"
+
+             if result == "Yes":
+                 item[f"result_{idx}"] = result
+                 item[f"file_name_{idx}"] = species["file_name"]
+                 row_results.append(species["file_name"])
+                 # Derive the species name from the matching reference file name
+                 species_result.append(os.path.splitext(os.path.basename(species["file_name"]))[0])
+                 # species_info = retriever.invoke(f"Scientific name:{tree_species}")
+                 # ans = generate_image(species_info, client)
+                 # emoji.append(ans)
+                 # species_context.append(", ".join(doc.page_content for doc in species_info))
+
+         img_df.at[img_idx, "species_identified"] = ", ".join(species_result) if species_result else "No similar species found"
+         img_df.at[img_idx, "result_file_path"] = ", ".join(row_results) if row_results else ""
+         # img_df.at[img_idx, "emoji"] = ", ".join(emoji) if emoji else ""
+         # img_df.at[img_idx, "retrieved context"] = ", ".join(species_context) if species_context else ""
+
+     return cropped_images
+
+
+ # Display the uploaded image and the detected tree species
+ if uploaded_image is not None:
+     with tempfile.NamedTemporaryFile(delete=False, suffix='.JPG') as temp_file:
+         temp_file.write(uploaded_image.read())
+         temp_file_path = temp_file.name
+
+     sample_image_path = get_data(temp_file_path)
+     boxes = model.predict_image(path=sample_image_path, return_plot=False)
+     img_actual = model.predict_image(path=sample_image_path, return_plot=True, color=(137, 0, 0), thickness=9)
+     # predict_image returns the plot in BGR (OpenCV style)
+     st.image(img_actual, caption='Segmented Panoramic Image', channels='BGR', use_column_width=True)
+     st.success("Sample Dataframe:")
+     st.dataframe(boxes.head())
+     plt.imshow(img_actual[:, :, ::-1])
+     plt.savefig(cropped_image_path)
+
+     accuracy_threshold = st.slider("Accuracy threshold for cropping images:", min_value=0.1, max_value=1.0, value=0.4)
+     # Keep only detections above the chosen confidence score
+     boxes = boxes[boxes["score"] >= accuracy_threshold].reset_index(drop=True)
+     images_list = split_image_from_dataframe(boxes, temp_file_path, output_image_folder)
+     image_width = 200
+     st.success("Sample Images:")
+     # Display up to three sample crops in a row
+     for i, (col, item) in enumerate(zip(st.columns(3), images_list[:3]), start=1):
+         with col:
+             st.image(item["image"], caption=f"Sample {i}", width=image_width)
+
+     folder_path = './identified_species'
+     species_feature_list = extract_treespecies_features(folder_path)
+     final_result = perform_inference(images_list, species_feature_list, boxes)
+     st.success("Final Data:")
+     st.dataframe(boxes)
+     boxes.to_excel(excel_output_path)
+
+     for index, row in boxes.iterrows():
+         species_identified = row['species_identified']
+         if species_identified != "No similar species found":
+             crop_path = row['cropped_image_path']
+             result_file_path = row['result_file_path']
+             if isinstance(result_file_path, list):
+                 result_file_path = result_file_path[0]
+             # Show only the first match when several species matched
+             result_file_path = result_file_path.split(',')[0]
+             st.write(species_identified)
+             col1, col2 = st.columns(2)
+             with col1:
+                 st.image(crop_path, caption='Cropped Image')
+             with col2:
+                 st.image(result_file_path, caption='Species Match')
+
+ # Detect tree species
+ # detected_species = detect_tree_species(image, folder_path)
+
+ # Display detected tree species
+ # st.write("### Detected Tree Species:")
+ # for species in detected_species:
+ #     st.write(f"- {species}")
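Note: app.py ties the pieces together — DeepForest detects tree crowns in the uploaded panorama, split_image_from_dataframe (inference.py) crops each detection, and the VGG16 features from predict.py are matched against the reference images in ./identified_species. Assuming the dependencies from requirements.txt and setup.sh are installed, the app would be started with "streamlit run app.py" from the repository root.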
identified_species/test.py ADDED
File without changes
inference.py ADDED
@@ -0,0 +1,103 @@
+ import os
+
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ from PIL import Image
+
+ from deepforest import main
+ from deepforest import get_data
+ from predict import extract_features, predict_similarity, compare_features, extract_features_cp
+
+ # Load the pre-trained DeepForest tree-crown detector
+ model = main.deepforest()
+ model.use_release()
+
+
+ def split_image_from_dataframe(dataframe, panoramic_image, output_folder_name):
+     """
+     Split an image into multiple crops based on the bounding boxes in a dataframe.
+
+     Parameters:
+     dataframe (pd.DataFrame): DeepForest predictions with xmin/ymin/xmax/ymax columns.
+     panoramic_image (str): Path to the panoramic source image.
+     output_folder_name (str): Folder in which the crops are saved.
+     """
+     os.makedirs(output_folder_name, exist_ok=True)
+     image = Image.open(panoramic_image)
+     cropped_images_info = []
+     cropped_image_paths = []
+     for i, row in dataframe.iterrows():
+         left, top, right, bottom = row['xmin'], row['ymin'], row['xmax'], row['ymax']
+         cropped_image = image.crop((left, top, right, bottom))
+         cropped_images_info.append({
+             'image': cropped_image,
+             'position': (left, top, right, bottom),
+         })
+         crop_path = os.path.join(output_folder_name, f'cropped_image_{i}.png')
+         cropped_image_paths.append(crop_path)
+         cropped_image.save(crop_path)  # Save each cropped image
+
+     dataframe['cropped_image_path'] = cropped_image_paths
+     return cropped_images_info
+
+
+ # Load reference images from a folder and extract their features
+ def extract_treespecies_features(folder_path):
+     image_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path)
+                    if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
+     species_feature_list = [{"feature": extract_features(file), "file_name": file} for file in image_files]
+     return species_feature_list
+
+
+ def perform_inference(images_list, species_feature_list):
+     for img_idx, item in enumerate(images_list):
+         image = item["image"]
+         feature_cp = extract_features_cp(image)
+         for idx, species in enumerate(species_feature_list):
+             euclidean_dist, cos_sim = compare_features(feature_cp, species["feature"])
+
+             # Predict similarity
+             is_similar = predict_similarity(feature_cp, species["feature"], threshold=0.8)
+             result = "Yes" if is_similar else "No"
+             if result == "Yes":
+                 item[f"result_{idx}"] = result
+                 item[f"file_name_{idx}"] = species["file_name"]
+
+     return images_list
+
+
+ if __name__ == '__main__':
+     pan_image = "D:/Downloads/image/plant_images/plant_images/drone_igapo_flooded_forest/DJI_20240504124024_0037_D.JPG"
+
+     sample_image_path = get_data(pan_image)
+     img_df = model.predict_image(path=sample_image_path, return_plot=False)
+     img_actual = model.predict_tile(raster_path=sample_image_path, return_plot=True, patch_size=100, patch_overlap=0.25)
+     # predict_image/predict_tile return the plot in BGR (OpenCV style), but matplotlib expects RGB,
+     # so switch the channel order. Many deepforest functions perform this flip for you with a warning.
+     os.makedirs("cropped_test3", exist_ok=True)
+     plt.imshow(img_actual[:, :, ::-1])
+     plt.savefig("cropped_test3/panoramic_2.png")
+
+     images_list = split_image_from_dataframe(img_df, pan_image, "cropped_test3")
+     folder_path = 'D:/Downloads/image/plant_images/plant_images/drone_igapo_flooded_forest/identified_species'
+     species_feature_list = extract_treespecies_features(folder_path)
+     final_result = perform_inference(images_list, species_feature_list)
+     print(final_result)
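For reference, a minimal sketch of calling split_image_from_dataframe on its own (hypothetical file names and box coordinates; the xmin/ymin/xmax/ymax columns mirror what DeepForest's predict_image returns — note that importing inference also loads the DeepForest model):

    import pandas as pd
    from inference import split_image_from_dataframe

    boxes = pd.DataFrame({
        'xmin': [10, 300], 'ymin': [20, 40],
        'xmax': [120, 450], 'ymax': [150, 200],
    })
    crops = split_image_from_dataframe(boxes, 'panorama.JPG', 'crops_out')
    print(boxes['cropped_image_path'])  # paths of the saved crops_out/cropped_image_*.png files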
predict.py ADDED
@@ -0,0 +1,80 @@
+ import numpy as np
+ from keras.applications.vgg16 import VGG16, preprocess_input
+ from keras.applications.efficientnet import EfficientNetB0
+ from keras.preprocessing import image
+ from keras.models import Model
+ from scipy.spatial.distance import euclidean
+ from sklearn.metrics.pairwise import cosine_similarity
+ from PIL import Image
+
+ # Load VGG16 and expose the first fully connected layer as the feature extractor
+ base_model = VGG16(weights='imagenet')
+ model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
+
+ # Alternative: EfficientNetB0 as the backbone
+ # base_model = EfficientNetB0(weights='imagenet')
+ # model = Model(inputs=base_model.input, outputs=base_model.get_layer('top_activation').output)
+
+
+ def extract_features_cp(pil_img: Image.Image) -> np.ndarray:
+     """Extract a flattened VGG16 fc1 feature vector from an in-memory PIL image."""
+     # Resize to the input size expected by VGG16 and make sure the image is RGB
+     pil_img = pil_img.convert('RGB').resize((224, 224))
+
+     # Convert the PIL image to a numpy array and add the batch dimension
+     img_data = image.img_to_array(pil_img)
+     img_data = np.expand_dims(img_data, axis=0)
+
+     # Preprocess the image data and predict the features
+     img_data = preprocess_input(img_data)
+     features = model.predict(img_data)
+
+     # Return the features as a flattened array
+     return features.flatten()
+
+
+ def extract_features(img_path):
+     """Extract a flattened VGG16 fc1 feature vector from an image on disk."""
+     img = image.load_img(img_path, target_size=(224, 224))
+     img_data = image.img_to_array(img)
+     img_data = np.expand_dims(img_data, axis=0)
+     img_data = preprocess_input(img_data)
+     features = model.predict(img_data)
+     return features.flatten()  # Flatten the features to a 1-D vector
+
+
+ def compare_features(features1, features2):
+     # Euclidean distance
+     euclidean_dist = euclidean(features1, features2)
+
+     # Cosine similarity
+     cos_sim = cosine_similarity([features1], [features2])[0][0]
+
+     return euclidean_dist, cos_sim
+
+
+ def predict_similarity(features1, features2, threshold=0.5):
+     """Return True when the cosine similarity of the two feature vectors exceeds threshold."""
+     _, cos_sim = compare_features(features1, features2)
+     return cos_sim > threshold
+
+
+ if __name__ == '__main__':
+     # Example usage
+     img_path1 = "D:/Downloads/image/rose.jpg"
+     img_path2 = "D:/Downloads/image/rose.jpg"
+
+     # Extract features
+     features1 = extract_features(img_path1)
+     features2 = extract_features(img_path2)
+
+     # Compare features
+     euclidean_dist, cos_sim = compare_features(features1, features2)
+     print(f'Euclidean Distance: {euclidean_dist}')
+     print(f'Cosine Similarity: {cos_sim}')
+
+     # Predict similarity
+     is_similar = predict_similarity(features1, features2, threshold=0.8)
+     print(f'Are the images similar? {"Yes" if is_similar else "No"}')
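A minimal sketch (hypothetical paths) of how these helpers compare one cropped tree against a small reference library, mirroring what app.py does per crop:

    from predict import extract_features, predict_similarity

    reference_paths = ['identified_species/speciesA.jpg', 'identified_species/speciesB.jpg']
    query = extract_features('cropped_image_0.png')
    matches = [p for p in reference_paths
               if predict_similarity(query, extract_features(p), threshold=0.8)]
    print(matches)  # reference images judged similar to the crop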
predict_copy.py ADDED
@@ -0,0 +1,110 @@
+ import numpy as np
+ from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
+ from tensorflow.keras.preprocessing import image
+ from tensorflow.keras.preprocessing.image import ImageDataGenerator
+ from tensorflow.keras.models import Model
+ from scipy.spatial.distance import euclidean
+ from sklearn.metrics.pairwise import cosine_similarity
+
+ # Load VGG16 and expose the first fully connected layer as the feature extractor
+ base_model = VGG16(weights='imagenet')
+ model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
+
+ # Define data augmentation
+ datagen = ImageDataGenerator(
+     rotation_range=20,
+     width_shift_range=0.2,
+     height_shift_range=0.2,
+     shear_range=0.2,
+     zoom_range=0.2,
+     horizontal_flip=True,
+     fill_mode='nearest'
+ )
+
+
+ def extract_features(img):
+     img = img.resize((224, 224))  # Resize to the input size expected by VGG16
+     img_data = image.img_to_array(img)
+     img_data = np.expand_dims(img_data, axis=0)
+     img_data = preprocess_input(img_data)
+     features = model.predict(img_data)
+     return features.flatten()  # Flatten the features to a 1-D vector
+
+
+ def augment_image(img):
+     x = image.img_to_array(img)
+     x = x.reshape((1,) + x.shape)  # Reshape to (1, height, width, channels)
+
+     # Generate batches of augmented images
+     augmented_images = []
+     for batch in datagen.flow(x, batch_size=1):
+         augmented_images.append(image.array_to_img(batch[0]))
+         if len(augmented_images) >= 5:  # Generate 5 augmented images
+             break
+     return augmented_images
+
+
+ def extract_features_with_augmentation(img_path):
+     original_img = image.load_img(img_path)
+     augmented_images = augment_image(original_img)
+
+     # Extract features from the original image
+     features = [extract_features(original_img)]
+
+     # Extract features from the augmented images
+     for aug_img in augmented_images:
+         features.append(extract_features(aug_img))
+
+     return np.mean(features, axis=0)  # Return the average feature vector
+
+
+ def extract_features_with_augmentation_cp(pil_img):
+     """In-memory variant: average features over a PIL image and its augmentations."""
+     pil_img = pil_img.resize((224, 224))
+     augmented_images = augment_image(pil_img)
+
+     # Extract features from the original image
+     features = [extract_features(pil_img)]
+
+     # Extract features from the augmented images
+     for aug_img in augmented_images:
+         features.append(extract_features(aug_img))
+
+     return np.mean(features, axis=0)  # Return the average feature vector
+
+
+ def compare_features(features1, features2):
+     # Euclidean distance
+     euclidean_dist = euclidean(features1, features2)
+
+     # Cosine similarity
+     cos_sim = cosine_similarity([features1], [features2])[0][0]
+
+     return euclidean_dist, cos_sim
+
+
+ def predict_similarity(features1, features2, threshold=0.5):
+     _, cos_sim = compare_features(features1, features2)
+     return cos_sim > threshold
+
+
+ if __name__ == '__main__':
+     # Example usage
+     img_path1 = "D:/Downloads/image/rose.jpg"
+     img_path2 = "D:/Downloads/image/rose3.jpg"
+
+     # Extract features
+     features1 = extract_features_with_augmentation(img_path1)
+     features2 = extract_features_with_augmentation(img_path2)
+
+     # Compare features
+     euclidean_dist, cos_sim = compare_features(features1, features2)
+     print(f'Euclidean Distance: {euclidean_dist}')
+     print(f'Cosine Similarity: {cos_sim}')
+
+     # Predict similarity
+     is_similar = predict_similarity(features1, features2, threshold=0.8)
+     print(f'Are the images similar? {"Yes" if is_similar else "No"}')
predict_vit.py ADDED
@@ -0,0 +1,66 @@
+ import torch
+ import clip
+ from PIL import Image
+ import numpy as np
+ from sklearn.metrics.pairwise import cosine_similarity
+
+ # Load the CLIP model
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model, preprocess = clip.load("ViT-B/32", device=device)
+
+
+ def extract_features_cp(pil_img: Image.Image) -> np.ndarray:
+     # Preprocess the PIL image using CLIP's preprocess function
+     img = preprocess(pil_img).unsqueeze(0).to(device)
+
+     # Extract features using CLIP
+     with torch.no_grad():
+         features = model.encode_image(img)
+
+     # Normalize the features
+     features = features / features.norm(dim=-1, keepdim=True)
+
+     # Convert to a numpy array and return as a flattened array
+     return features.cpu().numpy().flatten()
+
+
+ def extract_features(img_path):
+     # Load and preprocess the image
+     img = preprocess(Image.open(img_path)).unsqueeze(0).to(device)
+
+     # Extract features using CLIP
+     with torch.no_grad():
+         features = model.encode_image(img)
+
+     # Normalize the features
+     features = features / features.norm(dim=-1, keepdim=True)
+
+     # Convert to a numpy array
+     return features.cpu().numpy().flatten()
+
+
+ def compare_features(features1, features2):
+     # Cosine similarity
+     cos_sim = cosine_similarity([features1], [features2])[0][0]
+     return cos_sim
+
+
+ def predict_similarity(features1, features2, threshold=0.5):
+     cos_sim = compare_features(features1, features2)
+     return cos_sim > threshold
+
+
+ if __name__ == '__main__':
+     # Example usage
+     img_path1 = 'result.jpg'
+     img_path2 = 'Vochysia.jpg'
+
+     # Extract features
+     features1 = extract_features(img_path1)
+     features2 = extract_features(img_path2)
+
+     # Compare features
+     cos_sim = compare_features(features1, features2)
+     print(f'Cosine Similarity: {cos_sim}')
+
+     # Predict similarity
+     is_similar = predict_similarity(features1, features2, threshold=0.8)
+     print(f'Are the images similar? {"Yes" if is_similar else "No"}')
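Because extract_features L2-normalizes the CLIP embeddings, the cosine similarity computed by compare_features reduces to a plain dot product; an equivalent shortcut, given two vectors returned by extract_features:

    import numpy as np
    cos_sim = float(np.dot(features1, features2))  # same value as compare_features(features1, features2)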
rag.py ADDED
@@ -0,0 +1,84 @@
+ from os import getenv
+
+ from dotenv import load_dotenv
+ import openai
+ from openai import OpenAI
+ from langchain_community.document_loaders import Docx2txtLoader
+ from langchain_community.document_transformers import DoctranTextTranslator
+ from langchain_community.retrievers import BM25Retriever
+ from langchain_core.documents import Document
+ from langchain_text_splitters import CharacterTextSplitter
+
+ # Load environment variables (e.g. OPENROUTER_API_KEY) from a .env file
+ load_dotenv()
+
+ # Set your OpenAI API key
+ # openai.api_key = 'sk-REDACTED'
+
+
+ def setup_retriever():
+     knowledge_file = "E:/Backup_K20pro/Download/treesat_benchmark/Canopy species list and uses (PT)_pt_en.docx"
+     loader = Docx2txtLoader(knowledge_file)
+     data = loader.load()
+     text_splitter = CharacterTextSplitter(
+         separator="-",
+         chunk_size=600,
+         chunk_overlap=0,
+         length_function=len,
+         is_separator_regex=False,
+     )
+     chunks = text_splitter.split_documents(data)
+     retriever = BM25Retriever.from_documents(chunks)
+     retriever.k = 1
+     return retriever
+
+
+ retriever = setup_retriever()
+ species = 'Byrsonima'
+ species_info = retriever.invoke(f"Scientific name:{species}")
+ print(species_info)
+ # qa_translator = DoctranTextTranslator(language="english")
+ # translated_document = qa_translator.transform_documents(data)
+ # print(translated_document)
+
+
+ info = 'Scientific name:Licania sp.\n\nFamily:Chrysobalanaceae\n\nPopular name:They are generally known as caripé or macucu, among other generic names.\n\nHabitat:Licania is a large genus, with dozens of species distributed across all Amazon habitats; the species are difficult to identify without fertile material.\n\nUses:Some species are edible; others, known as caripé, had (have) their bark roasted (presence of silica), macerated and, together with clay, used in the preparation of ceramics by indigenous populations.'
+
+
+ # Gets the API key from the environment variable OPENROUTER_API_KEY
+ def setup_client():
+     client = OpenAI(
+         base_url="https://openrouter.ai/api/v1",
+         api_key=getenv("OPENROUTER_API_KEY"),
+     )
+     return client
+
+
+ client = setup_client()
+
+
+ def generate_image(species_info, client):
+     completion = client.chat.completions.create(
+         model="openai/gpt-3.5-turbo",
+         messages=[
+             {
+                 "role": "user",
+                 "content": f"Using the tree species information provided below, and in particular its 'Uses:' section, generate one useful and informative Unicode symbol (emoji) to place on a drone panoramic image. Tree species info: {species_info}",
+             },
+         ],
+     )
+     return completion.choices[0].message.content
+
+
+ if __name__ == '__main__':
+     ans = generate_image(species_info, client)
+     print(ans)
+     # # Define the prompt and parameters for the request
+     # prompt = "Once upon a time"
+     # response = openai.Completion.create(
+     #     engine="gpt-3.5-turbo",
+     #     prompt=prompt,
+     #     max_tokens=50,  # Adjust the number of tokens based on your requirement
+     #     n=1,
+     #     stop=None,
+     #     temperature=0.7,
+     # )
+
+     # # Print the generated text
+     # generated_text = response.choices[0].text.strip()
+     # print(generated_text)
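Note: setup_retriever() and setup_client() run at import time, so the knowledge .docx must exist at the hard-coded path and OPENROUTER_API_KEY must be set (e.g. via the .env file read by load_dotenv) before rag.py is imported. BM25Retriever is purely lexical, which is why the "Scientific name:{species}" query works without an embedding model; under the hood it requires the rank_bm25 package, and Docx2txtLoader requires docx2txt.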
requirements.txt ADDED
@@ -0,0 +1,16 @@
+ torch
+ torchvision
+ torchaudio
+ geopandas
+ rasterio
+ fiona
+ shapely
+ opencv-python
+ gradio
+ deepforest
+ matplotlib
+ keras
+ tensorflow
+ openpyxl
+
+
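Note: the modules above also import streamlit, pandas, Pillow, scipy, scikit-learn, openai, python-dotenv, docx2txt, rank_bm25 and several langchain packages (langchain-community, langchain-core, langchain-text-splitters), none of which are pinned here — presumably they are already present in the target environment or pulled in as transitive dependencies.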
setup.sh ADDED
@@ -0,0 +1,12 @@
+ # setup.sh
+ sudo apt update
+ sudo apt install -y libgdal-dev
+ sudo apt install -y gdal-bin
+ export CPLUS_INCLUDE_PATH=/usr/include/gdal
+ export C_INCLUDE_PATH=/usr/include/gdal
+ pip install -r requirements.txt
+ pip install GDAL
+ pip install git+https://github.com/PatBall1/detectree2.git
+ pip install git+https://github.com/openai/CLIP.git
+ pip install git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13
+ pip install detectron2==0.6 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu113/torch1.10/index.html
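Note: the last two lines install detectron2 twice — once from a pinned GitHub commit and once as the 0.6 wheel — and the wheel index targets the CUDA 11.3 / torch 1.10 build specifically, while requirements.txt leaves torch unpinned, so the second install may conflict with the first and one of the two is likely redundant on any given machine.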