hprasath committed
Commit bbcc5b2 · verified · 1 Parent(s): 64c6ed3

Upload 9 files

utils/ImageAndTextEmbedding/index.py ADDED
@@ -0,0 +1,40 @@
+ from PIL import Image
+ import io
+ from transformers import CLIPProcessor, CLIPModel
+ import torch
+
+ # Load the CLIP model and processor once at import time
+ model_name = "openai/clip-vit-base-patch32"
+ loaded_model = CLIPModel.from_pretrained(model_name)
+ loaded_processor = CLIPProcessor.from_pretrained(model_name)
+
+ def getTextEmbedding(text):
+     # Preprocess the text
+     inputs_text = loaded_processor(text=[text], return_tensors="pt", padding=True)
+     # Forward pass through the model without tracking gradients
+     with torch.no_grad():
+         text_features = loaded_model.get_text_features(
+             input_ids=inputs_text.input_ids,
+             attention_mask=inputs_text.attention_mask,
+         )
+     # Convert the tensor to a NumPy array
+     text_embedding = text_features.squeeze().numpy()
+     return text_embedding
+
+ def getImageEmbedding(binary_image_data):
+     # Load and preprocess the image
+     image = Image.open(io.BytesIO(binary_image_data))
+     inputs = loaded_processor(images=image, return_tensors="pt", padding=True)
+     # Forward pass through the model without tracking gradients
+     with torch.no_grad():
+         image_features = loaded_model.get_image_features(pixel_values=inputs.pixel_values)
+     # Convert the tensor to a NumPy array
+     image_embedding = image_features.squeeze().numpy()
+     return image_embedding
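A minimal usage sketch for this module (the prompt and image path are illustrative, not part of the commit; CLIP ViT-B/32 produces 512-dimensional vectors):

    from utils.ImageAndTextEmbedding.index import getTextEmbedding, getImageEmbedding

    text_vec = getTextEmbedding("a photo of a dog")               # NumPy array, shape (512,)
    image_vec = getImageEmbedding(open("dog.jpg", "rb").read())   # NumPy array, shape (512,)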
utils/audioEmbedding/index.py ADDED
@@ -0,0 +1,28 @@
+ import tensorflow as tf
+ import librosa
+ import io
+
+ # Load the YAMNet model from the SavedModel format
+ yamnet_model = tf.saved_model.load('yamnet_saved_model')
+
+ # Extract frame-level embeddings from raw audio bytes using YAMNet
+ def extract_audio_embeddings(audio_binary):
+     # Decode the binary audio with librosa; YAMNet requires a 16 kHz sample rate
+     audio, sample_rate = librosa.load(io.BytesIO(audio_binary), sr=16000)
+     # Convert the waveform to a float32 tensor
+     audio_tensor = tf.convert_to_tensor(audio, dtype=tf.float32)
+     # YAMNet returns per-frame class scores, embeddings, and a log-mel spectrogram
+     scores, embeddings, spectrogram = yamnet_model(audio_tensor)
+     embeddings_list = embeddings.numpy().tolist()  # list of per-frame embedding vectors
+     return embeddings_list
+
+ # Example usage
+ if __name__ == "__main__":
+     audio_path = "pictures/users/1a.mp3"
+     # Read the file as bytes, since extract_audio_embeddings expects binary data
+     with open(audio_path, "rb") as f:
+         audio_embeddings = extract_audio_embeddings(f.read())
+     print("Embeddings for", audio_path)
+     print(audio_embeddings)
+     print("Audio embedding model loaded successfully")
utils/imageEmbedding/index.py ADDED
@@ -0,0 +1,17 @@
+ from utils.ImageAndTextEmbedding.index import getImageEmbedding
+
+ # Thin wrapper around the shared CLIP image-embedding helper
+ def get_image_embedding(image_bytes):
+     return getImageEmbedding(image_bytes)
+
+ # Example: load image data from a file and get its embedding
+ # image_data = open("pictures/users/2.jpg", "rb").read()
+ # embedding = get_image_embedding(image_data)
+ # print(embedding)
+
+ print("Image embedding model loaded successfully!")
utils/imageToText/index.py ADDED
@@ -0,0 +1,24 @@
+ import re
+ from PIL import Image
+ from transformers import pipeline
+ import io
+
+ def clean_text(text):
+     # Strip the model's XML-like tags, then normalize whitespace
+     cleaned = re.sub(r'<[^>]+>', '', text)
+     cleaned = cleaned.strip()
+     cleaned = re.sub(r'\s+', ' ', cleaned)
+     return cleaned
+
+ # Load the Donut-based OCR pipeline
+ pipe = pipeline("image-to-text", model="jinhybr/OCR-Donut-CORD")
+
+ def extract_text(binary_image):
+     image = Image.open(io.BytesIO(binary_image))
+     result = pipe(image)
+     text = result[0]['generated_text']
+     return clean_text(text)
+
+ # Example usage:
+ # print(extract_text(open("pictures/users/2.jpg", "rb").read()))
+
+ print("OCR pipeline loaded successfully!")
utils/objectDetection/index.py ADDED
@@ -0,0 +1,12 @@
+ from transformers import pipeline
+ from PIL import Image
+ from io import BytesIO
+
+ # Load the object detection pipeline
+ object_detection_pipeline = pipeline("object-detection", model="ciasimbaya/ObjectDetection")
+
+ def detect_objects(image_bytes):
+     image = Image.open(BytesIO(image_bytes))
+     return object_detection_pipeline(image)
+
+ print("Object detection model loaded successfully")
utils/sample.py ADDED
@@ -0,0 +1,77 @@
+ import requests
+
+ # Sample media URLs used to exercise the endpoints
+ image_url = "https://utfs.io/f/47589c6c-6ce0-4baf-b75d-b1ec5d4d9dda-213j1w.jpg"
+ audio_url = "https://utfs.io/f/b84a84a2-b68f-49c5-8b7c-d76d894f6d3a-c5qjj4.wav"
+ video_url = "https://utfs.io/f/ef6c037f-fa61-471a-8956-562bc2d62531-fzxs1i.mp4"
+ family_url = "https://i.pinimg.com/originals/b2/20/14/b22014ca275e94097386aab222469caf.jpg"
+
+ # Endpoints exposed by the local Flask server
+ extract_text_url = "http://127.0.0.1:5000/extractText"
+ extract_audio_text_url = "http://127.0.0.1:5000/extractAudioText"
+ get_image_embedding_url = "http://127.0.0.1:5000/getImageEmbedding"
+ get_text_embedding_url = "http://127.0.0.1:5000/getTextEmbedding"
+ get_text_description_embedding_url = "http://127.0.0.1:5000/getTextDescriptionEmbedding"
+ get_audio_embedding_url = "http://127.0.0.1:5000/getAudioEmbedding"
+ get_audio_extracted_text_url = "http://127.0.0.1:5000/getAudioExtractedText"
+ get_video_embedding_url = "http://127.0.0.1:5000/getVideoEmbedding"
+ get_object_detection_url = "http://127.0.0.1:5000/detectObjects"
+ get_similarity_score_url = "http://127.0.0.1:5000/getSimilarityScore"
+ get_face_locations_url = "http://127.0.0.1:5000/getFaceLocations"
+
+ # Make requests to each node and collect summary results
+ try:
+     results = []
+
+     # Request audio transcription
+     response_text = requests.post(extract_audio_text_url, json={"audio_url": audio_url})
+     extracted_text = response_text.json()["transcription"]
+     results.append({"length of text": len(extracted_text)})
+
+     # # Request to extract text from an image
+     # response_text = requests.post(extract_text_url, json={"imageUrl": image_url})
+     # extracted_text = response_text.json().get("extracted_text")
+     # results.append({"length of text": len(extracted_text)})
+
+     # # Request to get image embedding
+     # response_image_embedding = requests.post(get_image_embedding_url, json={"imageUrl": image_url})
+     # image_embedding = response_image_embedding.json().get("image_embedding")
+     # results.append({"length of image_embedding": len(image_embedding)})
+
+     # # Request to get text embedding
+     # response_text_embedding = requests.post(get_text_embedding_url, json={"text": extracted_text})
+     # text_embedding = response_text_embedding.json().get("text_embedding")
+     # results.append({"length of text_embedding": len(text_embedding)})
+
+     # # Request to get text description embedding
+     # response_text_description_embedding = requests.post(get_text_description_embedding_url, json={"text": "an image of a mobile phone"})
+     # text_description_embedding = response_text_description_embedding.json().get("text_description_embedding")
+     # results.append({"length of text_description_embedding": len(text_description_embedding)})
+
+     # # Request to get audio embedding
+     # response_audio_embedding = requests.post(get_audio_embedding_url, json={"audioUrl": audio_url})
+     # audio_embedding = response_audio_embedding.json().get("audio_embedding")
+     # results.append({"length of audio_embedding": len(audio_embedding)})
+
+     # Request to get video embedding
+     response_video_embedding = requests.post(get_video_embedding_url, json={"videoUrl": video_url})
+     video_embedding = response_video_embedding.json().get("video_embedding")
+     results.append({"length of video_embedding": len(video_embedding)})
+
+     # # Request to get object detection
+     # response_object_detection = requests.post(get_object_detection_url, json={"imageUrl": image_url})
+     # object_detection = response_object_detection.json().get("object_detection_results")
+     # results.append({"length of object_detection": len(object_detection)})
+
+     # # Request to get similarity score
+     # response_similarity_score = requests.post(get_similarity_score_url, json={"embedding1": text_description_embedding, "embedding2": image_embedding})
+     # similarity_score = response_similarity_score.json().get("similarity_score")
+     # results.append({"similarity_score": similarity_score})
+
+     # # Request to get face locations
+     # response_face_locations = requests.post(get_face_locations_url, json={"imageUrl": family_url})
+     # face_locations = response_face_locations.json().get("face_locations")
+     # results.append({"face_locations": face_locations})
+
+     print(results)
+ except Exception as e:
+     print("Error:", e)
utils/sentanceEmbedding/index.py ADDED
@@ -0,0 +1,32 @@
+ import pickle
+ from utils.ImageAndTextEmbedding.index import getTextEmbedding
+
+ # Load the pre-trained Word2Vec model from disk
+ with open("word2vec_model.pkl", "rb") as f:
+     textEmbedding_model = pickle.load(f)
+
+ def get_text_vector(example_text):
+     # Tokenize the text into words
+     words = example_text.lower().split()
+
+     # Filter out words that are not in the vocabulary of the Word2Vec model
+     words_in_vocab = [word for word in words if word in textEmbedding_model]
+
+     # Return the average vector of the in-vocabulary words, or None if there are none
+     if words_in_vocab:
+         text_vector = sum(textEmbedding_model[word] for word in words_in_vocab) / len(words_in_vocab)
+         return text_vector.tolist()
+     else:
+         return None
+
+ def get_text_discription_vector(text):
+     # Delegate to the CLIP text encoder for description embeddings
+     return getTextEmbedding(text)
+
+ # Example usage:
+ # example_text = "This is an example sentence."
+ # text_vector = get_text_vector(example_text)
+ # if text_vector:
+ #     print("Vector representation of the example text:", text_vector)
+ # else:
+ #     print("None of the words in the example text are in the vocabulary of the Word2Vec model.")
+
+ print("Text embedding model loaded successfully!")
utils/similarityScore.py ADDED
@@ -0,0 +1,41 @@
+ import numpy as np
+
+ def euclidean_similarity(embedding1, embedding2):
+     embedding1 = np.asarray(embedding1)
+     embedding2 = np.asarray(embedding2)
+     euclidean_distance = np.linalg.norm(embedding1 - embedding2)
+     # Map the distance to a (0, 1] similarity score; other transformations work as well
+     return 1 / (1 + euclidean_distance)
+
+ def cosine_similarity(embedding1, embedding2):
+     embedding1 = np.asarray(embedding1)
+     embedding2 = np.asarray(embedding2)
+     dot_product = np.dot(embedding1, embedding2)
+     norm1 = np.linalg.norm(embedding1)
+     norm2 = np.linalg.norm(embedding2)
+     return dot_product / (norm1 * norm2)
+
+ def jaccard_similarity(embedding1, embedding2):
+     # Treats the embeddings as sets of values; mainly meaningful for discrete vectors
+     intersection = len(set(embedding1).intersection(set(embedding2)))
+     union = len(set(embedding1).union(set(embedding2)))
+     return intersection / union
+
+ def hamming_similarity(embedding1, embedding2):
+     # Element-wise comparison; assumes both embeddings have the same length
+     embedding1 = np.asarray(embedding1)
+     embedding2 = np.asarray(embedding2)
+     distance = np.count_nonzero(embedding1 != embedding2)
+     return 1 - distance / len(embedding1)
+
+ def get_all_similarities(embedding1, embedding2):
+     euclidean = euclidean_similarity(embedding1, embedding2)
+     cosine = cosine_similarity(embedding1, embedding2)
+     jaccard = jaccard_similarity(embedding1, embedding2)
+     hamming = hamming_similarity(embedding1, embedding2)
+     return {"euclidean": euclidean, "cosine": cosine, "jaccard": jaccard, "hamming": hamming}
+
+ # Example usage:
+ # embedding1 = [1, 2, 3]
+ # embedding2 = [4, 5, 6]
+ # similarities = get_all_similarities(embedding1, embedding2)
+ # print(similarities)
+
+ print("Similarity score is working")
utils/videoEmbedding/index.py ADDED
@@ -0,0 +1,43 @@
+ import cv2
+ from utils.imageEmbedding.index import get_image_embedding
+ from utils.imageToText.index import extract_text
+
+ def get_video_embedding(video_url):
+     try:
+         cap = cv2.VideoCapture(video_url)
+         fps = cap.get(cv2.CAP_PROP_FPS)
+         interval = max(int(fps), 1)  # Sample roughly one frame per second
+
+         frame_count = 0
+         video_embeddings = []
+
+         while cap.isOpened():
+             ret, frame = cap.read()
+             if not ret:
+                 break
+             if frame_count % interval == 0:
+                 # Encode the frame as JPEG bytes
+                 ret, buffer = cv2.imencode('.jpg', frame)
+                 if not ret:
+                     frame_count += 1
+                     continue
+                 frame_bytes = buffer.tobytes()
+                 # OCR the frame and compute its CLIP image embedding
+                 extracted_text = extract_text(frame_bytes)
+                 image_embedding = get_image_embedding(frame_bytes)
+                 video_embeddings.append({
+                     "image_embedding": image_embedding.tolist(),
+                     "extracted_text": extracted_text,
+                 })
+             frame_count += 1
+
+         cap.release()
+         return video_embeddings
+
+     except Exception as e:
+         print(e)
+         return []
+
+ # Example usage:
+ # video_url = "https://utfs.io/f/ef6c037f-fa61-471a-8956-562bc2d62531-fzxs1i.mp4"
+ # video_embeddings = get_video_embedding(video_url)
+ # print("Video Embeddings:", video_embeddings)