ff98 committed
Commit c69a273 · 1 Parent(s): 361a69f

files added

.DS_Store ADDED
Binary file (6.15 kB).
 
.gitignore ADDED
@@ -0,0 +1,2 @@
+ CNN_model_weight
+ EfficientNet_Models
Models/Final_CNN_Model.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
Models/Final_EfficientNet_Code.ipynb ADDED
@@ -0,0 +1 @@
+ {"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.10.14","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"none","dataSources":[{"sourceId":9715840,"sourceType":"datasetVersion","datasetId":5943727},{"sourceId":9739406,"sourceType":"datasetVersion","datasetId":5915042}],"dockerImageVersionId":30786,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":false}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"import numpy as np\nimport os\nimport hashlib\nimport tensorflow as tf\nfrom PIL import Image\nfrom tensorflow.keras import layers, models\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array # Correct import\nfrom tensorflow.keras.preprocessing import image\nimport numpy as np\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.applications import EfficientNetB0\nfrom tensorflow.keras.layers import Dropout, Dense, GlobalAveragePooling2D\nfrom tensorflow.keras.models import Model\nfrom sklearn.metrics import confusion_matrix, classification_report\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score\nfrom os import listdir","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# Check for TPU\ntry:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver()\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\nexcept ValueError:\n strategy = tf.distribute.get_strategy() # Default to CPU/GPU strategy if TPU is not found\n","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\ndef augment_image(image, label):\n image = tf.image.random_flip_left_right(image) # Randomly flip images\n image = tf.image.random_brightness(image, max_delta=0.1) # Random brightness\n return image, label\n\ntrain_data = tf.keras.utils.image_dataset_from_directory(\n '/kaggle/input/ai-vs-real-dataset/_DATASET/dataset/train',\n image_size=(224, 224),\n batch_size=128, # Adjust based on your TPU memory\n shuffle=True\n)\n \ntrain_data = train_data.map(augment_image, num_parallel_calls=tf.data.AUTOTUNE)\n # Prefetch to improve performance\ntrain_data = train_data.prefetch(tf.data.AUTOTUNE)","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\nwith strategy.scope():\n base_model = tf.keras.applications.EfficientNetB0(weights='/kaggle/input/efficientnetb0-weights/efficientnetb0_notop.h5', include_top=False, input_shape=(224, 224, 3))\n \n # Add custom classification layers\n x = base_model.output\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dense(128, activation='relu')(x)\n x = tf.keras.layers.Dropout(0.5)(x)\n x = tf.keras.layers.Dense(1, activation='sigmoid')(x)\n\n # Create the final model\n model = tf.keras.Model(inputs=base_model.input, outputs=x)\n\n # Compile the model\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n# Summary of the 
model\nmodel.summary()\n","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"with strategy.scope():\n model.fit(train_data, epochs=20)\n model.save('/kaggle/working/EfficientNet_pre_model.h5')","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# # Test images here\n\n# Load the test dataset\ntest_data = tf.keras.utils.image_dataset_from_directory(\n '/kaggle/input/ai-vs-real-dataset/_DATASET/dataset/test', # Your test directory\n image_size=(224, 224),\n batch_size=64,\n shuffle=True # Important to keep the order for predictions\n)\n\n\n# Extract true labels from the dataset\ny_true = np.concatenate([y.numpy() for x, y in test_data], axis=0)\nclass_names = ['FAKE', 'REAL']\n\n# Make predictions\nwith strategy.scope():\n predictions = model.predict(test_data)\n\n# Convert predictions to binary labels\ny_pred = (predictions > 0.5).astype(\"int32\").flatten() # Flatten to match shape of y_true\n\n# Create confusion matrix\ncm = confusion_matrix(y_true, y_pred)\n\nprint(\"Confusion Matrix:\")\nprint(cm)\n\nprint(\"Classification Report:\")\nprint(classification_report(y_true, y_pred, target_names=class_names))\n\n# Calculate metrics\naccuracy = accuracy_score(y_true, y_pred)\nprecision = precision_score(y_true, y_pred)\nrecall = recall_score(y_true, y_pred)\nf1 = f1_score(y_true, y_pred)\n\nprint(f\"Accuracy: {accuracy}\")\nprint(f\"Precision: {precision}\")\nprint(f\"Recall: {recall}\")\nprint(f\"F1 Score: {f1}\")\n\n# Plot confusion matrix for better visualization\nplt.figure(figsize=(6, 6))\nsns.heatmap(cm, annot=True, fmt=\"d\", cmap=\"Blues\", xticklabels=class_names, yticklabels=class_names)\nplt.title('Confusion Matrix')\nplt.ylabel('True Label')\nplt.xlabel('Predicted Label')\nplt.show()","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# Unfreeze top layers for fine-tuning\nwith strategy.scope():\n base_model.trainable = True\n\n# Fine-tune from this layer onwards (adjust as needed)\n for layer in base_model.layers[:150]:\n layer.trainable = False\n\n# Re-compile the model with a lower learning rate\n model.compile(optimizer=tf.keras.optimizers.Adam(1e-5), \n loss='binary_crossentropy', \n metrics=['accuracy'])\n\n# Train again\n\n history_fine = model.fit(\n train_data,\n epochs=10 # Fine-tuning for a few additional epochs\n )\n model.save('/kaggle/working/EfficientNet_fine_tune_model.h5')\n\n","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\ntest_data = tf.keras.utils.image_dataset_from_directory(\n '/kaggle/input/ai-vs-real-dataset/_DATASET/dataset/test', \n image_size=(224, 224),\n batch_size=64,\n shuffle=True \n)\n\n# Extract true labels from the dataset\ny_true = np.concatenate([y.numpy() for x, y in test_data], axis=0)\nclass_names = ['FAKE', 'REAL']\n\n# Make predictions\nwith strategy.scope():\n predictions = model.predict(test_data)\n\n# Convert predictions to binary labels\ny_pred = (predictions > 0.5).astype(\"int32\").flatten() # Flatten to match shape of y_true\n\n# Create confusion matrix\ncm = confusion_matrix(y_true, y_pred)\n\nprint(\"Confusion Matrix:\")\nprint(cm)\n\nprint(\"Classification Report:\")\nprint(classification_report(y_true, y_pred, target_names=class_names))\n\n# Calculate metrics\naccuracy = accuracy_score(y_true, y_pred)\nprecision = precision_score(y_true, y_pred)\nrecall = recall_score(y_true, y_pred)\nf1 = f1_score(y_true, y_pred)\n\nprint(f\"Accuracy: 
{accuracy}\")\nprint(f\"Precision: {precision}\")\nprint(f\"Recall: {recall}\")\nprint(f\"F1 Score: {f1}\")\n\n# Plot confusion matrix for better visualization\nplt.figure(figsize=(6, 6))\nsns.heatmap(cm, annot=True, fmt=\"d\", cmap=\"Blues\", xticklabels=class_names, yticklabels=class_names)\nplt.title('Confusion Matrix')\nplt.ylabel('True Label')\nplt.xlabel('Predicted Label')\nplt.show()","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"model = tf.keras.models.load_model('/kaggle/working/EfficientNet_fine_tune_model.h5')\naccuracy = model.evaluate(test_data)\nprint(f\"Test Accuracy: {accuracy[1]}\")\n","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"#for deployment\ndef predictPicture(imagePath):\n # model=tf.keras.models.load_model('//kaggle/input/efficientnetmodel-5/EfficientNet_model_5/saved_model.pb')\n if imagePath.endswith((\".png\", \".jpg\", \".jpeg\")):\n # Load and preprocess the image\n img = load_img(imagePath, target_size=(224, 224)) # Resize image to model input size\n img_arr = img_to_array(img) # Convert to array\n img_arr = np.expand_dims(img_arr, axis=0) # Add batch dimension\n \n with strategy.scope():\n prediction = model.predict(img_arr)\n \n # Output the predicted probability\n print(prediction[0])\n \n # Output the label based on probability\n if prediction[0] > 0.5:\n print(\"Prediction: Real\")\n else:\n print(\"Prediction: Fake\")\n else:\n print(\"Please provide a valid image file (PNG or JPG format).\")\n# get the path or directory\nfake_test = \"/kaggle/input/ai-vs-real-dataset/_DATASET/dataset/test/FAKE\"\ni=0\nfor images in os.listdir(fake_test):\n file_path=fake_test+\"/\"+images\n if i%200==0:\n predictPicture(file_path)\n i+=1\ni=0\nprint(\"CHANGE\")\nreal_test = \"/kaggle/input/ai-vs-real-dataset/_DATASET/dataset/test/REAL\"\nfor images in os.listdir(real_test):\n file_path=real_test+\"/\"+images\n if i%200==0:\n predictPicture(file_path) \n i+=1","metadata":{"trusted":true},"outputs":[],"execution_count":null}]}
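
For a quick sanity check of the saved classifier outside Kaggle, the `.h5` file can be reloaded and run on a single image. A minimal sketch, assuming hypothetical local paths for the model and a sample image; the 224×224 input size and the "above 0.5 means REAL" convention come from the notebook above, and Keras EfficientNet applies its pixel rescaling inside the model, so raw 0-255 inputs are fine:

```python
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import load_img, img_to_array

MODEL_PATH = "EfficientNet_fine_tune_model.h5"  # assumed local copy of the saved model
IMAGE_PATH = "sample.jpg"                       # hypothetical test image

model = tf.keras.models.load_model(MODEL_PATH)

# Resize to the 224x224 input used during training; EfficientNet's built-in
# preprocessing handles pixel scaling, so no manual normalization here
img = load_img(IMAGE_PATH, target_size=(224, 224))
img_arr = np.expand_dims(img_to_array(img), axis=0)

prob = float(model.predict(img_arr)[0][0])
print("REAL" if prob > 0.5 else "FAKE", f"(p={prob:.3f})")
```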
app.py ADDED
@@ -0,0 +1,175 @@
+ # imports
+ import streamlit as st
+ import tensorflow as tf
+ from tensorflow.keras.models import Sequential
+ from tensorflow.keras.layers import Conv2D, BatchNormalization, MaxPooling2D, Flatten, Dense, Dropout
+ from tensorflow.keras.preprocessing.image import load_img, img_to_array
+ import numpy as np
+
+
+ # load css
+ def load_local_css(file_name):
+     with open(file_name) as f:
+         st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
+
+ load_local_css("./styles/style.css")
+
+
+ # bootstrap
+ st.markdown(
+     """<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-QWTKZyjpPEjISv5WaRU9OFeRpok6YctnYmDr5pNlyT2bRjXh0JMhjY6hW+ALEwIH" crossorigin="anonymous">""",
+     unsafe_allow_html=True
+ )
+
+
+ # The EfficientNet models are loaded from disk; the CNN is rebuilt per prediction from saved weights
+ eff_net_model = tf.keras.models.load_model('EfficientNet_Models/efficientnetb3_binary_classifier_8.h5')
+ eff_net_art_model = tf.keras.models.load_model('EfficientNet_Models/EfficientNet_fine_tune_art_model.h5')
+ cnn_model = 'CNN_model_weight/model_weights.weights.h5'
+
+ # CNN model: rebuild the training-time architecture, then load the pre-trained weights
+ def run_cnn(img_arr):
+     my_model = Sequential()
+     my_model.add(Conv2D(
+         filters=16,
+         kernel_size=(3, 3),
+         strides=(1, 1),
+         activation='relu',
+         input_shape=(256, 256, 3)
+     ))
+     my_model.add(BatchNormalization())
+     my_model.add(MaxPooling2D())
+
+     my_model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
+     my_model.add(BatchNormalization())
+     my_model.add(MaxPooling2D())
+
+     my_model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
+     my_model.add(BatchNormalization())
+     my_model.add(MaxPooling2D())
+
+     my_model.add(Flatten())
+     my_model.add(Dense(512, activation='relu'))
+     my_model.add(Dropout(0.09))
+     my_model.add(Dense(1, activation='sigmoid'))
+     my_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
+
+     # Load the pre-trained weights
+     my_model.load_weights(cnn_model)
+
+     prediction = my_model.predict(img_arr)
+     return prediction
+
+ # Use a TPU when one is available; otherwise fall back to the default CPU/GPU strategy
+ def run_effNet(img_arr):
+     try:
+         resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
+         tf.config.experimental_connect_to_cluster(resolver)
+         tf.tpu.experimental.initialize_tpu_system(resolver)
+         strategy = tf.distribute.TPUStrategy(resolver)
+     except ValueError:
+         strategy = tf.distribute.get_strategy()
+     with strategy.scope():
+         prediction = eff_net_model.predict(img_arr)
+     return prediction
+
+ def run_effNet_Art(img_arr):
+     try:
+         resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
+         tf.config.experimental_connect_to_cluster(resolver)
+         tf.tpu.experimental.initialize_tpu_system(resolver)
+         strategy = tf.distribute.TPUStrategy(resolver)
+     except ValueError:
+         strategy = tf.distribute.get_strategy()
+     with strategy.scope():
+         prediction = eff_net_art_model.predict(img_arr)
+     return prediction
+
+ # preprocess image for the general EfficientNet model (300x300 input)
+ def pre_process_img_effNet(image):
+     img = load_img(image, target_size=(300, 300))  # Resize image to model input size
+     img_arr = img_to_array(img)  # Convert to array
+     img_arr = np.expand_dims(img_arr, axis=0)  # Add batch dimension
+     result = run_effNet(img_arr)
+     return result
+
+ # preprocess image for the EfficientNet art model (224x224 input)
+ def pre_process_img_effNetArt(image):
+     img = load_img(image, target_size=(224, 224))  # Resize image to model input size
+     img_arr = img_to_array(img)  # Convert to array
+     img_arr = np.expand_dims(img_arr, axis=0)  # Add batch dimension
+     result = run_effNet_Art(img_arr)
+     return result
+
+ # preprocess image for cnn
+ def pre_process_img(image):
+     # Load and preprocess the image
+     input_picture = load_img(image, target_size=(256, 256))
+     img_arr = img_to_array(input_picture) / 255.0  # Normalize the image
+     img_arr = img_arr.reshape((1, 256, 256, 3))  # Add batch dimension
+     result = run_cnn(img_arr)
+     return result
+
+ # title
+ st.markdown(
+     """<p class="title"> AI vs REAL Image Detection </p>""",
+     unsafe_allow_html=True
+ )
+
+ # upload image
+ st.markdown(
+     """<p class="upload_line"> Please upload the image </p>""",
+     unsafe_allow_html=True
+ )
+ user_image = st.file_uploader("png, jpg, or jpeg image", ['png', 'jpg', 'jpeg'], label_visibility='hidden')
+ model_name = st.selectbox('Choose a model', ['CNN', 'EfficientNet', 'EfficientNet Art'], index=None, placeholder='choose an option')
+ result_placeholder = st.empty()
+
+ # design animation elements
+ with open("styles/detectiveMag.svg", "r") as file:
+     svg_content_detective_Mag = file.read()
+
+ # First magnifying glass starts at bottom-right
+ st.markdown(
+     f"<div class='detectiveMag1' style='bottom: 0%; right: 0%;'>{svg_content_detective_Mag}</div>",
+     unsafe_allow_html=True
+ )
+
+ # Second magnifying glass starts slightly higher up the diagonal
+ st.markdown(
+     f"<div class='detectiveMag2' style='bottom: 10%; right: 10%;'>{svg_content_detective_Mag}</div>",
+     unsafe_allow_html=True
+ )
+
+ # Third magnifying glass starts further up the diagonal
+ st.markdown(
+     f"<div class='detectiveMag3' style='bottom: 20%; right: 20%;'>{svg_content_detective_Mag}</div>",
+     unsafe_allow_html=True
+ )
+
+ if user_image is not None and model_name is not None:
+     predictions = []
+     if model_name == 'CNN':
+         print('CNN is running')
+         predictions = pre_process_img(user_image)
+     elif model_name == 'EfficientNet':
+         print('EffNet is running')
+         predictions = pre_process_img_effNet(user_image)
+     elif model_name == 'EfficientNet Art':
+         print('EffNet Art is running')
+         predictions = pre_process_img_effNetArt(user_image)
+
+     # Sigmoid output: below 0.5 means FAKE, at or above 0.5 means REAL
+     if predictions[0] < 0.5:
+         result_word = "FAKE"
+     else:
+         result_word = "REAL"
+
+     if len(predictions) > 0:
+         result_placeholder.markdown(f"<div class='result'> It is a <span class='resultword'> {result_word} </span> image </div>", unsafe_allow_html=True)
+
+     print(model_name)
+     print(predictions[0])
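
One caveat with `app.py` as committed: Streamlit re-executes the whole script on every widget interaction, so the two `load_model` calls at module level run again on each rerun. A hedged sketch of one way to avoid that with `st.cache_resource` (available in the Streamlit 1.40.0 pinned in `requirements.txt`); the paths are the same ones the app already uses:

```python
import streamlit as st
import tensorflow as tf

@st.cache_resource  # run once per process; later reruns reuse the cached models
def get_models():
    eff_net = tf.keras.models.load_model('EfficientNet_Models/efficientnetb3_binary_classifier_8.h5')
    eff_net_art = tf.keras.models.load_model('EfficientNet_Models/EfficientNet_fine_tune_art_model.h5')
    return eff_net, eff_net_art

eff_net_model, eff_net_art_model = get_models()
```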
data_cleaning/data_cleaning.ipynb ADDED
@@ -0,0 +1,288 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "68e2dccb-3f52-4ea3-bf1d-8732641daefa",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import os\n",
+     "import hashlib\n",
+     "from PIL import Image\n",
+     "import cv2\n",
+     "import pandas\n",
+     "import numpy as np\n",
+     "import matplotlib.pyplot as plt\n",
+     "import shutil\n",
+     "import random\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "b5f7e8cb-1c1e-423a-b7c5-68a41c3eeec3",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# REMOVE DUPLICATE IMAGES\n",
+     "def calculate_hash(image_path):\n",
+     "    # Calculate the hash of an image.\n",
+     "    with Image.open(image_path) as img:\n",
+     "        img = img.convert(\"RGB\")  # Ensure the image is in RGB format\n",
+     "        img = img.resize((8, 8))  # Resize to reduce size and create the hash\n",
+     "        hash_value = hashlib.md5(img.tobytes()).hexdigest()  # Create hash\n",
+     "    return hash_value\n",
+     "\n",
+     "def find_and_remove_duplicates(folder_path):\n",
+     "    # Find and remove duplicate images in a given folder.\n",
+     "\n",
+     "    # If the folder cannot be found, report that it does not exist\n",
+     "    if not os.path.exists(folder_path):\n",
+     "        print(f\"The folder '{folder_path}' may not exist.\")\n",
+     "        return\n",
+     "\n",
+     "    print(f\"Scanning folder: {folder_path}\")\n",
+     "\n",
+     "    hashes = {}\n",
+     "    duplicates = []\n",
+     "\n",
+     "    for filename in os.listdir(folder_path):  # for each file in the folder\n",
+     "        if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):  # if the file is an image\n",
+     "            file_path = os.path.join(folder_path, filename)  # build the path to this image\n",
+     "            img_hash = calculate_hash(file_path)\n",
+     "            if img_hash in hashes:\n",
+     "                duplicates.append(file_path)  # Found a duplicate\n",
+     "                print(f\"Duplicate found: {file_path} (duplicate of {hashes[img_hash]})\")\n",
+     "            else:\n",
+     "                hashes[img_hash] = file_path\n",
+     "\n",
+     "    # Remove duplicates\n",
+     "    for duplicate in duplicates:\n",
+     "        os.remove(duplicate)\n",
+     "        print(f\"Removed duplicate: {duplicate}\")\n",
+     "\n",
+     "    if not duplicates:\n",
+     "        print(\"No duplicates found.\")\n",
+     "\n",
+     "if __name__ == '__main__':\n",
+     "    folder = input(\"Enter the path to the folder containing photos: \")\n",
+     "    find_and_remove_duplicates(folder)\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "73265e47-6308-4802-be5c-8eb953148d63",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Convert all images to JPG format\n",
+     "def convert_images(folder):\n",
+     "    # Loop through the image folder directory\n",
+     "    for filename in os.listdir(folder):\n",
+     "        # Check if the file is not already in JPG format\n",
+     "        if not filename.lower().endswith('.jpg') and filename.lower().endswith(('.png', '.gif', '.bmp', '.jpeg')):\n",
+     "            input_path = os.path.join(folder, filename)\n",
+     "            output_path = os.path.join(folder, f\"{os.path.splitext(filename)[0]}.jpg\")  # path of the converted .jpg\n",
+     "\n",
+     "            try:\n",
+     "                # Open the image file\n",
+     "                with Image.open(input_path) as img:\n",
+     "                    # Convert the image to RGB\n",
+     "                    rgb_img = img.convert('RGB')\n",
+     "                    # Save the image as JPG\n",
+     "                    rgb_img.save(output_path, 'JPEG')\n",
+     "                    print(f\"Converted {filename} to {output_path}\")\n",
+     "                # Remove the old image file\n",
+     "                os.remove(input_path)\n",
+     "                print(f\"Removed old file: {input_path}\")\n",
+     "            except Exception as e:\n",
+     "                print(f\"Error processing {filename}: {e}\")\n",
+     "\n",
+     "    print(\"Image conversion to .jpg completed.\")  # Print once after processing all images\n",
+     "\n",
+     "if __name__ == '__main__':\n",
+     "    input_folder = input(\"Enter the path to the input folder containing images: \")\n",
+     "    convert_images(input_folder)\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 17,
+    "id": "9d4cea00-fc10-4ca4-a139-0dcd259b2767",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Check for corruption\n",
+     "def is_corrupt(image_path):\n",
+     "    try:\n",
+     "        img = Image.open(image_path)\n",
+     "        img.verify()  # Verify the image file\n",
+     "        return False  # Image is not corrupted\n",
+     "    except (IOError, SyntaxError):\n",
+     "        return True  # Image is corrupted\n",
+     "\n",
+     "def read_files_in_folder(folder_path):\n",
+     "    count = 0\n",
+     "    for filename in os.listdir(folder_path):\n",
+     "        file_path = os.path.join(folder_path, filename)\n",
+     "        if is_corrupt(file_path):\n",
+     "            count += 1\n",
+     "            print(\"Image is corrupted:\", file_path)\n",
+     "    return count\n",
+     "\n",
+     "if __name__ == '__main__':\n",
+     "    input_folder = input(\"Enter the path to the input folder containing images: \")\n",
+     "    read_files_in_folder(input_folder)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "1ce74fa7",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# CREATE TEST DATA\n",
+     "source_directory = input(\"Enter source directory: \")\n",
+     "destination_directory = input(\"Enter destination directory: \")\n",
+     "\n",
+     "# Get the list of files and the total number of files in the directory\n",
+     "all_files = os.listdir(source_directory)\n",
+     "count = len(all_files)\n",
+     "\n",
+     "# Move a random 20% sample of the files\n",
+     "twenty_percent = count // 5\n",
+     "files_to_move = random.sample(all_files, twenty_percent)\n",
+     "\n",
+     "for each_file in files_to_move:\n",
+     "    source_file = os.path.join(source_directory, each_file)\n",
+     "    destination_file = os.path.join(destination_directory, each_file)\n",
+     "\n",
+     "    # move the file\n",
+     "    shutil.move(source_file, destination_file)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "7668aa65-2fb1-4770-9e6a-50e378f7150e",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Assess the contrast quality of each image (overall distribution of pixel intensities)\n",
+     "def check_histogram_quality(gray):\n",
+     "    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])\n",
+     "    hist_sum = hist.sum()\n",
+     "    hist_normalized = hist / hist_sum\n",
+     "    hist_std = hist_normalized.std()\n",
+     "    return hist_std\n",
+     "\n",
+     "# Check the sharpness of each image via the variance of the Laplacian\n",
+     "def check_sharpness(gray):\n",
+     "    return cv2.Laplacian(gray, cv2.CV_64F).var()\n",
+     "\n",
+     "# Check the mean and variance of pixel intensities\n",
+     "def check_mean_variance(gray):\n",
+     "    mean_intensity = np.mean(gray)\n",
+     "    variance_intensity = np.var(gray)\n",
+     "    return mean_intensity, variance_intensity\n",
+     "\n",
+     "# Return a report on the quality of each image\n",
+     "def check_image_quality(folder):\n",
+     "    results = []  # Collect results for all images\n",
+     "    for filename in os.listdir(folder):\n",
+     "        if filename.lower().endswith('.jpg'):\n",
+     "            image_path = os.path.join(folder, filename)\n",
+     "            print(f\"Processing: {filename}\")\n",
+     "            image = cv2.imread(image_path)\n",
+     "            if image is None:\n",
+     "                results.append(f\"{filename}: Error: Image not found.\")\n",
+     "                continue  # Skip to the next image\n",
+     "\n",
+     "            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n",
+     "\n",
+     "            # Quality assessments\n",
+     "            hist_std = check_histogram_quality(gray)\n",
+     "            sharpness = check_sharpness(gray)\n",
+     "            mean_intensity, variance_intensity = check_mean_variance(gray)\n",
+     "\n",
+     "            quality_issues = []\n",
+     "\n",
+     "            print(f\"hist_std for {image_path}: {hist_std}\")\n",
+     "\n",
+     "            # Histogram quality check\n",
+     "            if hist_std <= 0.1:\n",
+     "                quality_issues.append(\"Histogram variance is low; consider improving contrast.\")\n",
+     "\n",
+     "            # Sharpness check\n",
+     "            if sharpness < 100:  # Adjust as necessary\n",
+     "                quality_issues.append(\"Image is blurry; consider sharpening.\")\n",
+     "\n",
+     "            # Mean intensity check\n",
+     "            if mean_intensity <= 50:\n",
+     "                quality_issues.append(\"Image may be underexposed; consider brightening.\")\n",
+     "            elif mean_intensity >= 200:\n",
+     "                quality_issues.append(\"Image may be overexposed; consider reducing brightness.\")\n",
+     "\n",
+     "            # Variance check\n",
+     "            if variance_intensity < 1000:  # Adjust threshold as necessary\n",
+     "                quality_issues.append(\"Image has low intensity variance; check for flat areas.\")\n",
+     "\n",
+     "            # Report results for this image\n",
+     "            if quality_issues:\n",
+     "                results.append(f\"{filename}: Image quality is not satisfactory. Issues found:\\n- \" + \"\\n- \".join(quality_issues))\n",
+     "            else:\n",
+     "                results.append(f\"{filename}: Image quality is good.\")\n",
+     "\n",
+     "    return \"\\n\".join(results)  # Return results for all images\n",
+     "\n",
+     "if __name__ == \"__main__\":\n",
+     "    input_folder = input(\"Enter the path to the input folder containing images: \")\n",
+     "    result = check_image_quality(input_folder)\n",
+     "    print(result)"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.11.1"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
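
A note on the duplicate remover above: because `calculate_hash` takes an MD5 over an 8×8 RGB thumbnail, it only flags images whose downscaled pixels match byte-for-byte; exact copies collide, while a re-encode or small edit usually changes the digest. A small sketch with hypothetical file names:

```python
import hashlib
from PIL import Image

def calculate_hash(image_path):
    # Same scheme as the notebook: MD5 over an 8x8 RGB thumbnail
    with Image.open(image_path) as img:
        thumb = img.convert("RGB").resize((8, 8))
    return hashlib.md5(thumb.tobytes()).hexdigest()

# Hypothetical files: a byte-identical copy hashes the same;
# a recompressed or edited version generally does not.
print(calculate_hash("cat.jpg") == calculate_hash("cat_copy.jpg"))    # True
print(calculate_hash("cat.jpg") == calculate_hash("cat_resaved.jpg")) # usually False
```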
datasets/README.md ADDED
@@ -0,0 +1,9 @@
+ ```plaintext
+ dataset/
+ ├── test/
+ │   ├── FAKE/
+ │   └── REAL/
+ └── train/
+     ├── FAKE/
+     └── REAL/
+ ```
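
With this layout, `tf.keras.utils.image_dataset_from_directory` (used by both notebooks) infers labels from the subdirectory names in alphanumeric order, so `FAKE` maps to 0 and `REAL` to 1; that is why a sigmoid output above 0.5 is read as REAL. A minimal check, assuming the `dataset/` tree above sits in the working directory:

```python
import tensorflow as tf

# Class names come from folder names sorted alphanumerically: FAKE -> 0, REAL -> 1
train_data = tf.keras.utils.image_dataset_from_directory(
    "dataset/train",
    image_size=(224, 224),
    batch_size=32,
)
print(train_data.class_names)  # ['FAKE', 'REAL']
```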
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ numpy==2.1.3
+ streamlit==1.40.0
+ tensorflow==2.18.0
styles/detectiveMag.svg ADDED
styles/style.css ADDED
@@ -0,0 +1,179 @@
+ /* Reset and global styles */
+ * {
+   margin: 0;
+   box-sizing: border-box;
+   background-color: #001220 !important;
+ }
+
+ html,
+ body {
+   height: 100vh !important;
+   width: 100%;
+   overflow-x: hidden;
+   padding: 0 !important;
+ }
+
+ /* Title Styling */
+ .title {
+   font-family: "Segoe UI", Tahoma, Geneva, Verdana, sans-serif;
+   font-size: 2em; /* Scalable font size */
+   text-align: center;
+   background: linear-gradient(to right, #16f0da 0%, #ff93ed 100%);
+   -webkit-background-clip: text;
+   -webkit-text-fill-color: transparent;
+   margin: 0 !important;
+ }
+
+ /* Upload Line */
+ .upload_line {
+   font-family: "Segoe UI", Tahoma, Geneva, Verdana, sans-serif;
+   font-size: 1rem;
+   text-align: center;
+   color: white;
+   margin: 1em 0;
+ }
+
+ @keyframes blink {
+   25% {
+     opacity: 0.5;
+   }
+   50% {
+     opacity: 0;
+   }
+   75% {
+     opacity: 0.5;
+   }
+ }
+
+ /* Base styles for magnifying glasses */
+ .detectiveMag1,
+ .detectiveMag2,
+ .detectiveMag3 {
+   position: fixed;
+   width: 10vw;
+   max-width: 100px;
+   max-height: 100px;
+   animation: blink 6s infinite linear;
+ }
+
+ .detectiveMag2 {
+   animation-delay: 2s;
+ }
+
+ .detectiveMag3 {
+   animation-delay: 4s;
+ }
+
+ /* Result Styling */
+ .result {
+   color: rgb(179, 217, 253);
+   font-size: 1.5rem;
+   z-index: 1;
+   text-align: center;
+   margin-top: 1.5em;
+   font-family: "Segoe UI", Tahoma, Geneva, Verdana, sans-serif;
+ }
+
+ .resultword {
+   text-transform: uppercase;
+   background-image: linear-gradient(
+     -225deg,
+     #aae2e5 0%,
+     #edaee8 29%,
+     #421f7e 67%,
+     #ff0099 100%
+   );
+   background-size: 200% auto;
+   color: #fff;
+   background-clip: text;
+   text-fill-color: transparent;
+   -webkit-background-clip: text;
+   -webkit-text-fill-color: transparent;
+   animation: textclip 2s linear;
+   display: inline-block;
+   font-size: 32px;
+ }
+
+ @keyframes textclip {
+   to {
+     background-position: 200% center;
+   }
+ }
+
+ /* Media Queries for Responsive Design */
+ @media (max-width: 1024px) {
+   .title {
+     font-size: 1.8em;
+   }
+
+   .upload_line {
+     font-size: 0.9rem;
+   }
+
+   .detectiveMag1,
+   .detectiveMag2,
+   .detectiveMag3 {
+     width: 12vw; /* Slightly larger magnifying glasses */
+   }
+
+   .result {
+     font-size: 1.2rem;
+   }
+ }
+
+ @media (max-width: 924px) {
+   .detectiveMag1,
+   .detectiveMag2,
+   .detectiveMag3 {
+     width: 10vw;
+     background-color: transparent !important;
+   }
+ }
+
+ @media (max-width: 768px) {
+   .title {
+     font-size: 1.5em;
+   }
+
+   .upload_line {
+     font-size: 0.8rem;
+   }
+
+   .detectiveMag1,
+   .detectiveMag2,
+   .detectiveMag3 {
+     width: 15vw; /* Larger magnifying glasses */
+     background-color: transparent !important;
+   }
+
+   .result {
+     font-size: 1rem;
+   }
+ }
+
+ @media (max-width: 480px) {
+   .title {
+     font-size: 1.2em;
+   }
+
+   .upload_line {
+     font-size: 0.7rem;
+   }
+
+   .detectiveMag1,
+   .detectiveMag2,
+   .detectiveMag3 {
+     width: 20vw; /* Even larger magnifying glasses */
+   }
+
+   .result {
+     font-size: 0.9rem;
+   }
+ }
+
+ @media (max-width: 360px) {
+   .detectiveMag1,
+   .detectiveMag2,
+   .detectiveMag3 {
+     visibility: hidden;
+   }
+ }