David Fischinger committed on
Commit a435037
0 Parent(s):

Initial Commit

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
IMVIP_Supplementary_Material/.gitattributes ADDED
@@ -0,0 +1,2 @@
+ models/model1 filter=lfs diff=lfs merge=lfs -text
+ models/model2 filter=lfs diff=lfs merge=lfs -text
IMVIP_Supplementary_Material/__init__.py ADDED
File without changes
IMVIP_Supplementary_Material/models/model1/keras_metadata.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e60097affd2751ea4540b0659522b06c2755d0ec4c8aede29a1b397d960b7949
+ size 194745
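
The three lines above are a Git LFS pointer stub, not the model itself: the binary lives in LFS storage, and the repository records only its oid and size. A minimal sketch of reading such a stub in Python (the parse_lfs_pointer helper is illustrative, not part of this repo):

    # Illustrative helper (not part of this repo): parse a Git LFS pointer stub.
    def parse_lfs_pointer(path):
        fields = {}
        with open(path) as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        # e.g. {'version': 'https://git-lfs.github.com/spec/v1',
        #       'oid': 'sha256:e60097...', 'size': '194745'}
        return fields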
IMVIP_Supplementary_Material/models/model1/saved_model.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7664979c02d7a1015b700e66b80291276f336e9bf16d8a3b1ca527cf4b91e5fb
+ size 1800047
IMVIP_Supplementary_Material/models/model1/variables/.gitattributes ADDED
@@ -0,0 +1 @@
+ variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
IMVIP_Supplementary_Material/models/model1/variables/variables.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46ec82afc5d7ab67e757b20fa5c4c16845881dab62516c6405dc87b1a381f58b
+ size 62888221
IMVIP_Supplementary_Material/models/model1/variables/variables.index ADDED
Binary file (23.1 kB)
IMVIP_Supplementary_Material/models/model2/keras_metadata.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a38bbb31c5972308f17290c18665747ab537a3a63eb986d02eb212b6a41f6c5
+ size 261437
IMVIP_Supplementary_Material/models/model2/saved_model.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec722ab01d8860361c3912f811630983204c34f9d34d92fd0f70773418bd945d
+ size 2068453
IMVIP_Supplementary_Material/models/model2/variables/.gitattributes ADDED
@@ -0,0 +1 @@
+ variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
IMVIP_Supplementary_Material/models/model2/variables/variables.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8be65ab6ff1e7ebebbcc7257daee8373c43819a909dfda5df1a61b59a4b17ca
+ size 21416427
IMVIP_Supplementary_Material/models/model2/variables/variables.index ADDED
Binary file (12 kB)
IMVIP_Supplementary_Material/scripts/__init__.py ADDED
File without changes
IMVIP_Supplementary_Material/scripts/dfutils.py ADDED
@@ -0,0 +1,170 @@
+ import cv2
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from sklearn.metrics import roc_auc_score, f1_score, jaccard_score, accuracy_score
+ import tensorflow as tf
+
+
+ # create prediction mask
+ def create_mask(pred_mask):
+     if pred_mask.shape[-1] > 1:
+         pred_mask = tf.argmax(pred_mask, axis=-1)
+         pred_mask = pred_mask[..., tf.newaxis]
+
+     return pred_mask[0]
+
+
+ # pixel-wise F1 and IoU between a predicted mask and the ground truth
+ def metric_copy(premask, groundtruth):
+     seg_inv, gt_inv = np.logical_not(premask), np.logical_not(groundtruth)
+     true_pos = float(np.logical_and(premask, groundtruth).sum())  # float for division
+     true_neg = np.logical_and(seg_inv, gt_inv).sum()
+     false_pos = np.logical_and(premask, gt_inv).sum()
+     false_neg = np.logical_and(seg_inv, groundtruth).sum()
+     f1 = 2 * true_pos / (2 * true_pos + false_pos + false_neg + 1e-6)
+     cross = np.logical_and(premask, groundtruth)
+     union = np.logical_or(premask, groundtruth)
+     iou = np.sum(cross) / (np.sum(union) + 1e-6)
+     if np.sum(cross) + np.sum(union) == 0:
+         iou = 1
+     return f1, iou
+
+
+ def show_prediction(img, pred):
+     print("max_pred = ", np.max(pred), " min_pred = ", np.min(pred))
+     plt.subplot(1, 2, 1)
+     plt.imshow(img)
+     plt.subplot(1, 2, 2)
+     plt.imshow(pred, cmap='gray')  # , vmin=0, vmax=1
+     plt.show()
+
+
+ # NOTE: expects `model`, `sample_image` and `sample_mask` to exist as globals
+ def show_predictions(dataset=None, num=1):
+     if dataset:
+         for image, mask in dataset.take(num):
+             pred_mask = model.predict(image)
+             display([image[0], mask[0], create_mask(pred_mask)])
+     else:
+         print(sample_image.shape)
+         print(sample_mask.shape)
+         display([sample_image, sample_mask,
+                  create_mask(model.predict(sample_image[tf.newaxis, ...]))])
+
+
+ def display(display_list, reverseRGB=True):
+     plt.figure(figsize=(4, 4))
+
+     title = ['Input Image', 'True Mask', 'Predicted Mask']
+
+     for i in range(len(display_list)):
+         plt.subplot(1, len(display_list), i + 1)
+         plt.title(title[i])
+         if reverseRGB:
+             plt.imshow(tf.keras.utils.array_to_img(display_list[i][..., ::-1]))
+         else:
+             plt.imshow(tf.keras.utils.array_to_img(display_list[i]))
+         plt.axis('off')
+     plt.show()
+
+
+ def get_gt_and_osn_folders(folder):
+     folder_list = [folder]
+     folder_list.append(folder + "_Facebook")
+     folder_list.append(folder + "_Whatsapp")
+     folder_list.append(folder + "_Weibo")
+     folder_list.append(folder + "_Wechat")
+     gt_folder = folder + "_GT"
+     return gt_folder, folder_list
+
+
+ def get_gt_and_osn_folder(folder, osn):
+     osn_folder = folder + osn
+     gt_folder = folder + "_GT"
+     return gt_folder, osn_folder
+
+
+ # plots the image + prediction + ground truth
+ def plot_img_pred_gt(img_path, pre_t, gt):
+     print("INPUT plot_img_pred_gt:")
+     print("  img_path: ", img_path)
+     # get image
+     img = cv2.imread(img_path)
+     # plot image, prediction and mask
+     plot_img_pred_gt_execute(img, pre_t, gt)
+
+
+ def plot_img_pred_gt_execute(img, pre_t, gt, DISCRETIZE_OUTPUT=True):
+     if DISCRETIZE_OUTPUT:
+         pre_t = pre_t.numpy()
+         pre_t[pre_t > 0.5] = 1.0
+         pre_t[pre_t <= 0.5] = 0.0
+     plt.subplots(1, 3, figsize=(10, 10))
+     plt.subplot(1, 3, 1)
+     plt.imshow(img[..., ::-1])  # BGR -> RGB
+     plt.title("Original Image")
+     plt.subplot(1, 3, 2)
+     plt.imshow(pre_t, cmap='gray')
+     # plt.imshow(pre_t > 0.5, cmap='gray')
+     plt.title("Prediction")
+     plt.subplot(1, 3, 3)
+     plt.imshow(gt, cmap='gray')
+     plt.title("Ground Truth")
+     plt.show()
+
+
+ # True if the mask covers more than 50% of the image
+ def mask_bigger_fifty_perc(mask):
+     mask_size = mask.size
+     nr_points_in_mask = mask_size - (mask == 0.).sum()
+     mask_cover_perc_of_img = nr_points_in_mask / mask_size
+     if mask_cover_perc_of_img > 0.5:
+         return True
+     return False
+
+
+ # evaluation for one image (AUC ROC, F1, IoU, accuracy); results are appended to the given lists
+ def eval_image(pre_t, gt, auc, f1, iou, acc):
+     pre = np.repeat(pre_t.numpy()[:, :, np.newaxis], 3, 2)
+     H, W, _ = pre.shape
+     Hg, Wg, C = gt.shape
+
+     if mask_bigger_fifty_perc(gt):
+         print("FLIP pre because mask > 50% of image")
+         pre = 1 - pre
+
+     if H != Hg or W != Wg:
+         print("ERROR: values not matching:")
+         print(f'H: {H}, W: {W}, C: {C}')
+         print(f'Hg: {Hg}, Wg: {Wg}, C: {C}')
+         gt = cv2.resize(gt, (W, H))
+     gt[gt > 127] = 255
+     gt[gt <= 127] = 0
+
+     if np.max(gt) != np.min(gt):
+         auc.append(roc_auc_score((gt.reshape(H * W * C) / 255.).astype('int'), pre.reshape(H * W * C)))
+     else:
+         print("eval_image(): skipping AUC, ground truth is constant (np.max(gt) == np.min(gt))")
+     pre[pre > 0.5] = 1.0
+     pre[pre <= 0.5] = 0.0
+
+     # consider changing to: a, b = metric_copy(pre, gt > 127)
+     # a, b = metric_copy(pre, gt / 255.)  # old
+     a, b = metric_copy(pre, gt)
+
+     # .astype on a tf tensor relies on tf numpy behavior (enabled in app.py)
+     pre_ = tf.reshape(pre, [-1])
+     gt_ = tf.reshape(gt / 255., [-1]).astype(tf.int32)
+     acc_tmp = accuracy_score(gt_, pre_)
+     acc.append(acc_tmp)
+
+     f1.append(a)
+     iou.append(b)
+     # print('Evaluation: AUC: %5.4f, F1: %5.4f, IOU: %5.4f' % (np.mean(auc), np.mean(f1), np.mean(iou)))
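A short usage sketch for the metric helpers in dfutils.py, with synthetic masks (the import path matches this repo; the masks and sizes are illustrative):

    import numpy as np
    from IMVIP_Supplementary_Material.scripts import dfutils

    # synthetic 256x256 binary masks standing in for a prediction and a ground truth
    pred = np.random.rand(256, 256) > 0.5
    gt = np.random.rand(256, 256) > 0.5

    f1, iou = dfutils.metric_copy(pred, gt)
    print(f"F1: {f1:.4f}, IoU: {iou:.4f}")
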
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Df Net
+ emoji: ⚡
+ colorFrom: gray
+ colorTo: gray
+ sdk: streamlit
+ sdk_version: 1.25.0
+ app_file: app.py
+ pinned: false
+ license: cc-by-nc-sa-4.0
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,62 @@
+ from PIL import Image
+ import streamlit as st
+ import cv2
+ import numpy as np
+ import os
+ import tensorflow as tf
+
+ from IMVIP_Supplementary_Material.scripts import dfutils  # methods used for DF-Net
+
+ DESCRIPTION = """# DF-Net
+ The Digital Forensics Network is designed and trained to detect and locate image manipulations.
+ More information can be found in this [publication](https://zenodo.org/record/8214996).
+ """
+
+ IMG_SIZE = 256
+
+ tf.experimental.numpy.experimental_enable_numpy_behavior()
+ # np.warnings.filterwarnings('error', category=np.VisibleDeprecationWarning)
+
+
+ def check_forgery_df(img):
+     shape_original = img.shape
+     img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
+     x = np.expand_dims(img.astype('float32') / 255., axis=0)
+
+     # run both models; keep the element-wise maximum of the two predictions
+     pred1 = model_M1.predict(x, verbose=0)
+     pred2 = model_M2.predict(x, verbose=0)
+     pred = np.max([pred1, pred2], axis=0)
+
+     pred = dfutils.create_mask(pred)
+     pred = pred.reshape(pred.shape[-3:-1])
+     # scale the mask back up to the original image size
+     resized_image = cv2.resize(pred, (shape_original[1], shape_original[0]), interpolation=cv2.INTER_LINEAR)
+
+     return resized_image
+
+
+ def evaluate(img):
+     pre_t = check_forgery_df(img)
+     st.image(pre_t, caption="White area indicates potential image manipulations.")
+
+
+ st.markdown(DESCRIPTION)
+
+ uploaded_file = st.file_uploader("Please upload an image", type=["jpeg", "jpg", "png"])
+
+ if uploaded_file is not None:
+     # load models
+     model_M1 = tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model1/")
+     model_M2 = tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model2/")
+
+     # convert the uploaded file to an OpenCV image
+     file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
+     opencv_image = cv2.imdecode(file_bytes, 1)
+     reversed_image = opencv_image[:, :, ::-1]  # BGR -> RGB
+     st.image(reversed_image, caption="Input Image")
+     evaluate(reversed_image)
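check_forgery_df ensembles the two models by taking the element-wise maximum of their predictions, so a pixel is flagged if either model flags it. The same pipeline can be exercised outside Streamlit; a hedged sketch (model paths as committed above, the image path is a placeholder):

    import cv2
    import numpy as np
    import tensorflow as tf

    IMG_SIZE = 256
    model_M1 = tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model1/")
    model_M2 = tf.keras.models.load_model("IMVIP_Supplementary_Material/models/model2/")

    img = cv2.imread("example.jpg")[:, :, ::-1]  # placeholder path; BGR -> RGB as in app.py
    x = np.expand_dims(cv2.resize(img, (IMG_SIZE, IMG_SIZE)).astype("float32") / 255., axis=0)

    # a pixel counts as manipulated if either model flags it
    pred = np.max([model_M1.predict(x, verbose=0), model_M2.predict(x, verbose=0)], axis=0)
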
packages.txt ADDED
@@ -0,0 +1 @@
+ python3-opencv
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ opencv-python
+ tensorflow
+ pillow
+ scikit-learn
+ scikit-image