import gradio as gr

import cv2
from mtcnn.mtcnn import MTCNN
import tensorflow as tf
import tensorflow_addons  # imported so any TFA custom objects in the saved model can be deserialized
import numpy as np

import os
# import zipfile
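
# Assumed runtime dependencies for this Space (the actual requirements.txt is
# not shown here, so this list is inferred from the imports above):
# gradio, opencv-python, mtcnn, tensorflow, tensorflow-addons, numpy.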


#local_zip = "FINAL-EFFICIENTNETV2-B0.zip"
#zip_ref = zipfile.ZipFile(local_zip, 'r')
#zip_ref.extractall('FINAL-EFFICIENTNETV2-B0')
#zip_ref.close()

#import h5py

# Open the H5 model file
#h5_file = h5py.File('model_cp.h5', 'r')

# Read the contents
# You can explore the contents with the following commands
#print(h5_file.keys())  # list of keys in the file
#print(h5_file['tf_lite_model.tflite'].keys())  # list of keys in the weights group

# Create a new HDF5 file
#hdf5_file = h5py.File('model.hdf5', 'w')

# Copy the contents from the H5 model file into the new file
#h5_file.copy('model_weights', hdf5_file)

# Close the files
#h5_file.close()
#hdf5_file.close()

# Load the trained deepfake classifier and create an MTCNN face detector.
model = tf.keras.models.load_model('model_cp.h5')

detector = MTCNN()

def deepfakespredict(input_img):
    labels = ['real', 'fake']
    pred = [0, 0]
    text = ""
    text2 = ""

    # Detect faces; only the first detected face is classified.
    face = detector.detect_faces(input_img)

    if len(face) > 0:
        x, y, width, height = face[0]['box']
        x2, y2 = x + width, y + height

        # Draw the bounding box on the image that is returned to the UI.
        cv2.rectangle(input_img, (x, y), (x2, y2), (0, 255, 0), 2)

        # Crop the face, swap colour channels, then resize and scale to the
        # model's 224x224 input in [0, 1].
        face_image = input_img[y:y2, x:x2]
        face_image2 = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
        face_image3 = cv2.resize(face_image2, (224, 224))
        face_image4 = face_image3 / 255

        # pred[0] is the 'real' probability, pred[1] the 'fake' probability.
        pred = model.predict(np.expand_dims(face_image4, axis=0))[0]

        if pred[1] >= 0.6:
            text = "The image is FAKE."
        elif pred[0] >= 0.6:
            text = "The image is REAL."
        else:
            text = "The image may be REAL or FAKE."

    else:
        text = "Face is not detected in the image."

    text2 = "REAL: " + str(np.round(pred[0]*100, 2)) + "%, FAKE: " + str(np.round(pred[1]*100, 2)) + "%"

    return input_img, text, text2, {labels[i]: float(pred[i]) for i in range(2)}
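
# A minimal local sanity check (a sketch, not part of the app): 'sample.png' is
# a placeholder path, so running this requires the model file and an image on
# disk. Uncomment to exercise deepfakespredict() outside the Gradio UI.
#
# if __name__ == "__main__":
#     test_img = cv2.imread('sample.png')                 # BGR array from OpenCV
#     boxed, verdict, scores, label_dict = deepfakespredict(test_img)
#     print(verdict)
#     print(scores)
#     print(label_dict)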


title = "Deepfakes Image Detector"
description = "A deepfake detection project: classifies the face in an uploaded image as real or fake. Contact: +967776215118"

examples = [
    ['Fake-1.png'],
    ['Fake-2.png'],
    ['Fake-3.png'],
    ['Fake-4.png'],
    ['Fake-5.png'],
    ['Real-1.png'],
    ['Real-2.png'],
    ['Real-3.png'],
    ['Real-4.png'],
    ['Real-5.png']
]

gr.Interface(deepfakespredict,
             inputs=["image"],
             outputs=[gr.outputs.Image(type="pil", label="Detected face"),
                      "text",
                      "text",
                      gr.outputs.Label(num_top_classes=None, type="auto", label="Confidence")],
             title=title,
             description=description,
             examples=examples,
             examples_per_page=5
             ).launch()
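
# If this Space runs on Gradio 4.x (an assumption; the pinned version is not
# shown here), the legacy gr.outputs.* classes no longer exist and the same
# interface would be built from component classes, roughly:
#
# gr.Interface(deepfakespredict,
#              inputs=gr.Image(),
#              outputs=[gr.Image(label="Detected face"),
#                       gr.Textbox(label="Verdict"),
#                       gr.Textbox(label="Scores"),
#                       gr.Label(label="Confidence")],
#              title=title, description=description,
#              examples=examples, examples_per_page=5).launch()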