import gradio as gr
import cv2
from mtcnn.mtcnn import MTCNN
import tensorflow as tf
import tensorflow_addons  # imported so TensorFlow Addons objects used by the saved models can be deserialized
import numpy as np
import os
import zipfile
local_zip = "FINAL-EFFICIENTNETV2-B0.zip" | |
zip_ref = zipfile.ZipFile(local_zip, 'r') | |
zip_ref.extractall('FINAL-EFFICIENTNETV2-B0') | |
zip_ref.close() | |
local_zip = "FINAL-EFFICIENTNETV2-S.zip" | |
zip_ref = zipfile.ZipFile(local_zip, 'r') | |
zip_ref.extractall('FINAL-EFFICIENTNETV2-S') | |
zip_ref.close() | |
local_zip = "deepfakes-test-images.zip" | |
zip_ref = zipfile.ZipFile(local_zip, 'r') | |
zip_ref.extractall('deepfakes-test-images') | |
zip_ref.close() | |
# Load both exported models on the CPU and set up the MTCNN face detector.
with tf.device('/cpu:0'):
    model_b0 = tf.keras.models.load_model("FINAL-EFFICIENTNETV2-B0")
    model_s = tf.keras.models.load_model("FINAL-EFFICIENTNETV2-S")

detector = MTCNN()
labels = ["real", "fake"]  # index 0 = real, index 1 = fake, matching the model output order

def deepfakespredict(select_model, input_img):
    tf.keras.backend.clear_session()

    # Pick the requested model.
    if select_model == "EfficientNetV2-B0":
        model = model_b0
    else:  # "EfficientNetV2-S"
        model = model_s

    text = ""
    pred = [0.0, 0.0]  # default confidences when no face is detected

    # Detect the most prominent face, crop it, and classify the crop.
    face = detector.detect_faces(input_img)
    if len(face) > 0:
        x, y, width, height = face[0]['box']
        x2, y2 = x + width, y + height
        cv2.rectangle(input_img, (x, y), (x2, y2), (0, 255, 0), 2)

        face_image = input_img[y:y2, x:x2]
        face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
        face_image = cv2.resize(face_image, (224, 224))
        face_image = face_image / 255.0

        pred = model.predict(np.expand_dims(face_image, axis=0))[0]
        if pred[1] >= 0.6:
            text = "The image is fake."
        elif pred[0] >= 0.6:
            text = "The image is real."
        else:
            text = "The image might be real or fake."
    else:
        text = "Face is not detected in the image."

    return text, input_img, {labels[i]: float(pred[i]) for i in range(2)}
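
# Illustrative local check (not part of the Space UI; the file path is one of the
# bundled examples extracted above): the prediction function can be called directly
# on an image array, e.g.
#
#   sample = cv2.imread("deepfakes-test-images/Fake-1.jpg")
#   label_text, annotated, confidences = deepfakespredict("EfficientNetV2-B0", sample)
#   print(label_text, confidences)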
title="EfficientNetV2 Deepfakes Image Detector" | |
description="This is a demo implementation of EfficientNetV2 Deepfakes Image Detector. To use it, simply upload your image, or click one of the examples to load them." | |
# Each example supplies a value for both inputs (model choice and image);
# EfficientNetV2-B0 is used as the default selection here.
examples = [
    ["EfficientNetV2-B0", "deepfakes-test-images/Fake-1.jpg"],
    ["EfficientNetV2-B0", "deepfakes-test-images/Fake-2.jpg"],
    ["EfficientNetV2-B0", "deepfakes-test-images/Fake-3.jpg"],
    ["EfficientNetV2-B0", "deepfakes-test-images/Fake-4.jpg"],
    ["EfficientNetV2-B0", "deepfakes-test-images/Fake-5.jpg"],
    ["EfficientNetV2-B0", "deepfakes-test-images/Real-1.jpg"],
    ["EfficientNetV2-B0", "deepfakes-test-images/Real-2.jpg"],
    ["EfficientNetV2-B0", "deepfakes-test-images/Real-3.jpg"],
    ["EfficientNetV2-B0", "deepfakes-test-images/Real-4.jpg"],
    ["EfficientNetV2-B0", "deepfakes-test-images/Real-5.jpg"],
]
gr.Interface(deepfakespredict,
             inputs=[gr.inputs.Radio(["EfficientNetV2-B0", "EfficientNetV2-S"], label="Select model:"), "image"],
             # The function returns a NumPy array, so the image output uses type="numpy".
             outputs=["text",
                      gr.outputs.Image(type="numpy", label="Detected face"),
                      gr.outputs.Label(num_top_classes=None, type="auto", label="Confidence")],
             title=title,
             description=description,
             examples=examples
             ).launch()