# (Removed non-Python scrape residue from the Hugging Face Spaces page view:
#  status text, file size, git blob hashes, and the line-number gutter.)
import os

# IMPORTANT: these must be set BEFORE tensorflow is imported — TF reads them
# at import time, so setting them afterwards (as the original code did) has
# no effect on C++ log verbosity or oneDNN behavior.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'

import base64
import json
from io import BytesIO

import cv2
import gradio as gr
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model

# Pre-trained deepfake-detection model.
# Assumes 300x300 inputs scaled to [0, 1] (see preprocess_frame) and a
# sigmoid-style score thresholded at 0.86 in main() — TODO confirm against
# the model's training pipeline.
model = load_model("all_in_one.h5")
def decode_frame(base64_str):
    """Decode a base64-encoded image string into a numpy pixel array.

    Returns ``None`` (after logging the problem) when the payload is not
    valid base64 or not a parseable image, so callers can skip bad frames
    instead of aborting the whole batch.
    """
    try:
        raw_bytes = base64.b64decode(base64_str)
        pil_image = Image.open(BytesIO(raw_bytes))
        return np.array(pil_image)
    except Exception as err:
        print(f"Error decoding base64 frame: {err}")
        return None
def preprocess_frame(frame):
    """Resize and scale a frame, then add a batch axis for the model.

    Produces a float32 array of shape (1, 300, 300, C) with pixel values
    in [0, 1] — the input format the model expects. Returns ``None``
    (after logging) if resizing/conversion fails.
    """
    try:
        resized = cv2.resize(frame, (300, 300)).astype('float32') / 255.0
        return resized[np.newaxis, ...]
    except Exception as err:
        print(f"Error in preprocessing frame: {err}")
        return None
def predict_frames(frames):
    """Score each base64-encoded frame with the global model.

    Frames that fail decoding, fail preprocessing, or produce an empty
    prediction are logged and skipped rather than aborting the batch.
    Returns a list of raw model outputs, one per usable frame.
    """
    results = []
    for index, encoded in enumerate(frames):
        decoded = decode_frame(encoded)
        if decoded is None:
            print(f"Skipping frame {index} due to decoding error.")
            continue
        batch = preprocess_frame(decoded)
        if batch is None:
            print(f"Skipping frame {index} due to preprocessing error.")
            continue
        output = model.predict(batch)
        print(f"Prediction for frame {index}: {output}")
        if output.size == 0:
            print(f"No prediction returned for frame {index}.")
            continue
        results.append(output)
    return results
def main(params):
    """Gradio entry point: classify a set of frames as Deepfake or Real.

    Args:
        params: JSON string of the form
            {"frames": ["<base64 frame>", ...], "file_name": "video.mp4"}

    Returns:
        On success, a JSON string {"output": {...}} carrying the verdict and
        confidence (as a percentage). On bad input or when no frame could be
        scored, a dict with an "error" key — Gradio's JSON output component
        renders either form.
    """
    try:
        params = json.loads(params)
    except json.JSONDecodeError as e:
        return {"error": f"Invalid JSON input: {e.msg} at line {e.lineno} column {e.colno}"}
    print(f"JSON : \n{params}")

    frames = params.get("frames", [])
    video_path = params.get("file_name")
    print(f"Number of frames received: {len(frames)}")

    predictions = predict_frames(frames)
    print(f"Predictions: {predictions}")

    # Bug fix: np.mean([]) is NaN (plus a RuntimeWarning), and the old code
    # then raised ValueError and crashed the request. Return a structured
    # error instead, consistent with the invalid-JSON path above.
    if len(predictions) == 0:
        print("Predictions are empty.")
        return {"error": "No frames could be decoded and scored."}

    avg_prediction = np.mean(predictions)
    if np.isnan(avg_prediction):
        return {"error": "Average prediction is NaN, indicating an issue with the predictions."}

    # Model score above 0.86 is treated as a deepfake; the confidence is the
    # raw averaged score expressed as a percentage.
    result = 'Deepfake' if avg_prediction > 0.86 else 'Real'
    confidence = float(avg_prediction) * 100
    # Bug fix: the old debug print multiplied the percentage by 100 again.
    print(result, round(confidence, 2))

    output_data = {
        "file_name": video_path,
        "solution": [{
            "answer": result,
            "confidence": float("{:.2f}".format(confidence)),
        }],
    }
    return json.dumps({"output": output_data})
# Gradio interface.
# Bug fix: the example JSON in the label used the key "video", but main()
# reads the frames from the "frames" key — the example now matches the code.
inputt = gr.Textbox(label='''Parameters (JSON format) Eg. {"frames" : ["base64_frame1","base64_frame2",...], "file_name": "video.mp4"}''')
outputs = gr.JSON()
application = gr.Interface(fn=main, inputs=inputt, outputs=outputs, title="Deepfake Detection Model")
# Bind to all interfaces so the app is reachable inside a container/Space.
application.launch(server_name="0.0.0.0")
# (Removed stray "|" left over from the page scrape.)