import os

# Set environment variables to suppress TensorFlow warnings (these must be
# set before TensorFlow is imported to take effect)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'

import json
import base64
from io import BytesIO

import cv2
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model
import gradio as gr

# Load the pre-trained deepfake detection model
model = load_model("all_in_one.h5")

def decode_frame(base64_str):
    try:
        image_data = base64.b64decode(base64_str)
        # Convert to RGB so every frame has three channels; frames with an
        # alpha channel or grayscale frames would otherwise break the model input
        image = Image.open(BytesIO(image_data)).convert("RGB")
        return np.array(image)
    except Exception as e:
        print(f"Error decoding base64 frame: {e}")
        return None

def preprocess_frame(frame):
    try:
        # Resize to the model's expected input size and scale pixels to [0, 1]
        frame = cv2.resize(frame, (300, 300))
        frame = frame.astype('float32') / 255.0
        # Add a batch dimension: (300, 300, 3) -> (1, 300, 300, 3)
        return np.expand_dims(frame, axis=0)
    except Exception as e:
        print(f"Error in preprocessing frame: {e}")
        return None

def predict_frames(frames):
    predictions = []
    for i, frame_base64 in enumerate(frames):
        frame = decode_frame(frame_base64)
        if frame is None:
            print(f"Skipping frame {i} due to decoding error.")
            continue
        preprocessed_frame = preprocess_frame(frame)
        if preprocessed_frame is None:
            print(f"Skipping frame {i} due to preprocessing error.")
            continue
        prediction = model.predict(preprocessed_frame)
        print(f"Prediction for frame {i}: {prediction}")
        if prediction.size == 0:
            print(f"No prediction returned for frame {i}.")
            continue
        predictions.append(prediction)
    return predictions
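
# Optional batched variant: a minimal sketch, not part of the original app.
# It assumes the model accepts a batch of shape (N, 300, 300, 3); stacking the
# preprocessed frames and calling model.predict once is typically faster than
# one predict call per frame.
def predict_frames_batched(frames):
    batch = []
    for frame_base64 in frames:
        frame = decode_frame(frame_base64)
        if frame is None:
            continue
        preprocessed = preprocess_frame(frame)
        if preprocessed is None:
            continue
        batch.append(preprocessed[0])  # drop the per-frame batch dimension
    if not batch:
        return []
    # One forward pass over all valid frames; one prediction row per frame
    return list(model.predict(np.stack(batch)))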

def main(params):
    try:
        params = json.loads(params)
    except json.JSONDecodeError as e:
        return {"error": f"Invalid JSON input: {e.msg} at line {e.lineno} column {e.colno}"}
    print(f"JSON:\n{params}")

    frames = params.get("frames", [])
    video_path = params.get("file_name")
    print(f"Number of frames received: {len(frames)}")

    predictions = predict_frames(frames)
    print(f"Predictions: {predictions}")
    if len(predictions) == 0:
        # Return early instead of letting np.mean([]) produce NaN
        return {"error": "No valid predictions could be made from the provided frames."}

    avg_prediction = np.mean(predictions)
    if np.isnan(avg_prediction):
        raise ValueError("Average prediction is NaN, indicating an issue with the predictions.")

    result = 'Deepfake' if avg_prediction > 0.86 else 'Real'
    confidence = float(avg_prediction) * 100  # convert to a plain Python float and scale to a percentage
    print(result, round(confidence, 2))

    # Prepare the result as a JSON response
    output_data = {
        "file_name": video_path,
        "solution": [{
            "answer": result,
            "confidence": round(confidence, 2)
        }]
    }
    return json.dumps({"output": output_data})

# Gradio interface
inputt = gr.Textbox(label='''Parameters (JSON format) Eg. {"frames": ["base64_frame1", "base64_frame2", ...], "file_name": "video.mp4"}''')
outputs = gr.JSON()
application = gr.Interface(fn=main, inputs=inputt, outputs=outputs, title="Deepfake Detection Model")
application.launch(server_name="0.0.0.0")
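
# Client-side payload builder: a minimal sketch (not part of the original app)
# showing how a caller could turn a local video into the JSON this interface
# expects. Run it from a separate script while the app is up; the video path
# and frame-sampling step below are illustrative assumptions.
def build_payload(video_path="video.mp4", step=30):
    cap = cv2.VideoCapture(video_path)
    frames, index = [], 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if index % step == 0:
            encoded, buffer = cv2.imencode(".jpg", frame)
            if encoded:
                frames.append(base64.b64encode(buffer.tobytes()).decode("utf-8"))
        index += 1
    cap.release()
    # The returned JSON string can be pasted into the textbox or sent via the API
    return json.dumps({"frames": frames, "file_name": video_path})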