Update app.py
app.py CHANGED
@@ -1,10 +1,12 @@
 import cv2
 import numpy as np
-import argparse
 import json
 from tensorflow.keras.models import load_model
 import os
-
+import base64
+from io import BytesIO
+from PIL import Image
+import gradio as gr
 
 # Set environment variables to suppress TensorFlow warnings
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
@@ -13,6 +15,16 @@ os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
 # Load your pre-trained model
 model = load_model("all_in_one.h5")
 
+def decode_frame(base64_str):
+    """Decodes a base64 string to an image."""
+    try:
+        image_data = base64.b64decode(base64_str)
+        image = Image.open(BytesIO(image_data))
+        return np.array(image)
+    except Exception as e:
+        print(f"Error decoding base64 frame: {e}")
+        return None
+
 def preprocess_frame(frame):
     try:
         # Resize the frame as per your model input requirement
@@ -26,15 +38,22 @@ def preprocess_frame(frame):
 def predict_frames(frames):
     predictions = []
 
-    for i,
+    for i, frame_base64 in enumerate(frames):
+        frame = decode_frame(frame_base64)
+        if frame is None:
+            print(f"Skipping frame {i} due to decoding error.")
+            continue
+
         preprocessed_frame = preprocess_frame(frame)
         if preprocessed_frame is None:
             print(f"Skipping frame {i} due to preprocessing error.")
             continue
+
         prediction = model.predict(preprocessed_frame)
         if prediction.size == 0:
             print(f"No prediction returned for frame {i}.")
             continue
+
         predictions.append(prediction)
         print(f"Prediction for frame {i}: {prediction}")
 
@@ -49,9 +68,11 @@ def main(params):
         params = json.loads(params)
     except json.JSONDecodeError as e:
         return {"error": f"Invalid JSON input: {e.msg} at line {e.lineno} column {e.colno}"}
+
     print(f"JSON : \n{params}")
     frames = params.get("video", [])
     video_path = params.get("file_name")
+
     predictions = predict_frames(frames)
     print(predictions)
     if len(predictions) == 0:
@@ -65,7 +86,7 @@ def main(params):
     result = 'Deepfake' if avg_prediction > 0.5 else 'Real'
     confidence = float(avg_prediction) * 100  # Convert numpy float to Python float and scale
 
-    #
+    # Prepare the result as a JSON response
     output_data = {
         "file_name": video_path,
         "solution": {
@@ -73,14 +94,12 @@ def main(params):
             "confidence": float("{:.2f}".format(confidence))
         }
     }
-    return json.dump(output_data)
 
-
-    os.remove(video_path)
+    return output_data
 
-
-inputt = gr.Textbox(label='''Parameters (JSON format) Eg. {"
+# Gradio interface
+inputt = gr.Textbox(label='''Parameters (JSON format) Eg. {"video" : ["base64_frame1","base64_frame2",...], "file_name": "video.mp4"}''')
 outputs = gr.JSON()
 
-application = gr.Interface(fn=main, inputs=inputt, outputs=outputs, title="
-application.launch()
+application = gr.Interface(fn=main, inputs=inputt, outputs=outputs, title="Deepfake Detection Model")
+application.launch()
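
For reference, a minimal client-side sketch of how the JSON payload expected by the updated interface could be assembled. The video path, frame limit, and JPEG encoding below are illustrative assumptions, not part of the Space itself:

import base64
import json

import cv2

def build_payload(video_path, max_frames=16):
    # Read frames from a local video and base64-encode them as JPEG bytes,
    # which the server-side decode_frame() can open with PIL.
    cap = cv2.VideoCapture(video_path)
    frames = []
    while cap.isOpened() and len(frames) < max_frames:
        ok, frame = cap.read()
        if not ok:
            break
        # Note: OpenCV frames are BGR; apply cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # first if the model expects RGB input (unknown from the diff).
        ok, buffer = cv2.imencode(".jpg", frame)
        if ok:
            frames.append(base64.b64encode(buffer.tobytes()).decode("utf-8"))
    cap.release()
    # Same structure as the example shown in the Gradio textbox label.
    return json.dumps({"video": frames, "file_name": video_path})

payload = build_payload("video.mp4")  # "video.mp4" is a placeholder path

The resulting string can be pasted into the app's textbox, or passed directly to main() when testing the function locally.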