File size: 2,949 Bytes
88468ae
 
 
 
 
542d3d4
c7a840d
 
c894298
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9323d30
 
c894298
 
 
 
 
 
 
 
9323d30
 
c894298
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c5c3656
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
#import tensorflow.keras as K
#from tensorflow.keras import layers

import tensorflow.python.keras as K
from tensorflow.python.keras import layers
import keras

import os
import tensorflow as tf
import gradio as gr
from extract_landmarks import get_data_for_test,extract_landmark,merge_video_prediction

# Inference hyper-parameters — must match the values used when the
# checkpoint weights ('g1.h5' / 'g2.h5') were trained.
block_size = 60   # frames per landmark sample block fed to the RNN
DROPOUT_RATE = 0.5
RNN_UNIT = 64     # hidden units per GRU direction
# Tolerate duplicate OpenMP runtimes (common MKL/conda conflict) instead of aborting.
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# Silence TensorFlow C++ logging below ERROR level.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# Let TF allocate GPU memory on demand rather than grabbing all VRAM up front.
gpus = tf.config.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(device=gpu, enable=True)
device = "CPU" if len(gpus) == 0 else "GPU"
print("using {}".format(device))

def predict(video):
    """Classify a video as "Fake" or "Real" from facial-landmark sequences.

    Pipeline: extract per-frame landmarks, run two bidirectional-GRU
    classifiers (one on raw landmark sequences of length ``block_size``,
    one on frame-to-frame differences of length ``block_size - 1``),
    average their fake-probabilities per sample block, then merge block
    scores into per-video scores.

    Args:
        video: path to the input video file (as supplied by gradio).

    Returns:
        "Fake" if the merged video score is >= 0.5, "Real" otherwise,
        or "Unknown" when no scorable blocks could be extracted.
    """
    path = extract_landmark(video)
    test_samples, test_samples_diff, _, _, test_sv, test_vc = get_data_for_test(path, 1, block_size)

    def _build_classifier(timesteps):
        # Shared architecture for both classifiers; only the sequence
        # length differs (raw blocks vs. first-difference blocks).
        # NOTE(review): the keras internal paths (layers.wrappers /
        # layers.recurrent_v2) are pinned to this project's TF version;
        # public equivalents are layers.Bidirectional / layers.GRU.
        return K.Sequential([
            layers.InputLayer(input_shape=(timesteps, 136)),
            layers.Dropout(0.25),
            keras.layers.wrappers.Bidirectional(keras.layers.recurrent_v2.GRU(RNN_UNIT)),
            layers.Dropout(DROPOUT_RATE),
            layers.Dense(64, activation='relu'),
            layers.Dropout(DROPOUT_RATE),
            layers.Dense(2, activation='softmax')
        ])

    model = _build_classifier(block_size)
    model_diff = _build_classifier(block_size - 1)

    # Compiling is only needed so predict() can run; nothing is trained here.
    lossFunction = K.losses.SparseCategoricalCrossentropy(from_logits=False)
    optimizer = K.optimizers.Adam(learning_rate=0.001)
    model.compile(optimizer=optimizer,
                  loss=lossFunction,
                  metrics=['accuracy'])
    model_diff.compile(optimizer=optimizer,
                  loss=lossFunction,
                  metrics=['accuracy'])

#----Using Deeperforensics 1.0 Parameters----#
    model.load_weights('g1.h5')
    model_diff.load_weights('g2.h5')

    prediction = model.predict(test_samples)
    prediction_diff = model_diff.predict(test_samples_diff)
    # Average the two models' fake-probabilities (softmax column 1) per block.
    mix_predict = [(p[1] + pd[1]) / 2
                   for p, pd in zip(prediction, prediction_diff)]

    prediction_video = merge_video_prediction(mix_predict, test_sv, test_vc)

    # BUG FIX: the original loop rebound `label` on every iteration, so only
    # the last video's verdict was ever returned, and it raised NameError
    # when `prediction_video` was empty. gradio passes one video per call,
    # so report the last (normally the only) score, with a safe fallback.
    if len(prediction_video) == 0:
        return "Unknown"
    return "Fake" if prediction_video[-1] >= 0.5 else "Real"

# Wire the classifier into a simple Gradio demo: one video in, one text
# verdict out, with bundled fake/real sample clips as examples.
# NOTE(review): gr.inputs/gr.outputs is the legacy (pre-3.x) Gradio API —
# confirm against the pinned gradio version before upgrading.
iface = gr.Interface(
    fn=predict,
    inputs=gr.inputs.Video(),
    outputs=gr.outputs.Textbox(),
    examples=[["sample_fake.mp4"], ["sample_real.mp4"]],
)
iface.launch()