dubm committed on
Commit
b88adff
·
verified ·
1 Parent(s): 5908512

Upload tall.yaml

Browse files
Files changed (1) hide show
  1. tall.yaml +89 -0
tall.yaml ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # model setting
2
+ pretrained: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth # path to a pre-trained model, if using one
3
+ model_name: tall # model name
4
+
5
+ mask_grid_size: 16
6
+ num_classes: 2
7
+ embed_dim: 128
8
+ mlp_ratio: 4.0
9
+ patch_size: 4
10
+ window_size: [14, 14, 14, 7]
11
+ depths: [2, 2, 18, 2]
12
+ num_heads: [4, 8, 16, 32]
13
 + ape: true # use absolute position embedding
14
+ thumbnail_rows: 2
15
+ drop_rate: 0
16
+ drop_path_rate: 0.1
17
+
18
+ # dataset
19
+ all_dataset: [FaceForensics++, FF-F2F, FF-DF, FF-FS, FF-NT, FaceShifter, DeepFakeDetection, Celeb-DF-v1, Celeb-DF-v2, DFDCP, DFDC, DeeperForensics-1.0, UADFV]
20
+ train_dataset: [FaceForensics++]
21
+ test_dataset: [Celeb-DF-v2]
22
+
23
+ compression: c23 # compression-level for videos
24
+ train_batchSize: 64 # training batch size
25
+ test_batchSize: 64 # test batch size
26
+ workers: 4 # number of data loading workers
27
+ frame_num: {'train': 32, 'test': 32} # number of frames to use per video in training and testing
28
+ resolution: 224 # resolution of output image to network
29
+ with_mask: false # whether to include mask information in the input
30
+ with_landmark: false # whether to include facial landmark information in the input
31
 + video_mode: true # whether to use video-level data
32
 + clip_size: 4 # number of frames in each clip; should be the square of an integer (thumbnail_rows²)
33
+ dataset_type: tall
34
+
35
+ # data augmentation
36
 + use_data_augmentation: false # flag to enable/disable data augmentation
37
+ data_aug:
38
+ flip_prob: 0.5
39
+ rotate_prob: 0.5
40
+ rotate_limit: [-10, 10]
41
+ blur_prob: 0.5
42
+ blur_limit: [3, 7]
43
+ brightness_prob: 0.5
44
+ brightness_limit: [-0.1, 0.1]
45
+ contrast_limit: [-0.1, 0.1]
46
+ quality_lower: 40
47
+ quality_upper: 100
48
+
49
+ # mean and std for normalization
50
+ mean: [0.485, 0.456, 0.406]
51
+ std: [0.229, 0.224, 0.225]
52
+
53
+ # optimizer config
54
+ optimizer:
55
+ # choose between 'adam' and 'sgd'
56
+ type: adam
57
+ adam:
58
+ lr: 0.00002 # learning rate
59
+ beta1: 0.9 # beta1 for Adam optimizer
60
+ beta2: 0.999 # beta2 for Adam optimizer
61
+ eps: 0.00000001 # epsilon for Adam optimizer
62
+ weight_decay: 0.0005 # weight decay for regularization
63
+ amsgrad: false
64
+ sgd:
65
+ lr: 0.0002 # learning rate
66
+ momentum: 0.9 # momentum for SGD optimizer
67
+ weight_decay: 0.0005 # weight decay for regularization
68
+
69
+ # training config
70
+ lr_scheduler: null # learning rate scheduler
71
+ nEpochs: 100 # number of epochs to train for
72
+ start_epoch: 0 # manual epoch number (useful for restarts)
73
+ save_epoch: 1 # interval epochs for saving models
74
+ rec_iter: 100 # interval iterations for recording
75
+ logdir: ./logs # folder to output images and logs
76
+ manualSeed: 1024 # manual seed for random number generation
77
+ save_ckpt: true # whether to save checkpoint
78
+ save_feat: true # whether to save features
79
+
80
+ # loss function
81
+ loss_func: cross_entropy # loss function to use
82
+ losstype: null
83
+
84
+ # metric
85
+ metric_scoring: auc # metric for evaluation (auc, acc, eer, ap)
86
+
87
+ # cuda
88
+ cuda: true # whether to use CUDA acceleration
89
+ cudnn: true # whether to use CuDNN for convolution operations