Upload 4 files
pretrained_models/.gitkeep
ADDED
@@ -0,0 +1 @@
+# placeholder
pretrained_models/big-lama/config.yaml
ADDED
@@ -0,0 +1,157 @@
+run_title: b18_ffc075_batch8x15
+training_model:
+  kind: default
+  visualize_each_iters: 1000
+  concat_mask: true
+  store_discr_outputs_for_vis: true
+losses:
+  l1:
+    weight_missing: 0
+    weight_known: 10
+  perceptual:
+    weight: 0
+  adversarial:
+    kind: r1
+    weight: 10
+    gp_coef: 0.001
+    mask_as_fake_target: true
+    allow_scale_mask: true
+  feature_matching:
+    weight: 100
+  resnet_pl:
+    weight: 30
+    weights_path: ${env:TORCH_HOME}
+
+optimizers:
+  generator:
+    kind: adam
+    lr: 0.001
+  discriminator:
+    kind: adam
+    lr: 0.0001
+visualizer:
+  key_order:
+  - image
+  - predicted_image
+  - discr_output_fake
+  - discr_output_real
+  - inpainted
+  rescale_keys:
+  - discr_output_fake
+  - discr_output_real
+  kind: directory
+  outdir: /group-volume/User-Driven-Content-Generation/r.suvorov/inpainting/experiments/r.suvorov_2021-04-30_14-41-12_train_simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15/samples
+location:
+  data_root_dir: /group-volume/User-Driven-Content-Generation/datasets/inpainting_data_root_large
+  out_root_dir: /group-volume/User-Driven-Content-Generation/${env:USER}/inpainting/experiments
+  tb_dir: /group-volume/User-Driven-Content-Generation/${env:USER}/inpainting/tb_logs
+data:
+  batch_size: 15
+  val_batch_size: 2
+  num_workers: 3
+  train:
+    indir: ${location.data_root_dir}/train
+    out_size: 256
+    mask_gen_kwargs:
+      irregular_proba: 1
+      irregular_kwargs:
+        max_angle: 4
+        max_len: 200
+        max_width: 100
+        max_times: 5
+        min_times: 1
+      box_proba: 1
+      box_kwargs:
+        margin: 10
+        bbox_min_size: 30
+        bbox_max_size: 150
+        max_times: 3
+        min_times: 1
+      segm_proba: 0
+      segm_kwargs:
+        confidence_threshold: 0.5
+        max_object_area: 0.5
+        min_mask_area: 0.07
+        downsample_levels: 6
+        num_variants_per_mask: 1
+        rigidness_mode: 1
+        max_foreground_coverage: 0.3
+        max_foreground_intersection: 0.7
+        max_mask_intersection: 0.1
+        max_hidden_area: 0.1
+        max_scale_change: 0.25
+        horizontal_flip: true
+        max_vertical_shift: 0.2
+        position_shuffle: true
+    transform_variant: distortions
+    dataloader_kwargs:
+      batch_size: ${data.batch_size}
+      shuffle: true
+      num_workers: ${data.num_workers}
+  val:
+    indir: ${location.data_root_dir}/val
+    img_suffix: .png
+    dataloader_kwargs:
+      batch_size: ${data.val_batch_size}
+      shuffle: false
+      num_workers: ${data.num_workers}
+  visual_test:
+    indir: ${location.data_root_dir}/korean_test
+    img_suffix: _input.png
+    pad_out_to_modulo: 32
+    dataloader_kwargs:
+      batch_size: 1
+      shuffle: false
+      num_workers: ${data.num_workers}
+generator:
+  kind: ffc_resnet
+  input_nc: 4
+  output_nc: 3
+  ngf: 64
+  n_downsampling: 3
+  n_blocks: 18
+  add_out_act: sigmoid
+  init_conv_kwargs:
+    ratio_gin: 0
+    ratio_gout: 0
+    enable_lfu: false
+  downsample_conv_kwargs:
+    ratio_gin: ${generator.init_conv_kwargs.ratio_gout}
+    ratio_gout: ${generator.downsample_conv_kwargs.ratio_gin}
+    enable_lfu: false
+  resnet_conv_kwargs:
+    ratio_gin: 0.75
+    ratio_gout: ${generator.resnet_conv_kwargs.ratio_gin}
+    enable_lfu: false
+discriminator:
+  kind: pix2pixhd_nlayer
+  input_nc: 3
+  ndf: 64
+  n_layers: 4
+evaluator:
+  kind: default
+  inpainted_key: inpainted
+  integral_kind: ssim_fid100_f1
+trainer:
+  kwargs:
+    gpus: -1
+    accelerator: ddp
+    max_epochs: 200
+    gradient_clip_val: 1
+    log_gpu_memory: None
+    limit_train_batches: 25000
+    val_check_interval: ${trainer.kwargs.limit_train_batches}
+    log_every_n_steps: 1000
+    precision: 32
+    terminate_on_nan: false
+    check_val_every_n_epoch: 1
+    num_sanity_val_steps: 8
+    limit_val_batches: 1000
+    replace_sampler_ddp: false
+  checkpoint_kwargs:
+    verbose: true
+    save_top_k: 5
+    save_last: true
+    period: 1
+    monitor: val_ssim_fid100_f1_total_mean
+    mode: max
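The file above is an OmegaConf/Hydra-style YAML: values such as ${data.batch_size}, ${env:TORCH_HOME} and ${trainer.kwargs.limit_train_batches} are interpolations that get resolved when the config is read. A minimal sketch of inspecting it, assuming OmegaConf is installed; the manual "env" resolver registration is an assumption for newer OmegaConf releases and is not taken from this repo:

import os
from omegaconf import OmegaConf

# Older OmegaConf releases ship an "env" resolver; newer ones only provide
# oc.env, so register a simple one here if it is missing (assumption).
if not OmegaConf.has_resolver("env"):
    OmegaConf.register_new_resolver("env", lambda name: os.environ.get(name, ""))

cfg = OmegaConf.load("pretrained_models/big-lama/config.yaml")
print(cfg.generator.kind)                           # ffc_resnet
print(cfg.data.train.dataloader_kwargs.batch_size)  # 15, via ${data.batch_size}
print(cfg.trainer.kwargs.val_check_interval)        # 25000, via ${trainer.kwargs.limit_train_batches}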
pretrained_models/big-lama/models/best.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fccb7adffd53ec0974ee5503c3731c2c2f1e7e07856fd9228cdcc0b46fd5d423
+size 410046389
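best.ckpt is tracked with Git LFS, so only the pointer above (object id and size) is committed; the roughly 410 MB checkpoint itself is downloaded by git lfs pull. A minimal standard-library sketch of checking that a fetched file matches the sha256 and size recorded in the pointer; the helper name is hypothetical, not part of this repo:

import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_sha256: str, expected_size: int) -> bool:
    # Compare file size first, then the streamed sha256 digest.
    p = Path(path)
    if p.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256

print(verify_lfs_object(
    "pretrained_models/big-lama/models/best.ckpt",
    "fccb7adffd53ec0974ee5503c3731c2c2f1e7e07856fd9228cdcc0b46fd5d423",
    410046389,
))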
pretrained_models/sam_vit_h_4b8939.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7bf3b02f3ebf1267aba913ff637d9a2d5c33d3173bb679e46d9f338c26f262e
+size 2564550879
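sam_vit_h_4b8939.pth is likewise an LFS pointer for a roughly 2.4 GB Segment Anything ViT-H checkpoint. A hedged usage sketch, assuming the segment-anything package is installed and git lfs pull has replaced the pointer with the real file; neither step is shown in this diff:

# Hypothetical loading example, not part of this repo's code.
from segment_anything import SamPredictor, sam_model_registry

sam = sam_model_registry["vit_h"](checkpoint="pretrained_models/sam_vit_h_4b8939.pth")
predictor = SamPredictor(sam)  # then predictor.set_image(...) / predictor.predict(...)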