model:
  target: visconet.visconet.ViscoNetLDM
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "jpg"        # keys into the batch dict produced by the dataset below
    cond_stage_key: "txt"
    control_key: "hint"
    control_crossattn_key: "styles"
    mask_key: "human_mask"
    image_size: 64
    channels: 4
    cond_stage_trainable: false
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215         # standard Stable Diffusion latent scaling factor
    use_ema: False
    only_mid_control: False

    scheduler_config:
      target: torch.optim.lr_scheduler.ReduceLROnPlateau
      monitor: val/loss_simple_ema
      params:
        mode: min
        factor: 0.5
        patience: 3
        cooldown: 0
        min_lr: 0.00001
        threshold: 0.001
        verbose: True

    control_cond_config:
      target: visconet.modules.ProjectLocalStyle
      #target: visconet.modules.ClipImageEncoder
      params:
        pool_size: 9
        local_emb_size: 257
        bias: True

    control_stage_config:
      target: cldm.cldm.ControlNet
      params:
        use_checkpoint: True
        image_size: 32 # unused
        in_channels: 4
        hint_channels: 3
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_head_channels: 64 # need to fix for flash-attn
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024         # OpenCLIP ViT-H embedding width
        legacy: False

    unet_config:
      target: cldm.cldm.ControlledUnetModel
      params:
        use_checkpoint: True
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_head_channels: 64 # need to fix for flash-attn
        use_spatial_transformer: True
        use_linear_in_transformer: True
        transformer_depth: 1
        context_dim: 1024
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          #attn_type: "vanilla-xformers"
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
            - 1
            - 2
            - 4
            - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
      params:
        freeze: True
        layer: "penultimate"

    style_embedding_config:
      target: scripts.image_emb_hidden.ClipImageEncoder

dataset:
  train:
    target: visconet.deepfashion.DeepFashionDataset
    params:
      image_root: "/home/soon/datasets/deepfashion_inshop"
      image_dir: img_512_padded
      pose_dir: openpose_hand_default_512
      style_dir: styles_default
      style_postfix: _hidden
      mask_dir: smpl_256
      data_files:
        - data/deepfashion/pairs-train-all.csv
        - data/deepfashion/solo-train-all.csv
      map_file: data/deepfashion/deepfashion_map.csv
      style_emb_shape:            # 257 tokens (CLS + 16x16 patches) x 1024 dims, CLIP ViT-H/14 hidden states
        - 257
        - 1024
      crop_shape:
        - 512
        - 384
      style_names:
        - face
        - hair
        - headwear
        - top
        - outer
        - bottom
        - shoes
        - accesories            # (sic) kept as-is to match on-disk naming
  val:
    target: visconet.deepfashion.DeepFashionDataset
    params:
      image_root: "/home/soon/datasets/deepfashion_inshop"
      image_dir: img_512_padded
      pose_dir: openpose_hand_default_512
      style_dir: styles_default
      style_postfix: _hidden
      mask_dir: smpl_256
      data_files:
        - data/deepfashion/pairs-test-all.csv
      map_file: data/deepfashion/deepfashion_map.csv
      sample_ratio: 1.0
      style_emb_shape:
        - 257
        - 1024
      crop_shape:
        - 512
        - 384
      style_names:
        - face
        - hair
        - headwear
        - top
        - outer
        - bottom
        - shoes
        - accesories
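
# ---------------------------------------------------------------------------
# Usage sketch, kept as comments so this file stays valid YAML. A minimal,
# hedged example of how ControlNet-style configs like this one are typically
# consumed, assuming the standard latent-diffusion helpers (`OmegaConf.load`,
# `ldm.util.instantiate_from_config`) that the ldm/cldm targets above come
# from. The config and checkpoint paths are hypothetical placeholders.
#
#   import torch
#   from omegaconf import OmegaConf
#   from ldm.util import instantiate_from_config
#
#   # Parse this YAML and build the ViscoNetLDM model it describes.
#   config = OmegaConf.load("configs/visconet_v1.yaml")        # hypothetical path
#   model = instantiate_from_config(config.model)
#
#   # Load pretrained weights; strict=False tolerates key mismatches between
#   # the frozen SD backbone and the trainable control branch.
#   state = torch.load("models/visconet.ckpt", map_location="cpu")  # hypothetical path
#   model.load_state_dict(state.get("state_dict", state), strict=False)
#   model = model.cuda().eval()
#
#   # The dataset entries follow the same target/params pattern.
#   train_ds = instantiate_from_config(config.dataset.train)
# ---------------------------------------------------------------------------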