model:
  base_learning_rate: 1.0e-06
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    reg_weight: 1.0
    linear_start: 0.00085
    linear_end: 0.012
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: image
    cond_stage_key: caption
    image_size: 64
    channels: 4
    cond_stage_trainable: true
    conditioning_key: crossattn
    monitor: val/loss_simple_ema
    scale_factor: 0.18215
    use_ema: false
    embedding_reg_weight: 0.0
    unfreeze_model: true
    model_lr: 1.0e-06
    personalization_config:
      target: ldm.modules.embedding_manager.EmbeddingManager
      params:
        placeholder_strings:
        - '*'
        initializer_words:
        - sculpture
        per_image_tokens: false
        num_vectors_per_token: 1
        progressive_words: false
    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions:
        - 4
        - 2
        - 1
        num_res_blocks: 2
        channel_mult:
        - 1
        - 2
        - 4
        - 4
        num_heads: 8
        use_spatial_transformer: true
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: true
        legacy: false
    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 512
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity
    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
    ckpt_path: stable-diffusion-v-1-4-original/sd-v1-4-full-ema.ckpt
data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 1
    num_workers: 2
    wrap: false
    train:
      target: ldm.data.personalized.PersonalizedBase
      params:
        size: 512
        set: train
        per_image_tokens: false
        repeats: 100
        special_token: ldj
        placeholder_token: can
    reg:
      target: ldm.data.personalized.PersonalizedBase
      params:
        size: 512
        set: train
        reg: true
        per_image_tokens: false
        repeats: 10
        placeholder_token: can
    validation:
      target: ldm.data.personalized.PersonalizedBase
      params:
        size: 512
        set: val
        per_image_tokens: false
        repeats: 10
        placeholder_token: can
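For context, a minimal sketch of how a CompVis-style training setup typically consumes a config like this, assuming the ldm package from the Stable Diffusion / Dreambooth codebase is importable and that the YAML above is saved at the (hypothetical) path used below:

# Load the config and build the model and data module the way main.py does.
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load("configs/stable-diffusion/v1-finetune_unfrozen.yaml")

# Instantiate the LatentDiffusion model described under `model:`.
# Because `ckpt_path` is set in its params, the sd-v1-4 weights are loaded
# during construction via init_from_ckpt.
model = instantiate_from_config(config.model)

# Instantiate the DataModule described under `data:` (train / reg / validation splits).
data = instantiate_from_config(config.data)
data.prepare_data()
data.setup()

In the full training script this model and data module are then handed to a PyTorch Lightning Trainer; the sketch only shows that the config's `target:` entries name the classes to construct and the sibling `params:` blocks are passed as their keyword arguments.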