# Training configuration (Hydra/OmegaConf style) for an ACT policy on the
# mimicgen Stack_D1 task, using the kywch/mimicgen_stack_d1 dataset.
# NOTE(review): reconstructed from a whitespace-mangled single-line dump;
# key/value content preserved verbatim, only block structure restored.

resume: false
device: cuda
use_amp: false
seed: 1000
dataset_repo_id: kywch/mimicgen_stack_d1
video_backend: pyav

training:
  offline_steps: 100000
  num_workers: 4
  batch_size: 8
  eval_freq: 10000
  log_freq: 200
  save_checkpoint: true
  save_freq: 20000
  # Online-training settings (disabled here: online_steps is 0).
  online_steps: 0
  online_rollout_n_episodes: 1
  online_rollout_batch_size: 1
  online_steps_between_rollouts: 1
  online_sampling_ratio: 0.5
  online_env_seed: null
  online_buffer_capacity: null
  online_buffer_seed_size: 0
  do_online_rollout_async: false
  # Image augmentations (disabled; per-transform weights/ranges kept for
  # reference if re-enabled).
  image_transforms:
    enable: false
    max_num_transforms: 3
    random_order: false
    brightness:
      weight: 1
      min_max:
        - 0.8
        - 1.2
    contrast:
      weight: 1
      min_max:
        - 0.8
        - 1.2
    saturation:
      weight: 1
      min_max:
        - 0.5
        - 1.5
    hue:
      weight: 1
      min_max:
        - -0.05
        - 0.05
    sharpness:
      weight: 1
      min_max:
        - 0.8
        - 1.2
  lr: 1.0e-05
  lr_backbone: 1.0e-05
  weight_decay: 0.0001
  grad_clip_norm: 10
  # 20 action timestamps at 0.05 s spacing — consistent with fps: 20 and
  # policy.chunk_size: 20 below.
  delta_timestamps:
    action:
      - 0.0
      - 0.05
      - 0.1
      - 0.15
      - 0.2
      - 0.25
      - 0.3
      - 0.35
      - 0.4
      - 0.45
      - 0.5
      - 0.55
      - 0.6
      - 0.65
      - 0.7
      - 0.75
      - 0.8
      - 0.85
      - 0.9
      - 0.95

eval:
  n_episodes: 20
  batch_size: 20
  use_async_envs: true

wandb:
  enable: true
  disable_artifact: true
  project: lerobot
  notes: ''

fps: 20

env:
  name: mimicgen
  task: Stack_D1
  state_dim: 9
  action_dim: 7
  episode_length: 250
  meta: stack_d1_env.json
  image_keys:
    - agentview
    - robot0_eye_in_hand
  state_keys:
    - robot0_eef_pos
    - robot0_eef_quat
    - robot0_gripper_qpos
  use_delta_action: false
  use_highres_image_obs: true

# Per-channel image stats overriding dataset-computed values; each nested
# `- - - x` entry is a [[x]] element, giving a (3, 1, 1) per-channel shape.
override_dataset_stats:
  observation.images.agentview:
    mean:
      - - - 0.485
      - - - 0.456
      - - - 0.406
    std:
      - - - 0.229
      - - - 0.224
      - - - 0.225
  observation.images.robot0_eye_in_hand:
    mean:
      - - - 0.485
      - - - 0.456
      - - - 0.406
    std:
      - - - 0.229
      - - - 0.224
      - - - 0.225

policy:
  name: act
  n_obs_steps: 1
  chunk_size: 20
  n_action_steps: 20
  input_shapes:
    observation.images.agentview:
      - 3
      - 256
      - 256
    observation.images.robot0_eye_in_hand:
      - 3
      - 256
      - 256
    observation.state:
      # OmegaConf interpolation — resolves to env.state_dim (9).
      - ${env.state_dim}
  output_shapes:
    action:
      # OmegaConf interpolation — resolves to env.action_dim (7).
      - ${env.action_dim}
  input_normalization_modes:
    observation.images.agentview: mean_std
    observation.images.robot0_eye_in_hand: mean_std
    observation.state: mean_std
  output_normalization_modes:
    action: mean_std
  # Transformer / vision-backbone architecture.
  vision_backbone: resnet18
  pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
  replace_final_stride_with_dilation: false
  pre_norm: false
  dim_model: 512
  n_heads: 8
  dim_feedforward: 3200
  feedforward_activation: relu
  n_encoder_layers: 4
  n_decoder_layers: 1
  use_vae: true
  latent_dim: 32
  n_vae_encoder_layers: 4
  temporal_ensemble_momentum: null
  dropout: 0.1
  kl_weight: 10.0