behaviors:
  BigWallJump:
    trainer_type: sac
    hyperparameters:
      learning_rate: 0.0003
      learning_rate_schedule: constant
      batch_size: 128
      buffer_size: 200000
      buffer_init_steps: 0
      tau: 0.005
      steps_per_update: 20.0
      save_replay_buffer: false
      init_entcoef: 0.1
      reward_signal_steps_per_update: 10.0
    network_settings:
      normalize: false
      hidden_units: 256
      num_layers: 2
      vis_encode_type: simple
    reward_signals:
      extrinsic:
        gamma: 0.99
        strength: 1.0
    keep_checkpoints: 5
    max_steps: 15000000
    time_horizon: 128
    summary_freq: 20000
  SmallWallJump:
    trainer_type: sac
    hyperparameters:
      learning_rate: 0.0003
      learning_rate_schedule: constant
      batch_size: 128
      buffer_size: 50000
      buffer_init_steps: 0
      tau: 0.005
      steps_per_update: 20.0
      save_replay_buffer: false
      init_entcoef: 0.1
      reward_signal_steps_per_update: 10.0
    network_settings:
      normalize: false
      hidden_units: 256
      num_layers: 2
      vis_encode_type: simple
    reward_signals:
      extrinsic:
        gamma: 0.99
        strength: 1.0
    keep_checkpoints: 5
    max_steps: 5000000
    time_horizon: 128
    summary_freq: 20000
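
# --- Usage sketch (not part of the original config; file path and run-id are assumptions) ---
# A minimal way to train both behaviors with this file, assuming it is saved as
# config/sac/WallJump.yaml and the ML-Agents Python package (mlagents) is installed:
#
#   mlagents-learn config/sac/WallJump.yaml --run-id=WallJumpSAC
#
# Press Play in the Unity Editor when prompted, or pass --env=<path to a built
# WallJump executable> to train against a standalone build instead.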