behaviors:
  MultiDirWalker:
    trainer_type: ppo
    hyperparameters:
      batch_size: 2048
      buffer_size: 20480
      learning_rate: 0.0003
      beta: 0.005
      epsilon: 0.2
      lambd: 0.95
      num_epoch: 3
      learning_rate_schedule: linear
    network_settings:
      normalize: true
      hidden_units: 256
      num_layers: 2
      vis_encode_type: simple
    reward_signals:
      extrinsic:
        gamma: 0.995
        strength: 1.0
    keep_checkpoints: 5
    max_steps: 30000000
    time_horizon: 1000
    summary_freq: 30000
    threaded: true
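
# Usage sketch (added note, not part of the original config): with Unity ML-Agents
# installed, a PPO run using this behavior configuration is typically started with the
# mlagents-learn CLI, pointing it at this file and a run identifier, e.g.
#
#   mlagents-learn MultiDirWalker.yaml --run-id=multi_dir_walker_01
#
# The file name and run-id above are hypothetical placeholders. Note that buffer_size
# (20480) is set to 10x batch_size (2048), matching the usual ML-Agents guidance that
# the buffer be a multiple of the batch size.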