# PPO trainer configuration for the Reacher behavior.
# The original file had lost its indentation and contained duplicated
# keys; where a key appeared more than once, one value is retained and
# the others are preserved in a comment beside it.
Reacher:
  trainer_type: ppo
  hyperparameters:
    batch_size: 2024            # duplicate value in original: 512
    buffer_size: 20240          # duplicate value in original: 20480
    beta: 0.005                 # duplicate value in original: 0.001
    epsilon: 0.2
    lambd: 0.95
    num_epoch: 3
    learning_rate: 0.0003
    learning_rate_schedule: constant
  network_settings:
    normalize: false
    hidden_units: 256           # duplicate value in original: 512
    num_layers: 2               # duplicate value in original: 3
  reward_signals:
    extrinsic:
      gamma: 0.995
    # strength, encoding_size, and use_actions are GAIL keys, not
    # extrinsic ones, so they are grouped under gail here. A gail
    # signal also requires demo_path, which the original did not give.
    gail:
      strength: 0.01            # duplicate value in original: 0.02
      encoding_size: 128
      use_actions: true
  max_steps: 10000000           # duplicate value in original: 3000000
  time_horizon: 128
  summary_freq: 30000
  threaded: true
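# Usage sketch (the file name and run id are illustrative, not from the
# original): save this document as e.g. config/reacher_ppo.yaml, then
# launch training with the standard ML-Agents CLI:
#   mlagents-learn config/reacher_ppo.yaml --run-id=reacher_ppo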
---
# SAC variant. buffer_init_steps, tau, steps_per_update, and
# save_replay_buffer are SAC-only hyperparameters and cannot belong to
# the PPO trainer above, so they are reconstructed here as a separate
# trainer_type: sac document (trainer_type inferred, not in the
# original). Split this document into its own file before passing it
# to mlagents-learn.
Reacher:
  trainer_type: sac
  hyperparameters:
    batch_size: 128             # duplicate values in original: 256, 1024
    buffer_size: 500000         # duplicate value in original: 2000000
    buffer_init_steps: 10000    # duplicate values in original: 1000, 0
    tau: 0.01                   # duplicate value in original: 0.005
    steps_per_update: 10.0      # duplicate value in original: 30.0
    save_replay_buffer: false
  network_settings:
    normalize: true
    num_layers: 4
    vis_encode_type: simple
  reward_signals:
    extrinsic:
      gamma: 0.99
  max_steps: 20000000           # duplicate value in original: 15000000
  time_horizon: 1000
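# Training curves for either run can be inspected with TensorBoard;
# mlagents-learn writes summaries under the results directory by
# default in recent releases (the directory differs in older ones):
#   tensorboard --logdir results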