trainer_type: sac_transfer
hyperparameters:
  learning_rate: 0.0003
  learning_rate_schedule: linear
  model_schedule: constant
  batch_size: 128
  buffer_size: 500000
  save_replay_buffer: false
  init_entcoef: 1.0
  reward_signal_steps_per_update: 20.0
  encoder_layers: 2
  action_layers: 2
  feature_size: 64
  action_feature_size: 16
  separate_policy_train: true
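  # Transfer options of the sac_transfer trainer: transfer_path points at a previous
  # run's results directory; the load_*/train_* flags below presumably select which
  # pretrained components are restored and whether they continue training (assumed
  # semantics, not confirmed by the source config).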
  reuse_encoder: false
  in_epoch_alter: false
  in_batch_alter: true
  use_op_buffer: false
  train_model: false
  load_action: true
  train_action: false
  transfer_path: "results/reacher/Reacher"
reward_signals:
  extrinsic:
    gamma: 0.99
    strength: 1.0
keep_checkpoints: 5
max_steps: 6000000
time_horizon: 1000
summary_freq: 60000
threaded: true
network_settings:
  normalize: true
  hidden_units: 128
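# Example launch (a sketch; the config file path, run id, and environment build path
# below are placeholders, assuming the standard mlagents-learn CLI):
#   mlagents-learn config/sac_transfer/Reacher.yaml --run-id=reacher_transfer --env=<path-to-Reacher-build>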