behaviors:
  Reacher:
    trainer_type: sac_transfer
    hyperparameters:
      learning_rate: 0.0003
      learning_rate_schedule: constant
      model_schedule: constant
      batch_size: 256
      buffer_size: 6000000
      buffer_init_steps: 0
      tau: 0.005
      steps_per_update: 20.0
      save_replay_buffer: false
      init_entcoef: 1.0
      reward_signal_steps_per_update: 20.0
      encoder_layers: 1
      policy_layers: 2
      forward_layers: 0
      value_layers: 2
      action_layers: 1
      feature_size: 64
      action_feature_size: 16
      separate_policy_train: true
      separate_policy_net: true
      reuse_encoder: false
      in_epoch_alter: false
      in_batch_alter: true
      use_op_buffer: false
      use_var_predict: true
      with_prior: false
      predict_return: true
      use_bisim: false
      use_transfer: true
      load_model: true
      train_model: false
      load_action: true
      train_action: false
      transfer_path: "results/reacher-qr-s3/Reacher"
    network_settings:
      normalize: true
      hidden_units: 128
      num_layers: 2
      vis_encode_type: simple
    reward_signals:
      extrinsic:
        gamma: 0.99
        strength: 1.0
    keep_checkpoints: 5
    max_steps: 6000000
    time_horizon: 1000
    summary_freq: 60000
    threaded: true
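# Usage sketch, kept as comments so this file stays valid YAML. Assumptions:
# this sac_transfer fork keeps the upstream ML-Agents CLI, the source model at
# transfer_path was produced by an earlier run (e.g. one launched with
# --run-id=reacher-qr-s3), and the file/run-id names below are hypothetical:
#
#   mlagents-learn config/reacher_transfer.yaml --run-id=reacher-transfer \
#       --env=<path-to-Reacher-build>
#
# Reading of the transfer flags above: with use_transfer, load_model, and
# load_action enabled while train_model and train_action are disabled, the
# intent appears to be to load the dynamics model and action encoding from
# results/reacher-qr-s3/Reacher and keep them frozen while only the policy
# (separate_policy_train/separate_policy_net) is trained on the new task.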