reacher: stack observation. with the original reward function

/develop/bisim-sac-transfer
yanchaosun, 4 years ago
Commit 85549b2b
5 files changed, 78 insertions(+), 23 deletions(-)
1. Project/Assets/ML-Agents/Examples/Reacher/Scripts/NewReacherAgent.cs (18 changes)
2. Project/Assets/ML-Agents/Examples/Reacher/Scripts/ReacherAgent.cs (18 changes)
3. config/sac_transfer/Reacher.yaml (4 changes)
4. config/sac_transfer/ReacherTransfer.yaml (6 changes)
5. config/sac_transfer/3DBall1fTransfer.yaml (55 changes, new file)

Project/Assets/ML-Agents/Examples/Reacher/Scripts/NewReacherAgent.cs (18 changes)


      torqueZ = Mathf.Clamp(vectorAction[3], -1f, 1f) * 150f;
      m_RbB.AddTorque(new Vector3(torqueX, 0f, torqueZ));
-     AddReward( - 0.001f * (vectorAction[0] * vectorAction[0]
-         + vectorAction[1] * vectorAction[1]
-         + vectorAction[2] * vectorAction[2]
-         + vectorAction[3] * vectorAction[3]
-         ));
+     // AddReward( - 0.001f * (vectorAction[0] * vectorAction[0]
+     //     + vectorAction[1] * vectorAction[1]
+     //     + vectorAction[2] * vectorAction[2]
+     //     + vectorAction[3] * vectorAction[3]
+     //     ));
  }

  /// <summary>

  {
-     if ((goal.transform.position - hand.transform.position).magnitude > 3.5f)
-     {
-         AddReward(-0.002f);
-     }
+     // if ((goal.transform.position - hand.transform.position).magnitude > 3.5f)
+     // {
+     //     AddReward(-0.002f);
+     // }
      // AddReward( - 0.001f * (goal.transform.position - hand.transform.position).magnitude);
      // Debug.Log((goal.transform.position - hand.transform.position).magnitude);
      var radians = m_GoalDegree * Mathf.PI / 180f;

Project/Assets/ML-Agents/Examples/Reacher/Scripts/ReacherAgent.cs (18 changes)


      torqueZ = Mathf.Clamp(vectorAction[3], -1f, 1f) * 150f;
      m_RbB.AddTorque(new Vector3(torqueX, 0f, torqueZ));
-     AddReward( - 0.001f * (vectorAction[0] * vectorAction[0]
-         + vectorAction[1] * vectorAction[1]
-         + vectorAction[2] * vectorAction[2]
-         + vectorAction[3] * vectorAction[3]
-         ));
+     // AddReward( - 0.001f * (vectorAction[0] * vectorAction[0]
+     //     + vectorAction[1] * vectorAction[1]
+     //     + vectorAction[2] * vectorAction[2]
+     //     + vectorAction[3] * vectorAction[3]
+     //     ));
  }

  /// <summary>

  {
-     if ((goal.transform.position - hand.transform.position).magnitude > 3.5f)
-     {
-         AddReward(-0.002f);
-     }
+     // if ((goal.transform.position - hand.transform.position).magnitude > 3.5f)
+     // {
+     //     AddReward(-0.002f);
+     // }
      // AddReward( - 0.001f * (goal.transform.position - hand.transform.position).magnitude);
      // Debug.Log((goal.transform.position - hand.transform.position).magnitude);
      var radians = m_GoalDegree * Mathf.PI / 180f;
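Both agent scripts receive the identical edit: the action-magnitude penalty and the hand-to-goal distance penalty are commented out, leaving only the environment's original reward (the small positive reward granted while the hand stays inside the goal, which lives in the goal's trigger script rather than in these hunks). The "stack observation" half of the commit message does not appear in these files; in ML-Agents, vector-observation stacking is normally toggled on the agent's Behavior Parameters component in the scene. A minimal sketch of the resulting method bodies, assuming the stock ReacherAgent scaffolding from the ML-Agents 1.x API (the class, field, and method names below come from that file, not from this diff):

using UnityEngine;
using Unity.MLAgents;

public class ReacherAgentSketch : Agent
{
    public GameObject hand;    // arm tip whose distance to the goal was penalized
    public GameObject goal;    // target sphere orbiting the agent
    Rigidbody m_RbA;           // upper-arm rigidbody
    Rigidbody m_RbB;           // forearm rigidbody
    float m_GoalDegree;        // current angular position of the goal

    public override void OnActionReceived(float[] vectorAction)
    {
        // Torques are applied exactly as in the diff context above.
        var torqueX = Mathf.Clamp(vectorAction[0], -1f, 1f) * 150f;
        var torqueZ = Mathf.Clamp(vectorAction[1], -1f, 1f) * 150f;
        m_RbA.AddTorque(new Vector3(torqueX, 0f, torqueZ));

        torqueX = Mathf.Clamp(vectorAction[2], -1f, 1f) * 150f;
        torqueZ = Mathf.Clamp(vectorAction[3], -1f, 1f) * 150f;
        m_RbB.AddTorque(new Vector3(torqueX, 0f, torqueZ));

        // Action-magnitude penalty, disabled by this commit:
        // AddReward(-0.001f * (vectorAction[0] * vectorAction[0]
        //     + vectorAction[1] * vectorAction[1]
        //     + vectorAction[2] * vectorAction[2]
        //     + vectorAction[3] * vectorAction[3]));
    }

    void UpdateGoalPosition()
    {
        // Distance penalty, also disabled by this commit:
        // if ((goal.transform.position - hand.transform.position).magnitude > 3.5f)
        // {
        //     AddReward(-0.002f);
        // }
        var radians = m_GoalDegree * Mathf.PI / 180f;
        // ... goal placement continues as in the original file, using `radians` ...
    }
}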

config/sac_transfer/Reacher.yaml (4 changes)


      learning_rate_schedule: linear
      model_schedule: constant
      batch_size: 256
-     buffer_size: 6000000
+     buffer_size: 10000000
      buffer_init_steps: 0
      tau: 0.005
      steps_per_update: 20.0

        gamma: 0.99
        strength: 1.0
    keep_checkpoints: 5
-   max_steps: 6000000
+   max_steps: 10000000
    time_horizon: 1000
    summary_freq: 60000
    threaded: true
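The two values move together for a reason worth noting: with buffer_size equal to max_steps (10M each), the replay buffer effectively never discards a transition over the whole source-task run. The resulting excerpt, with the enclosing behavior block assumed from the standard ML-Agents config layout:

# Excerpt of config/sac_transfer/Reacher.yaml after this commit; the
# enclosing structure is assumed (standard ML-Agents schema).
behaviors:
  Reacher:
    hyperparameters:
      batch_size: 256
      buffer_size: 10000000   # was 6000000; now matches the new step budget
    max_steps: 10000000       # was 6000000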

config/sac_transfer/ReacherTransfer.yaml (6 changes)


  Reacher:
    trainer_type: sac_transfer
    hyperparameters:
-     learning_rate: 0.0006
-     learning_rate_schedule: constant
+     learning_rate: 0.0003
+     learning_rate_schedule: linear
      model_schedule: constant
      batch_size: 256
      buffer_size: 6000000

      train_model: false
      load_action: true
      train_action: false
-     transfer_path: "results/reacher/Reacher"
+     transfer_path: "results/reacher-ori-sta/Reacher"
    network_settings:
      normalize: true
      hidden_units: 128
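The net effect on the transfer run is a gentler fine-tuning schedule pointed at the retrained source model: the learning rate is halved and now anneals linearly instead of staying constant, and transfer_path picks up the new "reacher-ori-sta" source run (presumably the run trained with the original reward and stacked observations, matching the commit message). Merged, the changed keys read as follows (the enclosing behaviors: key is assumed from the standard layout):

# Excerpt of config/sac_transfer/ReacherTransfer.yaml after this commit.
behaviors:
  Reacher:
    trainer_type: sac_transfer
    hyperparameters:
      learning_rate: 0.0003            # was 0.0006
      learning_rate_schedule: linear   # was constant
      train_action: false              # action model stays frozen during transfer
      transfer_path: "results/reacher-ori-sta/Reacher"  # was "results/reacher/Reacher"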

config/sac_transfer/3DBall1fTransfer.yaml (55 changes, new file)


behaviors:
  3DBall:
    trainer_type: sac_transfer
    hyperparameters:
      learning_rate: 0.0003
      learning_rate_schedule: linear
      model_schedule: constant
      batch_size: 64
      buffer_size: 500000
      buffer_init_steps: 0
      tau: 0.005
      steps_per_update: 10.0
      save_replay_buffer: false
      init_entcoef: 0.5
      reward_signal_steps_per_update: 10.0
      encoder_layers: 2
      policy_layers: 0
      forward_layers: 0
      value_layers: 1
      action_layers: 1
      feature_size: 64
      action_feature_size: 32
      # separate_value_net: true
      separate_policy_train: true
      # separate_value_train: true
      separate_model_train: true
      reuse_encoder: true
      in_epoch_alter: false
      in_batch_alter: true
      use_op_buffer: false
      use_var_predict: true
      with_prior: false
      predict_return: true
      use_bisim: false
      use_transfer: true
      load_model: false
      load_encoder: true
      train_encoder: false
      load_action: true
      train_action: false
      transfer_path: "results/ball_1f/3DBall"
    network_settings:
      normalize: true
      hidden_units: 128
      num_layers: 2
      vis_encode_type: simple
    reward_signals:
      extrinsic:
        gamma: 0.99
        strength: 1.0
    keep_checkpoints: 5
    max_steps: 500000
    time_horizon: 1000
    summary_freq: 12000
    threaded: true
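Most of these keys (model_schedule, feature_size, use_bisim, and the load_*/train_* pairs) belong to this fork's sac_transfer trainer, not to stock ML-Agents. Going only by the flag names and the values chosen here, the transfer-related switches appear to select which source-run components are reused and which stay frozen; a hedged annotation of that subset:

# Interpretation inferred from flag names; the authoritative semantics live
# in this fork's sac_transfer trainer code, not in stock ML-Agents.
use_transfer: true                       # initialize from a finished source run
transfer_path: "results/ball_1f/3DBall"  # checkpoint directory of that run
load_encoder: true                       # reuse the source encoder...
train_encoder: false                     # ...and keep it frozen while fine-tuning
load_action: true                        # likewise reuse the source action model...
train_action: false                      # ...without further updates
load_model: false                        # the dynamics model is trained fresh
use_bisim: false                         # bisimulation loss disabled for this run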