}
else
{
    // SetReward(0.1f);
    // Survival reward minus a small quadratic penalty on the tilt actions,
    // discouraging large corrections on the X and Z axes.
    SetReward(0.1f - (0.05f * actionZ * actionZ + 0.05f * actionX * actionX));
m_ObjectHideFlags: 0
serializedVersion: 2
m_Scenes:
- enabled: 0
- enabled: 1
  path: Assets/ML-Agents/Examples/3DBall/Scenes/3DBall.unity
  guid: b9ac0cbf961bf4dacbfa0aa9c0d60aaa
m_configObjects: {}
learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 256
# buffer_size: 50000
buffer_size: 500000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 10.0
network_settings:
  normalize: true
  # hidden_units: 128
  hidden_units: 64
  num_layers: 2
  vis_encode_type: simple
reward_signals:
separate_policy_train: true
# separate_value_train: true
separate_model_train: true
# reuse_encoder: false
reuse_encoder: true
in_epoch_alter: false
# in_batch_alter: true
use_op_buffer: false
# separate_value_net: true
in_batch_alter: false
use_transfer: true
load_model: true
train_model: false
transfer_path: "results/ball-targ-linear/3DBall"
transfer_path: "results/ball-linear/3DBall"
    self.processed_vector_in,
    vis_encode_type,
    encoder_layers=encoder_layers,
    # scope="target_enc",
    scope="encoding",
    reuse=True,
)
if separate_train:
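The separate_train branch is cut off above. A common TF1-style pattern for training an encoder separately from the policy and model heads is to hand the downstream losses a gradient-stopped copy of the encoding, so only the encoder's own objective updates its weights. The sketch below only illustrates that pattern under assumed names; it is not the branch's actual body.

import tensorflow as tf

def maybe_detach(encoding, separate_train):
    # With separate training enabled, downstream losses see a constant copy
    # of the encoding and therefore cannot update the encoder's variables.
    return tf.stop_gradient(encoding) if separate_train else encoding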