
numpy fix, config 3dball

/develop/bisim-sac-transfer
yanchaosun, 4 years ago
Commit c48b6429
5 files changed, 39 insertions(+), 26 deletions(-)
  1. config/sac_transfer/3DBall.yaml (8 changes)
  2. config/sac_transfer/3DBallHard.yaml (8 changes)
  3. config/sac_transfer/3DBallHardTransfer.yaml (14 changes)
  4. ml-agents-envs/setup.py (2 changes)
  5. ml-agents/mlagents/trainers/sac_transfer/optimizer.py (33 changes)

config/sac_transfer/3DBall.yaml (8 changes)


save_replay_buffer: false
init_entcoef: 0.5
reward_signal_steps_per_update: 10.0
- encoder_layers: 2
+ encoder_layers: 1
- forward_layers: 0
- value_layers: 0
- feature_size: 64
+ forward_layers: 1
+ value_layers: 1
+ feature_size: 16
# separate_value_net: true
separate_policy_train: true
# separate_value_train: true

config/sac_transfer/3DBallHard.yaml (8 changes)


save_replay_buffer: false
init_entcoef: 1.0
reward_signal_steps_per_update: 10.0
- encoder_layers: 2
+ encoder_layers: 1
- forward_layers: 0
- value_layers: 0
- feature_size: 64
+ forward_layers: 1
+ value_layers: 1
+ feature_size: 16
# separate_value_net: true
separate_policy_train: true
# separate_value_train: true
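Note: both 3DBall configs move from a 2-layer encoder with a 64-dimensional feature and no dedicated forward/value layers to a 1-layer encoder, one forward-model layer, one value layer, and a 16-dimensional feature. A hypothetical sketch of how these counts might translate into layer stacks (the mapping and names below are assumptions, not the trainer's actual code; obs_size=8 is the 3DBall vector observation length, hidden_units comes from network_settings):

def layer_widths(obs_size, encoder_layers=1, forward_layers=1, value_layers=1,
                 feature_size=16, hidden_units=64):
    # Illustrative only: each entry is the width of one fully connected layer.
    encoder = [obs_size] + [hidden_units] * encoder_layers + [feature_size]
    forward_model = [feature_size] + [hidden_units] * forward_layers + [feature_size]
    value_head = [feature_size] + [hidden_units] * value_layers + [1]
    return {"encoder": encoder, "forward": forward_model, "value": value_head}

print(layer_widths(obs_size=8))
# {'encoder': [8, 64, 16], 'forward': [16, 64, 16], 'value': [16, 64, 1]}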

config/sac_transfer/3DBallHardTransfer.yaml (14 changes)


init_entcoef: 1.0
reward_signal_steps_per_update: 10.0
encoder_layers: 1
- policy_layers: 0
+ policy_layers: 1
- value_layers: 2
- feature_size: 32
- separate_value_net: true
- # separate_policy_train: true
+ value_layers: 1
+ feature_size: 16
+ # separate_value_net: true
+ separate_policy_train: true
- reuse_encoder: false
+ reuse_encoder: true
in_epoch_alter: false
in_batch_alter: false
use_op_buffer: false

use_transfer: true
load_model: true
train_model: false
- transfer_path: "results/sac_model_ball_ml1/3DBall"
+ transfer_path: "results/ball/3DBall"
network_settings:
  normalize: true
  hidden_units: 64
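Note: the transfer config now loads a frozen pretrained model (load_model: true, train_model: false) from the shorter path results/ball/3DBall and reuses its encoder. A small, hypothetical pre-flight check, not part of this commit; the flatten walk deliberately avoids assuming where the sac_transfer keys sit in the YAML tree:

import os
import yaml  # PyYAML is already a dependency (see setup.py below)

def flatten(node, out=None):
    # Collect every key/value pair in the YAML tree into one flat dict.
    out = {} if out is None else out
    if isinstance(node, dict):
        for key, value in node.items():
            out[key] = value
            flatten(value, out)
    return out

def check_transfer_config(config_path):
    # Fail fast if a transfer run points at a transfer_path that does not exist.
    with open(config_path) as f:
        settings = flatten(yaml.safe_load(f))
    if settings.get("use_transfer") and settings.get("load_model"):
        transfer_path = settings.get("transfer_path", "")
        if not os.path.isdir(transfer_path):
            raise FileNotFoundError(f"transfer_path does not exist: {transfer_path}")

check_transfer_config("config/sac_transfer/3DBallHardTransfer.yaml")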

ml-agents-envs/setup.py (2 changes)


install_requires=[
    "cloudpickle",
    "grpcio>=1.11.0",
-     "numpy>=1.14.1,<2.0",
+     "numpy>=1.14.1,<1.19.0",
    "Pillow>=4.2.1",
    "protobuf>=3.6",
    "pyyaml>=3.1.0",

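Note: the only change here is tightening the NumPy upper bound from <2.0 to <1.19.0, presumably to stay compatible with the TensorFlow 1.x stack the trainers run on. A hypothetical runtime guard that mirrors the pin (coarse major.minor check only; not part of this commit):

import numpy as np

def check_numpy_pin(version: str = np.__version__) -> None:
    # Fail fast if the installed NumPy falls outside the range pinned in setup.py.
    major, minor = (int(part) for part in version.split(".")[:2])
    if not ((1, 14) <= (major, minor) < (1, 19)):
        raise RuntimeError(
            f"numpy {version} is outside the supported range >=1.14.1,<1.19.0"
        )

check_numpy_pin()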
ml-agents/mlagents/trainers/sac_transfer/optimizer.py (33 changes)


    hyperparameters.load_action,
)
self.policy.run_hard_copy()
+ self.num_updates = 0
print("All variables in the graph:")
for variable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):

self.update_dict = {
    "value_loss": self.total_value_loss,
    "policy_loss": self.policy_loss,
-     # "model_loss": self.model_loss,
-     # "model_learning_rate": self.model_learning_rate,
-     # "reward_loss": self.policy.reward_loss,
    "q1_loss": self.q1_loss,
    "q2_loss": self.q2_loss,
    "entropy_coef": self.ent_coef,
    "learning_rate": self.learning_rate,
}
+ if self.use_transfer:
+     self.update_dict.update({
+         "model_loss": self.model_loss,
+         "model_learning_rate": self.model_learning_rate,
+         "reward_loss": self.policy.reward_loss,
+     })

def _create_inputs_and_outputs(self) -> None:
    """

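Note: the commented-out entries are replaced by an explicit use_transfer branch, so the session is only asked for tensors that were actually built. A condensed sketch of that pattern (plain dicts stand in for TF tensors; build_update_dict is a hypothetical helper, not the optimizer's code):

def build_update_dict(core_fetches, transfer_fetches, use_transfer):
    # Start from the always-present SAC losses, add transfer-only tensors on demand.
    fetches = dict(core_fetches)            # value/policy/q losses, entropy coef, lr
    if use_transfer:
        fetches.update(transfer_fetches)    # model_loss, model_learning_rate, reward_loss
    return fetches

core = {"value_loss": 0.0, "policy_loss": 0.0}
extra = {"model_loss": 0.0, "reward_loss": 0.0}
assert "model_loss" not in build_update_dict(core, extra, use_transfer=False)
assert "model_loss" in build_update_dict(core, extra, use_transfer=True)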
self.update_batch_policy = policy_optimizer.minimize(
    self.policy_loss, var_list=policy_vars
)
- print("value trainable:", critic_vars)
+ # print("value trainable:", critic_vars)
+ if self.use_transfer:
+     value_loss = self.total_value_loss + self.model_loss
+ else:
+     value_loss = self.total_value_loss
-     self.total_value_loss, var_list=critic_vars
+     value_loss, var_list=critic_vars
)
# Add entropy coefficient optimization operation
with tf.control_dependencies([self.update_batch_value]):
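Note: with transfer enabled, the critic update now minimizes the value loss plus the model loss in a single optimizer step instead of the value loss alone. The choice can be summarized by a one-line helper (hypothetical; the tensor names are taken from the diff above, not verified against the repo):

def combined_critic_loss(total_value_loss, model_loss, use_transfer):
    # Objective handed to value_optimizer.minimize() in the sketch above.
    return total_value_loss + model_loss if use_transfer else total_value_loss

assert combined_critic_loss(1.0, 0.5, use_transfer=True) == 1.5
assert combined_critic_loss(1.0, 0.5, use_transfer=False) == 1.0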

stats_needed = self.stats_name_to_update_name
update_stats: Dict[str, float] = {}
- # update_vals = self._execute_model(feed_dict, self.update_dict)
- update_vals = self._execute_model(feed_dict, self.model_update_dict)
- update_vals.update(self._execute_model(feed_dict, self.update_dict))
+ if self.use_transfer:
+     update_vals = self._execute_model(feed_dict, self.update_dict)
+ else:
+     update_vals = self._execute_model(feed_dict, self.model_update_dict)
+     update_vals.update(self._execute_model(feed_dict, self.update_dict))
- update_stats[stat_name] = update_vals[update_name]
+ if update_name in update_vals.keys():
+     update_stats[stat_name] = update_vals[update_name]
if self.use_bisim:
    bisim_stats = self.update_encoder(batch, batch_bisim)

self.sess.run(self.target_update_op)
self.policy.run_soft_copy()
+ self.num_updates += 1
return update_stats
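Note: taken together, the update path now branches on use_transfer and guards each stat lookup, since model-related entries may be missing from the fetched values. A condensed, hypothetical view of that flow (the real method also builds feed_dict, runs the bisimulation encoder update, and performs the target-network copy shown above):

from typing import Callable, Dict

def run_update(
    execute: Callable[[dict], Dict[str, float]],
    update_dict: dict,
    model_update_dict: dict,
    stats_name_to_update_name: Dict[str, str],
    use_transfer: bool,
) -> Dict[str, float]:
    if use_transfer:
        # Transfer runs fetch everything in one pass: model losses already live in update_dict.
        update_vals = execute(update_dict)
    else:
        # Otherwise run the model update first, then the SAC update.
        update_vals = execute(model_update_dict)
        update_vals.update(execute(update_dict))
    update_stats: Dict[str, float] = {}
    for stat_name, update_name in stats_name_to_update_name.items():
        # Guard: some stats (e.g. model_loss) are absent depending on the branch taken.
        if update_name in update_vals:
            update_stats[stat_name] = update_vals[update_name]
    return update_stats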
