
basic implementation

/develop/bisim-review
yanchaosun, 4 years ago
Current commit: c2d6f5c0
9 files changed, with 850 insertions and 99 deletions
  1. Project/Assets/ML-Agents/Examples/3DBall/Scripts/Ball3DAgent.cs (4 changes)
  2. Project/ProjectSettings/EditorBuildSettings.asset (8 changes)
  3. Project/ProjectSettings/UnityConnectSettings.asset (2 changes)
  4. config/ppo/3DBallHard.yaml (2 changes)
  5. config/ppo_transfer/3DBall.yaml (2 changes)
  6. ml-agents/mlagents/trainers/ppo_transfer/optimizer.py (417 changes)
  7. ml-agents/mlagents/trainers/ppo_transfer/trainer.py (10 changes)
  8. config/ppo_transfer/3DBallHard.yaml (26 changes)
  9. ml-agents/mlagents/trainers/policy/transfer_policy.py (478 changes)

Project/Assets/ML-Agents/Examples/3DBall/Scripts/Ball3DAgent.cs (4 changes)


public override void OnActionReceived(float[] vectorAction)
{
var actionZ = 2f * Mathf.Clamp(vectorAction[0], -1f, 1f);
var actionX = 2f * Mathf.Clamp(vectorAction[1], -1f, 1f);
var actionZ = 1f * Mathf.Clamp(vectorAction[0], -1f, 1f);
var actionX = 1f * Mathf.Clamp(vectorAction[1], -1f, 1f);
if ((gameObject.transform.rotation.z < 0.25f && actionZ > 0f) ||
(gameObject.transform.rotation.z > -0.25f && actionZ < 0f))

Project/ProjectSettings/EditorBuildSettings.asset (8 changes)


EditorBuildSettings:
m_ObjectHideFlags: 0
serializedVersion: 2
m_Scenes: []
m_Scenes:
- enabled: 0
path: Assets/ML-Agents/Examples/3DBall/Scenes/3DBallHard.unity
guid: 35c41099ceec44889bdbe95ed86c97ac
- enabled: 1
path: Assets/ML-Agents/Examples/3DBall/Scenes/3DBall.unity
guid: b9ac0cbf961bf4dacbfa0aa9c0d60aaa
m_configObjects: {}

Project/ProjectSettings/UnityConnectSettings.asset (2 changes)


UnityConnectSettings:
m_ObjectHideFlags: 0
serializedVersion: 1
m_Enabled: 1
m_Enabled: 0
m_TestMode: 0
m_EventOldUrl: https://api.uca.cloud.unity3d.com/v1/events
m_EventUrl: https://cdp.cloud.unity3d.com/v1/events

config/ppo/3DBallHard.yaml (2 changes)


gamma: 0.995
strength: 1.0
keep_checkpoints: 5
max_steps: 5000000
max_steps: 4000000
time_horizon: 1000
summary_freq: 12000
threaded: true

config/ppo_transfer/3DBall.yaml (2 changes)


network_settings:
normalize: true
hidden_units: 128
num_layers: 2
num_layers: 1
vis_encode_type: simple
reward_signals:
extrinsic:

ml-agents/mlagents/trainers/ppo_transfer/optimizer.py (417 changes)


from mlagents_envs.timers import timed
from mlagents.trainers.models import ModelUtils, EncoderType
from mlagents.trainers.policy.tf_policy import TFPolicy
from mlagents.trainers.policy.transfer_policy import TransferPolicy
import tf_slim as slim
def __init__(self, policy: TFPolicy, trainer_params: TrainerSettings):
def __init__(self, policy: TransferPolicy, trainer_params: TrainerSettings):
"""
Takes a Policy and a Dict of trainer parameters and creates an Optimizer around the policy.
The PPO optimizer has a value estimator and a loss function.

self.separate_value_train = False
self.ppo_update_dict: Dict[str, tf.Tensor] = {}
self.model_update_dict: Dict[str, tf.Tensor] = {}
self.use_alter = False
self.in_batch_alter = False
self.num_updates = 0
self.alter_every = 400
# Transfer
self.use_transfer = True
self.smart_transfer = False
self.conv_thres = 1e-6
self.old_loss = np.inf
self.update_mode = "model"
self.transfer_path = "results/BallSingle_nosep/3DBall"
self.transfer_type = "observation"
with tf.variable_scope("optimizer/"):
super().__init__(policy, trainer_params)
hyperparameters: PPOSettings = cast(
PPOSettings, trainer_params.hyperparameters
)
lr = float(hyperparameters.learning_rate)
self._schedule = hyperparameters.learning_rate_schedule
epsilon = float(hyperparameters.epsilon)
beta = float(hyperparameters.beta)
max_step = float(trainer_params.max_steps)
policy_network_settings = policy.network_settings
h_size = int(policy_network_settings.hidden_units)
num_layers = policy_network_settings.num_layers
vis_encode_type = policy_network_settings.vis_encode_type
self.burn_in_ratio = 0.0
self.stream_names = list(self.reward_signals.keys())
self.tf_optimizer: Optional[tf.train.AdamOptimizer] = None
self.grads = None
self.update_batch: Optional[tf.Operation] = None
self.stats_name_to_update_name = {
"Losses/Value Loss": "value_loss",
"Losses/Policy Loss": "policy_loss",
"Policy/Learning Rate": "learning_rate",
"Policy/Epsilon": "decay_epsilon",
"Policy/Beta": "decay_beta",
}
if self.policy.use_recurrent:
self.m_size = self.policy.m_size
self.memory_in = tf.placeholder(
shape=[None, self.m_size],
dtype=tf.float32,
name="recurrent_value_in",
)
self.stats_name_to_update_name = {
"Losses/Value Loss": "value_loss",
"Losses/Policy Loss": "policy_loss",
"Losses/Model Loss": "model_loss",
"Policy/Learning Rate": "learning_rate",
"Policy/Epsilon": "decay_epsilon",
"Policy/Beta": "decay_beta",
}
if self.policy.use_recurrent:
self.m_size = self.policy.m_size
self.memory_in = tf.placeholder(
shape=[None, self.m_size],
dtype=tf.float32,
name="recurrent_value_in",
)
if num_layers < 1:
num_layers = 1
with tf.variable_scope("value"):
with tf.variable_scope("optimizer/"):
self.learning_rate = ModelUtils.create_schedule(
self._schedule,
lr,

self.old_log_probs,
self.value_heads,
self.policy.entropy,
self.policy.targ_encoder,
self.policy.predict,
beta,
epsilon,
lr,

self.update_dict.update(
{
"value_loss": self.value_loss,
"policy_loss": self.abs_policy_loss,
"update_batch": self.update_batch,
"learning_rate": self.learning_rate,
"decay_epsilon": self.decay_epsilon,
"decay_beta": self.decay_beta,
}
)
self.update_dict.update(
{
"value_loss": self.value_loss,
"policy_loss": self.abs_policy_loss,
"model_loss": self.model_loss,
"update_batch": self.update_batch,
"learning_rate": self.learning_rate,
"decay_epsilon": self.decay_epsilon,
"decay_beta": self.decay_beta,
}
)
if self.use_alter or self.smart_transfer or self.in_batch_alter:
self._init_alter_update()
if self.use_transfer:
self.policy.load_graph_partial(self.transfer_path, self.transfer_type)
slim.model_analyzer.analyze_vars(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), print_info=True)
print("All variables in the graph:")
for variable in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
print(variable.name)
# tf.summary.FileWriter(self.policy.model_path, self.sess.graph)
def _create_cc_critic(
self, h_size: int, num_layers: int, vis_encode_type: EncoderType
) -> None:

:param num_layers: Number of hidden linear layers.
:param vis_encode_type: The type of visual encoder to use.
"""
hidden_stream = ModelUtils.create_observation_streams(
self.policy.visual_in,
self.policy.processed_vector_in,
1,
if self.separate_value_train:
input_state = tf.stop_gradient(self.policy.encoder)
else:
input_state = self.policy.encoder
hidden_value = ModelUtils.create_vector_observation_encoder(
input_state,
ModelUtils.swish,
vis_encode_type,
)[0]
if self.policy.use_recurrent:
hidden_value, memory_value_out = ModelUtils.create_recurrent_encoder(
hidden_stream,
self.memory_in,
self.policy.sequence_length_ph,
name="lstm_value",
)
self.memory_out = memory_value_out
else:
hidden_value = hidden_stream
scope=f"main_graph",
reuse=False
)
self.value_heads, self.value = ModelUtils.create_value_heads(
self.stream_names, hidden_value
)

:param num_layers: Number of hidden linear layers.
:param vis_encode_type: The type of visual encoder to use.
"""
hidden_stream = ModelUtils.create_observation_streams(
self.policy.visual_in,
self.policy.processed_vector_in,
1,
if self.separate_value_train:
input_state = tf.stop_gradient(self.policy.encoder)
else:
input_state = self.policy.encoder
hidden_value = ModelUtils.create_vector_observation_encoder(
input_state,
ModelUtils.swish,
vis_encode_type,
)[0]
if self.policy.use_recurrent:
hidden_value, memory_value_out = ModelUtils.create_recurrent_encoder(
hidden_stream,
self.memory_in,
self.policy.sequence_length_ph,
name="lstm_value",
)
self.memory_out = memory_value_out
else:
hidden_value = hidden_stream
scope=f"main_graph",
reuse=False
)
self.value_heads, self.value = ModelUtils.create_value_heads(
self.stream_names, hidden_value
)

)
def _create_losses(
self, probs, old_probs, value_heads, entropy, beta, epsilon, lr, max_step
self, probs, old_probs, value_heads, entropy, targ_encoder, predict, beta, epsilon, lr, max_step
):
"""
Creates training-specific Tensorflow ops for PPO models.

# For cleaner stats reporting
self.abs_policy_loss = tf.abs(self.policy_loss)
# encoder and predict loss
self.dis_returns = tf.placeholder(
shape=[None], dtype=tf.float32, name="dis_returns"
)
target = tf.concat([targ_encoder, tf.expand_dims(self.dis_returns, -1)], axis=1)
self.model_loss = tf.reduce_mean(tf.squared_difference(predict, targ_encoder))
+ self.model_loss
+ 0.5 * self.value_loss
- self.decay_beta
* tf.reduce_mean(tf.dynamic_partition(entropy, self.policy.mask, 2)[1])
)
self.ppo_loss = (
self.policy_loss
+ 0.5 * self.value_loss
- self.decay_beta
* tf.reduce_mean(tf.dynamic_partition(entropy, self.policy.mask, 2)[1])

if self.use_transfer:
if self.transfer_type == "dynamics":
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
# train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "encoding")
# train_vars += tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "policy")
# train_vars += tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "value")
# train_vars += tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "policy/mu")
# train_vars += tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "policy/log_std")
# train_vars += tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "value/extrinsic_value")
elif self.transfer_type == "observation":
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "policy") \
+ tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "predict") \
+ tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "value")
else:
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.grads = self.tf_optimizer.compute_gradients(self.loss)
self.update_batch = self.tf_optimizer.minimize(self.loss)
self.grads = self.tf_optimizer.compute_gradients(self.loss, var_list=train_vars)
self.update_batch = self.tf_optimizer.minimize(self.loss, var_list=train_vars)
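For reference, a minimal NumPy sketch of how the losses in this hunk fit together (illustrative only: policy_loss, value_loss, entropy, predict, and targ_encoder stand in for the TF tensors above, and the 0.5 value coefficient and decayed beta mirror the code):

import numpy as np

def model_loss(predict, targ_encoder):
    # Squared error between the world-model prediction and the target
    # encoder's embedding of the next observation.
    return np.mean((predict - targ_encoder) ** 2)

def joint_loss(policy_loss, value_loss, m_loss, entropy, decay_beta):
    # Joint objective: PPO surrogate + model loss + value loss - entropy bonus.
    return policy_loss + m_loss + 0.5 * value_loss - decay_beta * np.mean(entropy)

def ppo_loss(policy_loss, value_loss, entropy, decay_beta):
    # Pure PPO objective (self.ppo_loss above), with no model term.
    return policy_loss + 0.5 * value_loss - decay_beta * np.mean(entropy)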
def _init_alter_update(self):
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
policy_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "encoding/latent")
model_train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "encoding")
self.ppo_optimizer = self.create_optimizer_op(self.learning_rate)
self.ppo_grads = self.ppo_optimizer.compute_gradients(self.ppo_loss, var_list=train_vars)
self.ppo_update_batch = self.ppo_optimizer.minimize(self.ppo_loss, var_list=train_vars)
self.model_optimizer = self.create_optimizer_op(self.learning_rate)
self.model_grads = self.model_optimizer.compute_gradients(self.model_loss, var_list=model_train_vars)
self.model_update_batch = self.model_optimizer.minimize(self.model_loss, var_list=model_train_vars)
self.ppo_update_dict.update(
{
"value_loss": self.value_loss,
"policy_loss": self.abs_policy_loss,
"model_loss": self.model_loss,
"update_batch": self.ppo_update_batch,
"learning_rate": self.learning_rate,
"decay_epsilon": self.decay_epsilon,
"decay_beta": self.decay_beta,
}
)
self.model_update_dict.update(
{
"value_loss": self.value_loss,
"policy_loss": self.abs_policy_loss,
"model_loss": self.model_loss,
"update_batch": self.model_update_batch,
"learning_rate": self.learning_rate,
"decay_epsilon": self.decay_epsilon,
"decay_beta": self.decay_beta,
}
)
@timed
def update(self, batch: AgentBuffer, num_sequences: int) -> Dict[str, float]:

reward_signal.prepare_update(self.policy, batch, num_sequences)
)
stats_needed.update(reward_signal.stats_name_to_update_name)
if self.use_alter:
if self.num_updates / self.alter_every == 0:
update_vals = self._execute_model(feed_dict, self.update_dict)
if self.num_updates % self.alter_every == 0:
print("start update all", self.num_updates)
elif (self.num_updates / self.alter_every) % 2 == 1:
update_vals = self._execute_model(feed_dict, self.model_update_dict)
if self.num_updates % self.alter_every == 0:
print("start update model", self.num_updates)
else: # (self.num_updates / self.alter_every) % 2 == 0:
update_vals = self._execute_model(feed_dict, self.ppo_update_dict)
if self.num_updates % self.alter_every == 0:
print("start update policy", self.num_updates)
self.num_updates += 1
elif self.in_batch_alter:
update_vals = self._execute_model(feed_dict, self.model_update_dict)
update_vals.update(self._execute_model(feed_dict, self.ppo_update_dict))
elif self.use_transfer and self.smart_transfer:
if self.update_mode == "model":
update_vals = self._execute_model(feed_dict, self.update_dict)
cur_loss = update_vals["model_loss"]
print("model loss:", cur_loss)
if abs(cur_loss - self.old_loss) < self.conv_thres:
self.update_mode = "policy"
print("start to train policy")
else:
self.old_loss = cur_loss
if self.update_mode == "policy":
update_vals = self._execute_model(feed_dict, self.ppo_update_dict)
else:
update_vals = self._execute_model(feed_dict, self.update_dict)
update_vals = self._execute_model(feed_dict, self.update_dict)
# update target encoder
self.policy.hard_copy_encoder()
for stat_name, update_name in stats_needed.items():
update_stats[stat_name] = update_vals[update_name]
return update_stats
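To make the branching above easier to follow, a minimal plain-Python sketch of the update schedule it implements (names mirror the fields set in __init__; the integer-phase arithmetic is an assumption about the intent of the alter_every check, not the repo's API):

def select_update(num_updates, alter_every, use_alter, in_batch_alter,
                  use_transfer, smart_transfer, update_mode):
    """Return which op sets to run for this minibatch (sketch)."""
    if use_alter:
        phase = num_updates // alter_every          # assumed integer phase index
        if phase == 0:
            return ["all"]                          # warm-up: joint update
        return ["model"] if phase % 2 == 1 else ["ppo"]
    if in_batch_alter:
        return ["model", "ppo"]                     # both passes on every minibatch
    if use_transfer and smart_transfer:
        return ["all"] if update_mode == "model" else ["ppo"]
    return ["all"]

def maybe_switch_mode(update_mode, cur_model_loss, old_loss, conv_thres):
    """Smart transfer: switch to policy training once the model loss converges."""
    if update_mode == "model" and abs(cur_model_loss - old_loss) < conv_thres:
        return "policy", old_loss
    return update_mode, cur_model_loss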

self.policy.mask_input: mini_batch["masks"] * burn_in_mask,
self.advantage: mini_batch["advantages"],
self.all_old_log_probs: mini_batch["action_probs"],
self.policy.processed_vector_next: mini_batch["next_vector_in"],
self.policy.current_action: mini_batch["actions"],
self.dis_returns: mini_batch["discounted_returns"]
}
for name in self.reward_signals:
feed_dict[self.returns_holders[name]] = mini_batch[

self.m_size, mini_batch.num_experiences
)
return feed_dict
def _create_cc_critic_old(
self, h_size: int, num_layers: int, vis_encode_type: EncoderType
) -> None:
"""
Creates Continuous control critic (value) network.
:param h_size: Size of hidden linear layers.
:param num_layers: Number of hidden linear layers.
:param vis_encode_type: The type of visual encoder to use.
"""
hidden_stream = ModelUtils.create_observation_streams(
self.policy.visual_in,
self.policy.processed_vector_in,
1,
h_size,
num_layers,
vis_encode_type,
)[0]
if self.policy.use_recurrent:
hidden_value, memory_value_out = ModelUtils.create_recurrent_encoder(
hidden_stream,
self.memory_in,
self.policy.sequence_length_ph,
name="lstm_value",
)
self.memory_out = memory_value_out
else:
hidden_value = hidden_stream
self.value_heads, self.value = ModelUtils.create_value_heads(
self.stream_names, hidden_value
)
self.all_old_log_probs = tf.placeholder(
shape=[None, sum(self.policy.act_size)],
dtype=tf.float32,
name="old_probabilities",
)
self.old_log_probs = tf.reduce_sum(
(tf.identity(self.all_old_log_probs)), axis=1, keepdims=True
)
def _create_dc_critic_old(
self, h_size: int, num_layers: int, vis_encode_type: EncoderType
) -> None:
"""
Creates Discrete control critic (value) network.
:param h_size: Size of hidden linear layers.
:param num_layers: Number of hidden linear layers.
:param vis_encode_type: The type of visual encoder to use.
"""
hidden_stream = ModelUtils.create_observation_streams(
self.policy.visual_in,
self.policy.processed_vector_in,
1,
h_size,
num_layers,
vis_encode_type,
)[0]
if self.policy.use_recurrent:
hidden_value, memory_value_out = ModelUtils.create_recurrent_encoder(
hidden_stream,
self.memory_in,
self.policy.sequence_length_ph,
name="lstm_value",
)
self.memory_out = memory_value_out
else:
hidden_value = hidden_stream
self.value_heads, self.value = ModelUtils.create_value_heads(
self.stream_names, hidden_value
)
self.all_old_log_probs = tf.placeholder(
shape=[None, sum(self.policy.act_size)],
dtype=tf.float32,
name="old_probabilities",
)
# Break old log probs into separate branches
old_log_prob_branches = ModelUtils.break_into_branches(
self.all_old_log_probs, self.policy.act_size
)
_, _, old_normalized_logits = ModelUtils.create_discrete_action_masking_layer(
old_log_prob_branches, self.policy.action_masks, self.policy.act_size
)
action_idx = [0] + list(np.cumsum(self.policy.act_size))
self.old_log_probs = tf.reduce_sum(
(
tf.stack(
[
-tf.nn.softmax_cross_entropy_with_logits_v2(
labels=self.policy.selected_actions[
:, action_idx[i] : action_idx[i + 1]
],
logits=old_normalized_logits[
:, action_idx[i] : action_idx[i + 1]
],
)
for i in range(len(self.policy.act_size))
],
axis=1,
)
),
axis=1,
keepdims=True,
)

ml-agents/mlagents/trainers/ppo_transfer/trainer.py (10 changes)


from mlagents_envs.logging_util import get_logger
from mlagents.trainers.policy.nn_policy import NNPolicy
from mlagents.trainers.policy.transfer_policy import TransferPolicy
from mlagents.trainers.trainer.rl_trainer import RLTrainer
from mlagents.trainers.brain import BrainParameters
from mlagents.trainers.policy.tf_policy import TFPolicy

)
self.load = load
self.seed = seed
self.policy: NNPolicy = None # type: ignore
self.policy: TransferPolicy = None # type: ignore
print("The current algorithm is PPO Transfer")
def _process_trajectory(self, trajectory: Trajectory) -> None:

global_returns = list(np.mean(np.array(tmp_returns, dtype=np.float32), axis=0))
agent_buffer_trajectory["advantages"].set(global_advantages)
agent_buffer_trajectory["discounted_returns"].set(global_returns)
# Append to update buffer
agent_buffer_trajectory.resequence_and_append(
self.update_buffer, training_length=self.policy.sequence_length

"""
buffer_length = self.update_buffer.num_experiences
self.cumulative_returns_since_policy_update.clear()
# tf.stop_gradient
# Make sure batch_size is a multiple of sequence length. During training, we
# will need to reshape the data into a batch_size x sequence_length tensor.
batch_size = (

:param brain_parameters: specifications for policy construction
:return policy
"""
policy = NNPolicy(
policy = TransferPolicy(
self.seed,
brain_parameters,
self.trainer_settings,

self.__class__.__name__
)
)
if not isinstance(policy, NNPolicy):
if not isinstance(policy, TransferPolicy):
raise RuntimeError("Non-NNPolicy passed to PPOTrainer.add_policy()")
self.policy = policy
self.optimizer = PPOTransferOptimizer(self.policy, self.trainer_settings)

config/ppo_transfer/3DBallHard.yaml (26 changes)


behaviors:
3DBallHard:
trainer_type: ppo_transfer
hyperparameters:
batch_size: 1200
buffer_size: 12000
learning_rate: 0.0003
beta: 0.001
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: linear
network_settings:
normalize: true
hidden_units: 128
num_layers: 1
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.995
strength: 1.0
keep_checkpoints: 5
max_steps: 4000000
time_horizon: 1000
summary_freq: 12000
threaded: true

ml-agents/mlagents/trainers/policy/transfer_policy.py (478 changes)


import os
from typing import Any, Dict, Optional, List
from mlagents.tf_utils import tf
from mlagents_envs.timers import timed
from mlagents_envs.base_env import DecisionSteps
from mlagents.trainers.brain import BrainParameters
from mlagents.trainers.models import EncoderType
from mlagents.trainers.models import ModelUtils
from mlagents.trainers.policy.tf_policy import TFPolicy
from mlagents.trainers.settings import TrainerSettings
from mlagents.trainers.distributions import (
GaussianDistribution,
MultiCategoricalDistribution,
)
import tf_slim as slim
EPSILON = 1e-6 # Small value to avoid divide by zero
class TransferPolicy(TFPolicy):
def __init__(
self,
seed: int,
brain: BrainParameters,
trainer_params: TrainerSettings,
is_training: bool,
model_path: str,
load: bool,
tanh_squash: bool = False,
reparameterize: bool = False,
condition_sigma_on_obs: bool = True,
create_tf_graph: bool = True,
):
"""
Policy that uses a multilayer perceptron to map the observations to actions. Could
also use a CNN to encode visual input prior to the MLP. Supports discrete and
continuous action spaces, as well as recurrent networks.
:param seed: Random seed.
:param brain: Assigned BrainParameters object.
:param trainer_params: Defined training parameters.
:param is_training: Whether the model should be trained.
:param load: Whether a pre-trained model will be loaded or a new one created.
:param model_path: Path where the model should be saved and loaded.
:param tanh_squash: Whether to use a tanh function on the continuous output, or a clipped output.
:param reparameterize: Whether we are using the resampling trick to update the policy in continuous output.
"""
super().__init__(seed, brain, trainer_params, model_path, load)
self.grads = None
self.update_batch: Optional[tf.Operation] = None
num_layers = self.network_settings.num_layers
self.h_size = self.network_settings.hidden_units
if num_layers < 1:
num_layers = 1
self.num_layers = num_layers
self.vis_encode_type = self.network_settings.vis_encode_type
self.tanh_squash = tanh_squash
self.reparameterize = reparameterize
self.condition_sigma_on_obs = condition_sigma_on_obs
self.trainable_variables: List[tf.Variable] = []
# Model-based learning
self.feature_size = 16 # dimension of latent feature size
self.separate_train = False # whether to train policy and model separately
# Non-exposed parameters; these aren't exposed because they don't have a
# good explanation and usually shouldn't be touched.
self.log_std_min = -20
self.log_std_max = 2
if create_tf_graph:
self.create_tf_graph()
def get_trainable_variables(self) -> List[tf.Variable]:
"""
Returns a List of the trainable variables in this policy. if create_tf_graph hasn't been called,
returns empty list.
"""
return self.trainable_variables
def create_tf_graph(self, transfer=False) -> None:
"""
Builds the tensorflow graph needed for this policy.
"""
with self.graph.as_default():
tf.set_random_seed(self.seed)
_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
if len(_vars) > 0:
# We assume the first thing created in the graph is the Policy. If
# already populated, don't create more tensors.
return
self.create_input_placeholders()
# latent feature encoder
if transfer:
n_layers = self.num_layers + 1
else:
n_layers = self.num_layers
self.encoder = self._create_encoder(
self.visual_in,
self.processed_vector_in,
self.h_size,
self.feature_size,
n_layers,
self.vis_encode_type
)
self.targ_encoder = self._create_target_encoder(
self.h_size,
self.feature_size,
n_layers,
self.vis_encode_type
)
self.hard_copy_encoder()
self.predict = self._create_world_model(
self.encoder,
self.h_size,
self.feature_size,
self.num_layers,
self.vis_encode_type
)
if self.use_continuous_act:
self._create_cc_actor(
self.encoder,
self.h_size,
self.num_layers,
self.tanh_squash,
self.reparameterize,
self.condition_sigma_on_obs,
)
else:
self._create_dc_actor(self.encoder, self.h_size, self.num_layers)
self.trainable_variables = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="policy"
)
self.trainable_variables += tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="encoding"
)
self.trainable_variables += tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="predict"
)
self.trainable_variables += tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="lstm"
) # LSTMs need to be root scope for Barracuda export
self.inference_dict: Dict[str, tf.Tensor] = {
"action": self.output,
"log_probs": self.all_log_probs,
"entropy": self.entropy,
}
if self.use_continuous_act:
self.inference_dict["pre_action"] = self.output_pre
if self.use_recurrent:
self.inference_dict["memory_out"] = self.memory_out
# We do an initialize to make the Policy usable out of the box. If an optimizer is needed,
# it will re-load the full graph
self._initialize_graph()
# slim.model_analyzer.analyze_vars(self.trainable_variables, print_info=True)
def load_graph_partial(self, path: str, transfer_type="dynamics"):
load_nets = {"dynamics": ["policy", "predict", "value"], "observation": ["encoding"]}
for net in load_nets[transfer_type]:
variables_to_restore = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, net)
partial_saver = tf.train.Saver(variables_to_restore)
partial_model_checkpoint = os.path.join(path, f"{net}.ckpt")
partial_saver.restore(self.sess, partial_model_checkpoint)
print("loaded net", net, "from path", path)
# variables_to_restore = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "encoding/latent")
# partial_saver = tf.train.Saver(variables_to_restore)
# partial_model_checkpoint = os.path.join(path, f"latent.ckpt")
# partial_saver.restore(self.sess, partial_model_checkpoint)
# print("loaded net latent from path", path)
if transfer_type == "observation":
self.hard_copy_encoder()
def _create_world_model(
self,
encoder: tf.Tensor,
h_size: int,
feature_size: int,
num_layers: int,
vis_encode_type: EncoderType,
) -> tf.Tensor:
""""
Builds the world model for state prediction
"""
with self.graph.as_default():
with tf.variable_scope("predict"):
self.current_action = tf.placeholder(
shape=[None, sum(self.act_size)], dtype=tf.float32, name="current_action"
)
hidden_stream = ModelUtils.create_vector_observation_encoder(
tf.concat([encoder, self.current_action], axis=1),
h_size,
ModelUtils.swish,
num_layers,
scope=f"main_graph",
reuse=False
)
predict = tf.layers.dense(
hidden_stream,
feature_size,
name="next_state"
)
return predict
@timed
def evaluate(
self, decision_requests: DecisionSteps, global_agent_ids: List[str]
) -> Dict[str, Any]:
"""
Evaluates policy for the agent experiences provided.
:param decision_requests: DecisionSteps object containing inputs.
:param global_agent_ids: The global (with worker ID) agent ids of the data in the batched_step_result.
:return: Outputs from network as defined by self.inference_dict.
"""
feed_dict = {
self.batch_size_ph: len(decision_requests),
self.sequence_length_ph: 1,
}
if self.use_recurrent:
if not self.use_continuous_act:
feed_dict[self.prev_action] = self.retrieve_previous_action(
global_agent_ids
)
feed_dict[self.memory_in] = self.retrieve_memories(global_agent_ids)
feed_dict = self.fill_eval_dict(feed_dict, decision_requests)
run_out = self._execute_model(feed_dict, self.inference_dict)
return run_out
def _create_target_encoder(
self,
h_size: int,
feature_size: int,
num_layers: int,
vis_encode_type: EncoderType,
) -> tf.Tensor:
self.visual_next = ModelUtils.create_visual_input_placeholders(
self.brain.camera_resolutions
)
self.vector_next = ModelUtils.create_vector_input(self.vec_obs_size)
if self.normalize:
self.processed_vector_next = ModelUtils.normalize_vector_obs(
self.vector_next,
self.running_mean,
self.running_variance,
self.normalization_steps,
)
else:
self.processed_vector_next = self.vector_next
with tf.variable_scope("target_enc"):
hidden_stream_targ = ModelUtils.create_observation_streams(
self.visual_next,
self.processed_vector_next,
1,
h_size,
num_layers,
vis_encode_type,
)[0]
latent_targ = tf.layers.dense(
hidden_stream_targ,
feature_size,
name="latent"
)
return tf.stop_gradient(latent_targ)
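A tiny generic TF1 example of what wrapping the target branch in tf.stop_gradient does (not this repo's code): gradients of the model loss flow only through the prediction path, so the target encoder is never trained directly.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.Variable([1.0, 2.0])
pred = 2.0 * x                        # differentiable path (stands in for the predictor)
targ = tf.stop_gradient(3.0 * x)      # target path is treated as a constant by autodiff
loss = tf.reduce_mean(tf.squared_difference(pred, targ))
grads = tf.gradients(loss, [x])       # nonzero only via `pred`; `targ` contributes nothing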
def _create_encoder(
self,
visual_in: List[tf.Tensor],
vector_in: tf.Tensor,
h_size: int,
feature_size: int,
num_layers: int,
vis_encode_type: EncoderType,
) -> tf.Tensor:
"""
Creates an encoder for visual and vector observations.
:param h_size: Size of hidden linear layers.
:param num_layers: Number of hidden linear layers.
:param vis_encode_type: Type of visual encoder to use if visual input.
:return: The hidden layer (tf.Tensor) after the encoder.
"""
with tf.variable_scope("encoding"):
hidden_stream = ModelUtils.create_observation_streams(
self.visual_in,
self.processed_vector_in,
1,
h_size,
num_layers,
vis_encode_type,
)[0]
latent = tf.layers.dense(
hidden_stream,
feature_size,
name="latent"
)
return latent
def hard_copy_encoder(self):
t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_enc')
e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='encoding')
with tf.variable_scope('hard_replacement'):
self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
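For context, a standalone TF1-style sketch of the hard-copy pattern used here, assuming generic scope names; the tf.assign ops only take effect when they are run in a session.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def build_hard_copy_op(src_scope="encoding", dst_scope="target_enc"):
    src_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=src_scope)
    dst_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=dst_scope)
    # One assign per (target, source) pair, grouped so a single run copies everything.
    return tf.group(*[tf.assign(d, s) for d, s in zip(dst_vars, src_vars)])

# Usage sketch: build the op once after both scopes exist, then sync on demand.
# copy_op = build_hard_copy_op()
# sess.run(copy_op)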
def _create_cc_actor(
self,
encoded: tf.Tensor,
h_size: int,
num_layers: int,
tanh_squash: bool = False,
reparameterize: bool = False,
condition_sigma_on_obs: bool = True,
) -> None:
"""
Creates Continuous control actor-critic model.
:param h_size: Size of hidden linear layers.
:param num_layers: Number of hidden linear layers.
:param vis_encode_type: Type of visual encoder to use if visual input.
:param tanh_squash: Whether to use a tanh function, or a clipped output.
:param reparameterize: Whether we are using the resampling trick to update the policy.
"""
if self.use_recurrent:
self.memory_in = tf.placeholder(
shape=[None, self.m_size], dtype=tf.float32, name="recurrent_in"
)
hidden_policy, memory_policy_out = ModelUtils.create_recurrent_encoder(
encoded, self.memory_in, self.sequence_length_ph, name="lstm_policy"
)
self.memory_out = tf.identity(memory_policy_out, name="recurrent_out")
else:
hidden_policy = encoded
if self.separate_train:
hidden_policy = tf.stop_gradient(hidden_policy)
with tf.variable_scope("policy"):
hidden_policy = ModelUtils.create_vector_observation_encoder(
hidden_policy,
h_size,
ModelUtils.swish,
num_layers,
scope=f"main_graph",
reuse=False,
)
distribution = GaussianDistribution(
hidden_policy,
self.act_size,
reparameterize=reparameterize,
tanh_squash=tanh_squash,
condition_sigma=condition_sigma_on_obs,
)
if tanh_squash:
self.output_pre = distribution.sample
self.output = tf.identity(self.output_pre, name="action")
else:
self.output_pre = distribution.sample
# Clip and scale output to ensure actions are always within [-1, 1] range.
output_post = tf.clip_by_value(self.output_pre, -3, 3) / 3
self.output = tf.identity(output_post, name="action")
self.selected_actions = tf.stop_gradient(self.output)
self.all_log_probs = tf.identity(distribution.log_probs, name="action_probs")
self.entropy = distribution.entropy
# We keep these tensors the same name, but use new nodes to keep code parallelism with discrete control.
self.total_log_probs = distribution.total_log_probs
def _create_dc_actor(self, encoded: tf.Tensor, h_size: int, num_layers: int) -> None:
"""
Creates Discrete control actor-critic model.
:param h_size: Size of hidden linear layers.
:param num_layers: Number of hidden linear layers.
:param vis_encode_type: Type of visual encoder to use if visual input.
"""
if self.use_recurrent:
self.prev_action = tf.placeholder(
shape=[None, len(self.act_size)], dtype=tf.int32, name="prev_action"
)
prev_action_oh = tf.concat(
[
tf.one_hot(self.prev_action[:, i], self.act_size[i])
for i in range(len(self.act_size))
],
axis=1,
)
hidden_policy = tf.concat([encoded, prev_action_oh], axis=1)
self.memory_in = tf.placeholder(
shape=[None, self.m_size], dtype=tf.float32, name="recurrent_in"
)
hidden_policy, memory_policy_out = ModelUtils.create_recurrent_encoder(
hidden_policy,
self.memory_in,
self.sequence_length_ph,
name="lstm_policy",
)
self.memory_out = tf.identity(memory_policy_out, "recurrent_out")
else:
hidden_policy = encoded
if self.separate_train:
hidden_policy = tf.stop_gradient(hidden_policy)
self.action_masks = tf.placeholder(
shape=[None, sum(self.act_size)], dtype=tf.float32, name="action_masks"
)
with tf.variable_scope("policy"):
hidden_policy = ModelUtils.create_vector_observation_encoder(
hidden_policy,
h_size,
ModelUtils.swish,
num_layers,
scope=f"main_graph",
reuse=False,
)
distribution = MultiCategoricalDistribution(
hidden_policy, self.act_size, self.action_masks
)
# It's important that we are able to feed_dict a value into this tensor to get the
# right one-hot encoding, so we can't do identity on it.
self.output = distribution.sample
self.all_log_probs = tf.identity(distribution.log_probs, name="action")
self.selected_actions = tf.stop_gradient(
distribution.sample_onehot
) # In discrete, these are onehot
self.entropy = distribution.entropy
self.total_log_probs = distribution.total_log_probs
def save_model(self, steps):
"""
Saves the model
:param steps: The number of steps the model was trained for
:return:
"""
with self.graph.as_default():
last_checkpoint = os.path.join(self.model_path, f"model-{steps}.ckpt")
self.saver.save(self.sess, last_checkpoint)
tf.train.write_graph(
self.graph, self.model_path, "raw_graph_def.pb", as_text=False
)
# save each net separately
policy_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "policy")
policy_saver = tf.train.Saver(policy_vars)
policy_checkpoint = os.path.join(self.model_path, f"policy.ckpt")
policy_saver.save(self.sess, policy_checkpoint)
encoding_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "encoding")
encoding_saver = tf.train.Saver(encoding_vars)
encoding_checkpoint = os.path.join(self.model_path, f"encoding.ckpt")
encoding_saver.save(self.sess, encoding_checkpoint)
latent_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "encoding/latent")
latent_saver = tf.train.Saver(latent_vars)
latent_checkpoint = os.path.join(self.model_path, f"latent.ckpt")
latent_saver.save(self.sess, latent_checkpoint)
predict_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "predict")
predict_saver = tf.train.Saver(predict_vars)
predict_checkpoint = os.path.join(self.model_path, f"predict.ckpt")
predict_saver.save(self.sess, predict_checkpoint)
value_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "value")
value_saver = tf.train.Saver(value_vars)
value_checkpoint = os.path.join(self.model_path, f"value.ckpt")
value_saver.save(self.sess, value_checkpoint)
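These per-net checkpoints pair with load_graph_partial above; a minimal standalone sketch of the save/restore-by-scope pattern (generic names and paths, assuming compatible variable shapes between the source and target graphs):

import os
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def save_scope(sess, scope, model_dir):
    # Checkpoint only the variables that live under `scope`.
    var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
    saver = tf.train.Saver(var_list)
    return saver.save(sess, os.path.join(model_dir, f"{scope}.ckpt"))

def restore_scope(sess, scope, model_dir):
    # Restore the same subset into another graph, leaving all other nets untouched.
    var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
    saver = tf.train.Saver(var_list)
    saver.restore(sess, os.path.join(model_dir, f"{scope}.ckpt"))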