
Proper critic memories for PPO

/develop/critic-op-lstm-currentmem
Ervin Teng, 4 years ago
commit ae7643b8
5 files changed, 75 insertions and 16 deletions
  1. ml-agents/mlagents/trainers/buffer.py (1 changed line)
  2. ml-agents/mlagents/trainers/optimizer/torch_optimizer.py (71 changed lines)
  3. ml-agents/mlagents/trainers/ppo/optimizer_torch.py (14 changed lines)
  4. ml-agents/mlagents/trainers/ppo/trainer.py (3 changed lines)
  5. ml-agents/mlagents/trainers/sac/trainer.py (2 changed lines)

ml-agents/mlagents/trainers/buffer.py (1 changed line)


ENVIRONMENT_REWARDS = "environment_rewards"
MASKS = "masks"
MEMORY = "memory"
+CRITIC_MEMORY = "critic_memory"
PREV_ACTION = "prev_action"
ADVANTAGES = "advantages"
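
The new CRITIC_MEMORY key gives the critic's LSTM state its own slot in the AgentBuffer, separate from the policy's MEMORY key. A minimal sketch of how such a key is written and read, assuming AgentBuffer indexes AgentBufferField values by BufferKey members (the usage the trainer hunks below rely on); the 64-unit memory size is a made-up example:

from mlagents.trainers.buffer import AgentBuffer, AgentBufferField, BufferKey
import numpy as np

buffer = AgentBuffer()
critic_mems = AgentBufferField()
for _ in range(10):
    # one (hypothetical) 64-unit critic memory per trajectory step
    critic_mems.append(np.zeros(64, dtype=np.float32))
buffer[BufferKey.CRITIC_MEMORY].set(critic_mems)  # stored once per trajectory, as ppo/trainer.py does below
first_mem = buffer[BufferKey.CRITIC_MEMORY][0]    # read back by index, as ppo/optimizer_torch.py does below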

ml-agents/mlagents/trainers/optimizer/torch_optimizer.py (71 changed lines)


from mlagents.torch_utils import torch
import numpy as np
-from mlagents.trainers.buffer import AgentBuffer, BufferKey
+from mlagents.trainers.buffer import AgentBuffer, AgentBufferField
from mlagents.trainers.trajectory import ObsUtil
from mlagents.trainers.torch.components.bc.module import BCModule
from mlagents.trainers.torch.components.reward_providers import create_reward_provider

self.global_step = torch.tensor(0)
self.bc_module: Optional[BCModule] = None
self.create_reward_signals(trainer_settings.reward_signals)
+self.critic_memory_dict: Dict[str, torch.Tensor] = {}
if trainer_settings.behavioral_cloning is not None:
self.bc_module = BCModule(
self.policy,

)
def get_trajectory_value_estimates(
-self, batch: AgentBuffer, next_obs: List[np.ndarray], done: bool
-) -> Tuple[Dict[str, np.ndarray], Dict[str, float]]:
+self,
+batch: AgentBuffer,
+next_obs: List[np.ndarray],
+done: bool,
+agent_id: str = "",
+) -> Tuple[Dict[str, np.ndarray], Dict[str, float], Optional[AgentBufferField]]:
current_obs = ObsUtil.from_buffer(batch, n_obs)
+if agent_id in self.critic_memory_dict:
+memory = self.critic_memory_dict[agent_id]
+else:
+memory = (
+torch.zeros((1, 1, self.critic.memory_size))
+if self.policy.use_recurrent
+else None
+)
+# If we're using LSTM, we want to get all the intermediate memories.
+all_next_memories: Optional[AgentBufferField] = None
+if self.policy.use_recurrent:
+resequenced_buffer = AgentBuffer()
+all_next_memories = AgentBufferField()
+# The 1st sequence is the one that is padded. So if seq_len = 3 and the
+# trajectory is of length 10, the 1st sequence is [pad, pad, obs].
+# Compute the number of elements in this padded seq.
+leftover = batch.num_experiences % self.policy.sequence_length
+first_seq_len = self.policy.sequence_length if leftover == 0 else leftover
+for _ in range(first_seq_len):
+all_next_memories.append(memory.squeeze().detach().numpy())
+batch.resequence_and_append(
+resequenced_buffer, training_length=self.policy.sequence_length
+)
+reseq_obs = ObsUtil.from_buffer(resequenced_buffer, n_obs)
+reseq_obs = [ModelUtils.list_to_tensor(obs) for obs in reseq_obs]
+# By now, the buffer should be of length seq_len * num_seq, padded
+_mem = memory
+for seq_num in range(
+resequenced_buffer.num_experiences // self.policy.sequence_length - 1
+):
+seq_obs = []
+for _obs in reseq_obs:
+start = seq_num * self.policy.sequence_length
+end = (seq_num + 1) * self.policy.sequence_length
+seq_obs.append(_obs[start:end])
+_, next_seq_mem = self.critic.critic_pass(
+seq_obs, _mem, sequence_length=self.policy.sequence_length
+)
+for _ in range(self.policy.sequence_length):
+all_next_memories.append(next_seq_mem.squeeze().detach().numpy())
current_obs = ObsUtil.from_buffer(batch, n_obs)
-memory = (
-ModelUtils.list_to_tensor(batch[BufferKey.MEMORY][0])
-.unsqueeze(0)
-.unsqueeze(0)
-if self.policy.use_recurrent
-else None
-)
+# Store the memory for the next trajectory
+self.critic_memory_dict[agent_id] = next_memory
next_value_estimate, _ = self.critic.critic_pass(
next_obs, next_memory, sequence_length=1
)
for k in next_value_estimate:
if not self.reward_signals[k].ignore_done:
next_value_estimate[k] = 0.0
+if agent_id in self.critic_memory_dict:
+self.critic_memory_dict.pop(agent_id)
-return value_estimates, next_value_estimate
+return value_estimates, next_value_estimate, all_next_memories
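
To make the padding arithmetic in the comment above concrete: resequence_and_append left-pads only the first sequence, so the initial critic memory has to be recorded once per real step of that first sequence for all_next_memories to line up one-to-one with the trajectory. A small worked example (illustration only, numbers taken from the code comment):

sequence_length = 3
num_experiences = 10                                    # trajectory length from the comment
leftover = num_experiences % sequence_length            # 10 % 3 == 1
first_seq_len = sequence_length if leftover == 0 else leftover
assert first_seq_len == 1                               # the first sequence is [pad, pad, obs]

padded_length = num_experiences + (sequence_length - leftover) % sequence_length  # 12 after resequencing
num_seqs = padded_length // sequence_length                                       # 4 sequences of length 3
# One initial-memory entry for the padded first sequence plus sequence_length
# entries for each remaining sequence gives exactly one memory per real step:
assert first_seq_len + (num_seqs - 1) * sequence_length == num_experiences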

ml-agents/mlagents/trainers/ppo/optimizer_torch.py (14 changed lines)


if len(memories) > 0:
memories = torch.stack(memories).unsqueeze(0)
+# Get value memories
+value_memories = [
+ModelUtils.list_to_tensor(batch[BufferKey.CRITIC_MEMORY][i])
+for i in range(
+0, len(batch[BufferKey.CRITIC_MEMORY]), self.policy.sequence_length
+)
+]
+if len(value_memories) > 0:
+value_memories = torch.stack(value_memories).unsqueeze(0)
log_probs, entropy = self.policy.evaluate_actions(
current_obs,
masks=act_masks,

)
values, _ = self.critic.critic_pass(
-current_obs, memories=memories, sequence_length=self.policy.sequence_length
+current_obs,
+memories=value_memories,
+sequence_length=self.policy.sequence_length,
)
old_log_probs = ActionLogProbs.from_buffer(batch).flatten()
log_probs = log_probs.flatten()
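
The stride-by-sequence_length indexing above keeps only the critic memory at the start of each training sequence; critic_pass then unrolls the LSTM across the sequence itself, so the intermediate per-step memories are not needed at update time. A small illustration of that indexing (not part of the commit):

sequence_length = 3
stored_memories = [f"mem_{i}" for i in range(12)]       # one entry per step, as written by ppo/trainer.py
seq_start_memories = [
    stored_memories[i] for i in range(0, len(stored_memories), sequence_length)
]
assert seq_start_memories == ["mem_0", "mem_3", "mem_6", "mem_9"]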

ml-agents/mlagents/trainers/ppo/trainer.py (3 changed lines)


self.policy.update_normalization(agent_buffer_trajectory)
# Get all value estimates
-value_estimates, value_next = self.optimizer.get_trajectory_value_estimates(
+value_estimates, value_next, value_memories = self.optimizer.get_trajectory_value_estimates(
+agent_buffer_trajectory[BufferKey.CRITIC_MEMORY].set(value_memories)
for name, v in value_estimates.items():
agent_buffer_trajectory[RewardSignalUtil.value_estimates_key(name)].extend(
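
For orientation, the updated call site roughly looks like the sketch below once the lines collapsed by the diff viewer are included. The argument list is taken from the unchanged sac/trainer.py call further down; whether PPO also forwards an agent id (the new parameter defaults to "") is not visible in this hunk, so that argument is shown commented out as an assumption.

(
    value_estimates,
    value_next,
    value_memories,
) = self.optimizer.get_trajectory_value_estimates(
    agent_buffer_trajectory,
    trajectory.next_obs,
    trajectory.done_reached,
    # agent_id=trajectory.agent_id,  # assumption: would enable the per-agent critic memory lookup
)
agent_buffer_trajectory[BufferKey.CRITIC_MEMORY].set(value_memories)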

ml-agents/mlagents/trainers/sac/trainer.py (2 changed lines)


self.collected_rewards[name][agent_id] += np.sum(evaluate_result)
# Get all value estimates for reporting purposes
-value_estimates, _ = self.optimizer.get_trajectory_value_estimates(
+value_estimates, _, _ = self.optimizer.get_trajectory_value_estimates(
agent_buffer_trajectory, trajectory.next_obs, trajectory.done_reached
)
for name, v in value_estimates.items():
