Compare commits

...
This merge request contains changes that conflict with the target branch.
/ml-agents/mlagents/trainers/agent_processor.py
/ml-agents/mlagents/trainers/tests/mock_brain.py
/ml-agents/mlagents/trainers/tests/test_agent_processor.py

2 commits

Author       SHA1       Message                           Commit date
Ervin Teng   7b0f700b   Add test for deletion calls       5 years ago
Ervin Teng   7bbd91ad   Change logic to fix memory leak   5 years ago
3 files changed, with 82 insertions and 10 deletions
  1. ml-agents/mlagents/trainers/agent_processor.py (25 changed lines)
  2. ml-agents/mlagents/trainers/tests/mock_brain.py (3 changed lines)
  3. ml-agents/mlagents/trainers/tests/test_agent_processor.py (64 changed lines)
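Judging from the hunks below, the fix keeps only the per-agent step (plus its index into the batch) in last_step_result instead of the whole BatchedStepResult, and deletes the entry once that agent is done. A minimal, self-contained sketch of that caching pattern follows; all names here are made up for illustration and are not the ML-Agents API.

# Minimal sketch (made-up names): cache only the per-agent slice of the latest
# step plus its batch index, and drop the entry as soon as the agent's episode
# ends, so the cache cannot keep whole batched results alive indefinitely.
from typing import Dict, NamedTuple, Tuple


class AgentStep(NamedTuple):
    obs: Tuple[float, ...]
    done: bool


class StepCache:
    def __init__(self) -> None:
        # global agent id -> (last per-agent step, index into the batch)
        self.last_step: Dict[str, Tuple[AgentStep, int]] = {}

    def store(self, global_id: str, step: AgentStep, index: int) -> None:
        self.last_step[global_id] = (step, index)

    def on_done(self, global_id: str) -> None:
        # Without this deletion the dict grows by one entry per finished agent.
        self.last_step.pop(global_id, None)


cache = StepCache()
cache.store("worker0-agent0", AgentStep(obs=(0.0,), done=False), 0)
cache.on_done("worker0-agent0")
assert not cache.last_step  # nothing retained after the episode ends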

ml-agents/mlagents/trainers/agent_processor.py (25 changed lines)


             )  # Needed for mypy to pass since ndarray has no content type
             curr_agent_step = batched_step_result.get_agent_step_result(local_id)
             global_id = get_global_agent_id(worker_id, local_id)
-            stored_step = self.last_step_result.get(global_id, None)
+            stored_agent_step, idx = self.last_step_result.get(global_id, (None, None))
-            if stored_step is not None and stored_take_action_outputs is not None:
+            if stored_agent_step is not None and stored_take_action_outputs is not None:
-                stored_agent_step = stored_step.get_agent_step_result(local_id)
-                idx = stored_step.agent_id_to_index[local_id]
                 obs = stored_agent_step.obs
                 if not stored_agent_step.done:
                     if self.policy.use_recurrent:

             elif not curr_agent_step.done:
                 self.episode_steps[global_id] += 1
-            self.last_step_result[global_id] = batched_step_result
+            for _gid in action_global_agent_ids:
+                if _gid in self.last_step_result:
+                    if not self.last_step_result[_gid][0].done:
+                        if "action" in take_action_outputs:
+                            self.policy.save_previous_action(
+                                [global_id], take_action_outputs["action"]
+                            )
+                    else:
+                        # If it was done, delete it
+                        del self.last_step_result[_gid]
-        if "action" in take_action_outputs:
-            self.policy.save_previous_action(
-                previous_action.agent_ids, take_action_outputs["action"]
+            # Index is needed to grab from last_take_action_outputs
+            self.last_step_result[global_id] = (
+                curr_agent_step,
+                batched_step_result.agent_id_to_index[_id],
+            )
         for terminated_id in terminated_agents:

             del self.last_take_action_outputs[global_id]
             del self.episode_steps[global_id]
             del self.episode_rewards[global_id]
+            del self.last_step_result[global_id]
             self.policy.remove_previous_action([global_id])
             self.policy.remove_memories([global_id])
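The last hunk above purges every piece of per-agent state once an agent terminates. A standalone sketch of that cleanup step follows; the class and method names are illustrative, not the actual AgentProcessor implementation.

# Illustrative cleanup for a terminated agent (hypothetical names): every
# per-agent map is purged and the policy is told to drop its cached
# action/memory state, otherwise each map leaks one entry per finished episode.
from collections import defaultdict
from unittest import mock


class MiniProcessor:
    def __init__(self, policy) -> None:
        self.policy = policy
        self.last_step_result: dict = {}
        self.last_take_action_outputs: dict = {}
        self.episode_steps = defaultdict(int)
        self.episode_rewards = defaultdict(float)

    def clean_terminated_agent(self, global_id: str) -> None:
        # pop() rather than del so a missing key is not an error
        self.last_step_result.pop(global_id, None)
        self.last_take_action_outputs.pop(global_id, None)
        self.episode_steps.pop(global_id, None)
        self.episode_rewards.pop(global_id, None)
        self.policy.remove_previous_action([global_id])
        self.policy.remove_memories([global_id])


proc = MiniProcessor(policy=mock.Mock())
proc.episode_steps["w0-a0"] += 1
proc.clean_terminated_agent("w0-a0")
assert "w0-a0" not in proc.episode_steps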

ml-agents/mlagents/trainers/tests/mock_brain.py (3 changed lines)


     num_vis_observations: int = 0,
     action_shape: List[int] = None,
     discrete: bool = False,
+    done: bool = False,
 ) -> BatchedStepResult:
     """
     Creates a mock BatchedStepResult with observations. Imitates constant

     ]
     reward = np.array(num_agents * [1.0], dtype=np.float32)
-    done = np.array(num_agents * [False], dtype=np.bool)
+    done = np.array(num_agents * [done], dtype=np.bool)
     max_step = np.array(num_agents * [False], dtype=np.bool)
     agent_id = np.arange(num_agents, dtype=np.int32)
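The mock_brain.py change only threads a done flag into the mock-step factory so tests can fabricate terminal steps. A rough standalone equivalent is sketched below; it is not the ML-Agents helper, and it uses plain bool because np.bool has been removed from current NumPy versions.

# Standalone sketch of threading a `done` flag through a mock-step factory
# (illustrative, not mlagents.trainers.tests.mock_brain); the flag becomes a
# per-agent boolean array so a test can produce a terminal step on demand.
import numpy as np


def make_fake_step(num_agents: int, done: bool = False):
    reward = np.array(num_agents * [1.0], dtype=np.float32)
    done_array = np.array(num_agents * [done], dtype=bool)
    max_step = np.array(num_agents * [False], dtype=bool)
    agent_id = np.arange(num_agents, dtype=np.int32)
    return reward, done_array, max_step, agent_id


_, done_array, _, _ = make_fake_step(num_agents=2, done=True)
assert done_array.all()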

ml-agents/mlagents/trainers/tests/test_agent_processor.py (64 changed lines)


 from mlagents.trainers.action_info import ActionInfo
 from mlagents.trainers.trajectory import Trajectory
 from mlagents.trainers.stats import StatsReporter
 from mlagents.trainers.brain_conversion_utils import get_global_agent_id
 def create_mock_brain():

     processor.add_experiences(mock_step, 0, ActionInfo([], [], {}, []))
     # Assert that the AgentProcessor is still empty
     assert len(processor.experience_buffers[0]) == 0
+def test_agent_deletion():
+    policy = create_mock_policy()
+    tqueue = mock.Mock()
+    name_behavior_id = "test_brain_name"
+    processor = AgentProcessor(
+        policy,
+        name_behavior_id,
+        max_trajectory_length=5,
+        stats_reporter=StatsReporter("testcat"),
+    )
+    fake_action_outputs = {
+        "action": [0.1],
+        "entropy": np.array([1.0], dtype=np.float32),
+        "learning_rate": 1.0,
+        "pre_action": [0.1],
+        "log_probs": [0.1],
+    }
+    mock_step = mb.create_mock_batchedstep(
+        num_agents=1,
+        num_vector_observations=8,
+        action_shape=[2],
+        num_vis_observations=0,
+    )
+    mock_done_step = mb.create_mock_batchedstep(
+        num_agents=1,
+        num_vector_observations=8,
+        action_shape=[2],
+        num_vis_observations=0,
+        done=True,
+    )
+    fake_action_info = ActionInfo(
+        action=[0.1],
+        value=[0.1],
+        outputs=fake_action_outputs,
+        agent_ids=mock_step.agent_id,
+    )
+    processor.publish_trajectory_queue(tqueue)
+    # This is like the initial state after the env reset
+    processor.add_experiences(mock_step, 0, ActionInfo.empty())
+    # Run 3 trajectories, with different workers (to simulate different agents)
+    add_calls = []
+    add_calls.append(mock.call([get_global_agent_id(0, 0)], [0.1]))
+    remove_calls = []
+    for _ep in range(3):
+        for _ in range(5):
+            processor.add_experiences(mock_step, _ep, fake_action_info)
+            add_calls.append(mock.call([get_global_agent_id(_ep, 0)], [0.1]))
+        processor.add_experiences(mock_done_step, _ep, fake_action_info)
+        # Make sure we don't add experiences from the prior agents after the done
+        remove_calls.append(mock.call([get_global_agent_id(_ep, 0)]))
+    policy.save_previous_action.assert_has_calls(add_calls)
+    policy.remove_previous_action.assert_has_calls(remove_calls)
+    # Check that there are no experiences left
+    assert len(processor.experience_buffers.keys()) == 0
+    assert len(processor.last_take_action_outputs.keys()) == 0
+    assert len(processor.episode_steps.keys()) == 0
+    assert len(processor.episode_rewards.keys()) == 0
def test_agent_manager():
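The assertions in test_agent_deletion above rely on unittest.mock recording calls: save_previous_action and remove_previous_action are auto-created attributes on the mocked policy, and assert_has_calls checks that the expected calls occur in order (other calls may be interleaved). A minimal standalone illustration of that pattern, with made-up agent ids:

# Standalone illustration of the mock.call / assert_has_calls pattern used in
# the test above; the tracked object and the ids here are made up.
from unittest import mock

policy = mock.Mock()
for agent_id in ("w0-a0", "w1-a0"):
    policy.remove_previous_action([agent_id])

# Passes as long as these calls appear in this order, even if other calls
# were recorded in between.
policy.remove_previous_action.assert_has_calls(
    [mock.call(["w0-a0"]), mock.call(["w1-a0"])]
)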
