
Merge branch 'soccer-2v1' into asymm-envs

/asymm-envs
Andrew Cohen, 4 years ago
Current commit 185d4b35
51 files changed, with 1715 insertions and 1484 deletions
  1. .yamato/gym-interface-test.yml (2 changes)
  2. .yamato/python-ll-api-test.yml (2 changes)
  3. .yamato/standalone-build-test.yml (1 change)
  4. Project/Assets/ML-Agents/Examples/SharedAssets/Scripts/SensorBase.cs (3 changes)
  5. Project/Assets/ML-Agents/Examples/Soccer/TFModels/SoccerTwos.nn (1001 changes)
  6. Project/Assets/ML-Agents/Examples/Tennis/Scripts/TennisAgent.cs (1 change)
  7. com.unity.ml-agents/CHANGELOG.md (2 changes)
  8. com.unity.ml-agents/Runtime/Agent.cs (11 changes)
  9. com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs (5 changes)
  10. com.unity.ml-agents/Runtime/Sensors/CameraSensor.cs (3 changes)
  11. com.unity.ml-agents/Runtime/Sensors/ISensor.cs (6 changes)
  12. com.unity.ml-agents/Runtime/Sensors/RayPerceptionSensor.cs (3 changes)
  13. com.unity.ml-agents/Runtime/Sensors/RenderTextureSensor.cs (3 changes)
  14. com.unity.ml-agents/Runtime/Sensors/StackingSensor.cs (13 changes)
  15. com.unity.ml-agents/Runtime/Sensors/VectorSensor.cs (6 changes)
  16. com.unity.ml-agents/Tests/Editor/MLAgentsEditModeTest.cs (9 changes)
  17. com.unity.ml-agents/Tests/Editor/ParameterLoaderTest.cs (1 change)
  18. com.unity.ml-agents/Tests/Editor/Sensor/FloatVisualSensorTests.cs (1 change)
  19. com.unity.ml-agents/Tests/Editor/Sensor/SensorShapeValidatorTests.cs (1 change)
  20. com.unity.ml-agents/Tests/Editor/Sensor/StackingSensorTests.cs (18 changes)
  21. docs/Migrating.md (2 changes)
  22. docs/Python-API.md (141 changes)
  23. gym-unity/README.md (15 changes)
  24. gym-unity/gym_unity/envs/__init__.py (342 changes)
  25. gym-unity/gym_unity/tests/test_gym.py (198 changes)
  26. ml-agents-envs/mlagents_envs/base_env.py (249 changes)
  27. ml-agents-envs/mlagents_envs/environment.py (102 changes)
  28. ml-agents-envs/mlagents_envs/rpc_utils.py (99 changes)
  29. ml-agents-envs/mlagents_envs/tests/test_envs.py (43 changes)
  30. ml-agents-envs/mlagents_envs/tests/test_rpc_utils.py (170 changes)
  31. ml-agents/mlagents/trainers/agent_processor.py (179 changes)
  32. ml-agents/mlagents/trainers/brain_conversion_utils.py (16 changes)
  33. ml-agents/mlagents/trainers/demo_loader.py (59 changes)
  34. ml-agents/mlagents/trainers/env_manager.py (36 changes)
  35. ml-agents/mlagents/trainers/policy/nn_policy.py (10 changes)
  36. ml-agents/mlagents/trainers/policy/policy.py (4 changes)
  37. ml-agents/mlagents/trainers/policy/tf_policy.py (25 changes)
  38. ml-agents/mlagents/trainers/simple_env_manager.py (26 changes)
  39. ml-agents/mlagents/trainers/subprocess_env_manager.py (24 changes)
  40. ml-agents/mlagents/trainers/tests/mock_brain.py (42 changes)
  41. ml-agents/mlagents/trainers/tests/simple_test_envs.py (137 changes)
  42. ml-agents/mlagents/trainers/tests/test_agent_processor.py (54 changes)
  43. ml-agents/mlagents/trainers/tests/test_demo_loader.py (8 changes)
  44. ml-agents/mlagents/trainers/tests/test_nn_policy.py (12 changes)
  45. ml-agents/mlagents/trainers/tests/test_policy.py (24 changes)
  46. ml-agents/mlagents/trainers/tests/test_simple_rl.py (4 changes)
  47. ml-agents/mlagents/trainers/tests/test_subprocess_env_manager.py (5 changes)
  48. ml-agents/tests/yamato/scripts/run_gym.py (27 changes)
  49. ml-agents/tests/yamato/scripts/run_llapi.py (29 changes)
  50. ml-agents/tests/yamato/standalone_build_tests.py (18 changes)
  51. ml-agents/tests/yamato/yamato_utils.py (7 changes)

.yamato/gym-interface-test.yml (2 changes)


commands:
- pip install pyyaml
- python -u -m ml-agents.tests.yamato.setup_venv
- ./venv/bin/python ml-agents/tests/yamato/scripts/run_gym.py
- ./venv/bin/python ml-agents/tests/yamato/scripts/run_gym.py --env=Project/testPlayer-Basic
dependencies:
- .yamato/standalone-build-test.yml#test_mac_standalone_{{ editor.version }}
triggers:

.yamato/python-ll-api-test.yml (2 changes)


- python -u -m ml-agents.tests.yamato.setup_venv
- ./venv/bin/python ml-agents/tests/yamato/scripts/run_llapi.py
dependencies:
- .yamato/standalone-build-test.yml#test_mac_standalone_{{ editor.version }}
- .yamato/standalone-build-test.yml#test_mac_standalone_{{ editor.version }} --env=Project/testPlayer
triggers:
cancel_old_ci: true
changes:

.yamato/standalone-build-test.yml (1 change)


commands:
- pip install pyyaml
- python -u -m ml-agents.tests.yamato.standalone_build_tests
- python -u -m ml-agents.tests.yamato.standalone_build_tests --scene=Assets/ML-Agents/Examples/Basic/Scenes/Basic.unity
triggers:
cancel_old_ci: true
changes:

Project/Assets/ML-Agents/Examples/SharedAssets/Scripts/SensorBase.cs (3 changes)


public void Update() {}
/// <inheritdoc/>
public void Reset() { }
/// <inheritdoc/>
public virtual byte[] GetCompressedObservation()
{
return null;

Project/Assets/ML-Agents/Examples/Soccer/TFModels/SoccerTwos.nn (1001 changes)
Diff not shown: file contents too large to display.

Project/Assets/ML-Agents/Examples/Tennis/Scripts/TennisAgent.cs (1 change)


transform.position.y,
transform.position.z);
}
// AddReward(-1f / 3000f);
timePenalty += -1f / 3000f;
m_TextComponent.text = score.ToString();

com.unity.ml-agents/CHANGELOG.md (2 changes)


- The Jupyter notebooks have been removed from the repository.
- Introduced the `SideChannelUtils` to register, unregister and access side channels.
- `Academy.FloatProperties` was removed, please use `SideChannelUtils.GetSideChannel<FloatPropertiesChannel>()` instead.
- Removed the multi-agent gym option from the gym wrapper. For multi-agent scenarios, use the [Low Level Python API](Python-API.md).
- The low level Python API has changed. You can look at the document [Low Level Python API documentation](Python-API.md) for more information. If you use `mlagents-learn` for training, this should be a transparent change.
- Added ability to start training (initialize model weights) from a previous run ID. (#3710)
### Minor Changes

com.unity.ml-agents/Runtime/Agent.cs (11 changes)


// Request the last decision with no callbacks
// We request a decision so Python knows the Agent is done immediately
m_Brain?.RequestDecision(m_Info, sensors);
ResetSensors();
// We also have to write to any DemonstrationWriters so that they get the "done" flag.
foreach (var demoWriter in DemonstrationWriters)

UpdateRewardStats();
}
// The Agent is done, so we give it a new episode Id
m_EpisodeId = EpisodeIdCounter.GetEpisodeId();
m_Reward = 0f;
m_CumulativeReward = 0f;
m_RequestAction = false;

foreach (var sensor in sensors)
{
sensor.Update();
}
}
void ResetSensors()
{
foreach (var sensor in sensors)
{
sensor.Reset();
}
}

com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs (5 changes)


{
m_OrderedAgentsRequestingDecisions[behaviorName] = new List<int>();
}
m_OrderedAgentsRequestingDecisions[behaviorName].Add(info.episodeId);
if (!info.done)
{
m_OrderedAgentsRequestingDecisions[behaviorName].Add(info.episodeId);
}
if (!m_LastActionsReceived.ContainsKey(behaviorName))
{
m_LastActionsReceived[behaviorName] = new Dictionary<int, float[]>();

com.unity.ml-agents/Runtime/Sensors/CameraSensor.cs (3 changes)


public void Update() {}
/// <inheritdoc/>
public void Reset() { }
/// <inheritdoc/>
public SensorCompressionType GetCompressionType()
{
return m_CompressionType;

com.unity.ml-agents/Runtime/Sensors/ISensor.cs (6 changes)


void Update();
/// <summary>
/// Resets the internal states of the sensor. This is called at the end of an Agent's episode.
/// Most implementations can leave this empty.
/// </summary>
void Reset();
/// <summary>
/// Return the compression type being used. If no compression is used, return
/// <see cref="SensorCompressionType.None"/>.
/// </summary>

com.unity.ml-agents/Runtime/Sensors/RayPerceptionSensor.cs (3 changes)


}
/// <inheritdoc/>
public void Reset() { }
/// <inheritdoc/>
public int[] GetObservationShape()
{
return m_Shape;

com.unity.ml-agents/Runtime/Sensors/RenderTextureSensor.cs (3 changes)


public void Update() {}
/// <inheritdoc/>
public void Reset() { }
/// <inheritdoc/>
public SensorCompressionType GetCompressionType()
{
return m_CompressionType;

com.unity.ml-agents/Runtime/Sensors/StackingSensor.cs (13 changes)


using System;
namespace MLAgents.Sensors
{
/// <summary>

{
m_WrappedSensor.Update();
m_CurrentIndex = (m_CurrentIndex + 1) % m_NumStackedObservations;
}
/// <inheritdoc/>
public void Reset()
{
m_WrappedSensor.Reset();
// Zero out the buffer.
for (var i = 0; i < m_NumStackedObservations; i++)
{
Array.Clear(m_StackedObservations[i], 0, m_StackedObservations[i].Length);
}
}
/// <inheritdoc/>

com.unity.ml-agents/Runtime/Sensors/VectorSensor.cs (6 changes)


}
/// <inheritdoc/>
public void Reset()
{
Clear();
}
/// <inheritdoc/>
public int[] GetObservationShape()
{
return m_Shape;

com.unity.ml-agents/Tests/Editor/MLAgentsEditModeTest.cs (9 changes)


public string sensorName;
public int numWriteCalls;
public int numCompressedCalls;
public int numResetCalls;
public SensorCompressionType compressionType = SensorCompressionType.None;
public TestSensor(string n)

}
public void Update() {}
public void Reset()
{
numResetCalls++;
}
}
[TestFixture]

var expectedAgentActionForEpisode = 0;
var expectedCollectObsCalls = 0;
var expectedCollectObsCallsForEpisode = 0;
var expectedSensorResetCalls = 0;
for (var i = 0; i < 15; i++)
{

expectedAgentActionForEpisode = 0;
expectedCollectObsCallsForEpisode = 0;
expectedAgentStepCount = 0;
expectedSensorResetCalls++;
}
aca.EnvironmentStep();

Assert.AreEqual(expectedAgentActionForEpisode, agent1.agentActionCallsForEpisode);
Assert.AreEqual(expectedCollectObsCalls, agent1.collectObservationsCalls);
Assert.AreEqual(expectedCollectObsCallsForEpisode, agent1.collectObservationsCallsForEpisode);
Assert.AreEqual(expectedSensorResetCalls, agent1.sensor1.numResetCalls);
}
}

com.unity.ml-agents/Tests/Editor/ParameterLoaderTest.cs (1 change)


}
public void Update() {}
public void Reset() { }
public SensorCompressionType GetCompressionType()
{

com.unity.ml-agents/Tests/Editor/Sensor/FloatVisualSensorTests.cs (1 change)


}
public void Update() {}
public void Reset() { }
public SensorCompressionType GetCompressionType()
{

com.unity.ml-agents/Tests/Editor/Sensor/SensorShapeValidatorTests.cs (1 change)


}
public void Update() { }
public void Reset() { }
public SensorCompressionType GetCompressionType()
{

com.unity.ml-agents/Tests/Editor/Sensor/StackingSensorTests.cs (18 changes)


// Check that if we don't call Update(), the same observations are produced
SensorTestHelper.CompareObservation(sensor, new[] {5f, 6f, 7f, 8f, 9f, 10f});
}
[Test]
public void TestStackingReset()
{
VectorSensor wrapped = new VectorSensor(2);
ISensor sensor = new StackingSensor(wrapped, 3);
wrapped.AddObservation(new[] {1f, 2f});
SensorTestHelper.CompareObservation(sensor, new[] {0f, 0f, 0f, 0f, 1f, 2f});
sensor.Update();
wrapped.AddObservation(new[] {3f, 4f});
SensorTestHelper.CompareObservation(sensor, new[] {0f, 0f, 1f, 2f, 3f, 4f});
sensor.Reset();
wrapped.AddObservation(new[] {5f, 6f});
SensorTestHelper.CompareObservation(sensor, new[] {0f, 0f, 0f, 0f, 5f, 6f});
}
}
}

docs/Migrating.md (2 changes)


* The `--load` and `--train` command-line flags have been deprecated and replaced with `--resume` and `--inference`.
* Running with the same `--run-id` twice will now throw an error.
* The `play_against_current_self_ratio` self-play trainer hyperparameter has been renamed to `play_against_latest_model_ratio`
* Removed the multi-agent gym option from the gym wrapper. For multi-agent scenarios, use the [Low Level Python API](Python-API.md).
* The low level Python API has changed. You can look at the document [Low Level Python API documentation](Python-API.md) for more information. If you use `mlagents-learn` for training, this should be a transparent change.
### Steps to Migrate
* Replace the `--load` flag with `--resume` when calling `mlagents-learn`, and don't use the `--train` flag as training

docs/Python-API.md (141 changes)


- **UnityEnvironment** — the main interface between the Unity application and
your code. Use UnityEnvironment to start and control a simulation or training
session.
- **BatchedStepResult** — contains the data from Agents belonging to the same
"AgentGroup" in the simulation, such as observations and rewards.
- **AgentGroupSpec** — describes the shape of the data inside a BatchedStepResult.
For example, provides the dimensions of the observations of a group.
- **BehaviorName** - is a string that identifies a behavior in the simulation.
- **AgentId** - is an `int` that serves as unique identifier for Agents in the
simulation.
- **DecisionSteps** — contains the data from Agents belonging to the same
"Behavior" in the simulation, such as observations and rewards. Only Agents
that requested a decision since the last call to `env.step()` are in the
DecisionSteps object.
- **TerminalSteps** — contains the data from Agents belonging to the same
"Behavior" in the simulation, such as observations and rewards. Only Agents
whose episode ended since the last call to `env.step()` are in the
TerminalSteps object.
- **BehaviorSpec** — describes the shape of the observation data inside
DecisionSteps and TerminalSteps as well as the expected action shapes.
An Agent Group is a group of Agents identified by a string name that share the same
observations and action types. You can think about Agent Group as a group of agents
that will share the same policy or behavior. All Agents in a group have the same goal
and reward signals.
An Agent "Behavior" is a group of Agents identified by a `BehaviorName` that share the same
observations and action types (described in their `BehaviorSpec`). You can think about Agent
Behavior as a group of agents that will share the same policy. All Agents with the same
behavior have the same goal and reward signals.
__Note__: The `Behavior Name` corresponds to the Agent Group name on Python.
_Notice: Currently communication between Unity and Python takes place over an
open socket without authentication. As such, please make sure that the network

move forward until an Agent in the simulation needs an input from Python to act.
- **Close : `env.close()`** Sends a shutdown signal to the environment and terminates
the communication.
- **Get Agent Group Names : `env.get_agent_groups()`** Returns a list of agent group ids.
- **Get Behavior Names : `env.get_behavior_names()`** Returns a list of `BehaviorName`.
agent groups are created in the simulation.
- **Get Agent Group Spec : `env.get_agent_group_spec(agent_group: str)`** Returns
the `AgentGroupSpec` corresponding to the agent_group given as input. An
`AgentGroupSpec` contains information such as the observation shapes, the action
type (multi-discrete or continuous) and the action shape. Note that the `AgentGroupSpec`
Agent behaviors are created in the simulation.
- **Get Behavior Spec : `env.get_behavior_spec(behavior_name: str)`** Returns
the `BehaviorSpec` corresponding to the behavior_name given as input. A
`BehaviorSpec` contains information such as the observation shapes, the action
type (multi-discrete or continuous) and the action shape. Note that the `BehaviorSpec`
- **Get Batched Step Result for Agent Group : `env.get_step_result(agent_group: str)`**
Returns a `BatchedStepResult` corresponding to the agent_group given as input.
A `BatchedStepResult` contains information about the state of the agents in a group
such as the observations, the rewards, the done flags and the agent identifiers. The
data is in `np.array` of which the first dimension is always the number of agents which
requested a decision in the simulation since the last call to `env.step()` note that the
number of agents is not guaranteed to remain constant during the simulation.
- **Set Actions for Agent Group :`env.set_actions(agent_group: str, action: np.array)`**
- **Get Steps : `env.get_steps(behavior_name: str)`**
Returns a tuple `DecisionSteps, TerminalSteps` corresponding to the behavior_name
given as input.
The `DecisionSteps` contains information about the state of the agents
**that need an action this step** and have the behavior behavior_name.
The `TerminalSteps` contains information about the state of the agents
**whose episode ended** and have the behavior behavior_name.
Both `DecisionSteps` and `TerminalSteps` contain information such as
the observations, the rewards and the agent identifiers.
`DecisionSteps` also contains action masks for the next action while `TerminalSteps`
contains the reason for termination (did the Agent reach its maximum step and was
interrupted). The data is in `np.array` of which the first dimension is always the
number of agents. Note that the number of agents is not guaranteed to remain constant
during the simulation, and it is not unusual for either `DecisionSteps` or `TerminalSteps`
to contain no Agents at all.
- **Set Actions :`env.set_actions(behavior_name: str, action: np.array)`**
Sets the actions for a whole agent group. `action` is a 2D `np.array` of `dtype=np.int32`
in the discrete action case and `dtype=np.float32` in the continuous action case.
The first dimension of `action` is the number of agents that requested a decision

__Note:__ If no action is provided for an agent group between two calls to `env.step()` then
the default action will be all zeros (in either discrete or continuous action space)
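
Taken together, these calls compose into a loop like the minimal sketch below. The zero
actions, the fixed number of iterations, and `file_name=None` (connect to the Unity Editor)
are placeholders, not a full training loop:

```python
import numpy as np
from mlagents_envs.environment import UnityEnvironment

env = UnityEnvironment(file_name=None)  # None: connect to the Unity Editor
env.reset()
behavior_name = env.get_behavior_names()[0]
spec = env.get_behavior_spec(behavior_name)

for _ in range(100):
    decision_steps, terminal_steps = env.get_steps(behavior_name)
    # Agents in terminal_steps just ended an episode and do not need an action.
    if len(decision_steps) > 0:
        action_dtype = np.int32 if spec.is_action_discrete() else np.float32
        # Placeholder: all-zero actions of shape (number of agents, action size).
        action = np.zeros((len(decision_steps), spec.action_size), dtype=action_dtype)
        env.set_actions(behavior_name, action)
    env.step()
env.close()
```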
#### BatchedStepResult and StepResult
#### DecisionSteps and DecisionStep
`DecisionSteps` (with `s`) contains information about a whole batch of Agents while
`DecisionStep` (no `s`) only contains information about a single Agent.
A `BatchedStepResult` has the following fields :
A `DecisionSteps` has the following fields :
- `obs` is a list of numpy arrays of observations collected by the group of
agents. The first dimension of the array corresponds to the batch size of

rewards collected by each agent since the last simulation step.
- `done` is an array of booleans of length batch size. Is true if the
associated Agent was terminated during the last simulation step.
- `max_step` is an array of booleans of length batch size. Is true if the
associated Agent reached its maximum number of steps during the last
simulation step.
- `agent_id` is an int vector of length batch size containing unique
identifier for the corresponding Agent. This is used to track Agents
across simulation steps.

It also has the two following methods:
- `n_agents()` Returns the number of agents requesting a decision since
the last call to `env.step()`
- `get_agent_step_result(agent_id: int)` Returns a `StepResult`
- `len(DecisionSteps)` Returns the number of agents requesting a decision since
the last call to `env.step()`.
- `DecisionSteps[agent_id]` Returns a `DecisionStep`
A `StepResult` has the following fields:
A `DecisionStep` has the following fields:
- `obs` is a list of numpy arrays observations collected by the group of
agent. (Each array has one less dimension than the arrays in `BatchedStepResult`)
- `obs` is a list of numpy arrays observations collected by the agent.
(Each array has one less dimension than the arrays in `DecisionSteps`)
- `max_step` is a bool. Is true if the Agent reached its maximum number of
steps during the last simulation step.
- `agent_id` is an int and an unique identifier for the corresponding Agent.
- `action_mask` is an optional list of one dimensional array of booleans.
Only available in multi-discrete action space type.
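
For example, assuming `decision_steps` was returned by `env.get_steps(behavior_name)`, it can
be used like a read-only mapping from `agent_id` to `DecisionStep` (a minimal sketch):

```python
for agent_id in decision_steps:          # iterates over the agent ids in the batch
    step = decision_steps[agent_id]      # the DecisionStep for that single agent
    print(agent_id, step.reward, [obs.shape for obs in step.obs])
print(len(decision_steps), "agents requested a decision")
```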

#### AgentGroupSpec
#### TerminalSteps and TerminalStep
Similarly to `DecisionSteps` and `DecisionStep`,
`TerminalSteps` (with `s`) contains information about a whole batch of Agents while
`TerminalStep` (no `s`) only contains information about a single Agent.
An Agent group can either have discrete or continuous actions. To check which type
A `TerminalSteps` has the following fields :
- `obs` is a list of numpy arrays of observations collected by the group of
agents. The first dimension of the array corresponds to the batch size of
the group (number of agents requesting a decision since the last call to
`env.step()`).
- `reward` is a float vector of length batch size. Corresponds to the
rewards collected by each agent since the last simulation step.
- `done` is an array of booleans of length batch size. Is true if the
associated Agent was terminated during the last simulation step.
- `agent_id` is an int vector of length batch size containing unique
identifier for the corresponding Agent. This is used to track Agents
across simulation steps.
- `max_step` is an array of booleans of length batch size. Is true if the
associated Agent reached its maximum number of steps during the last
simulation step.
It also has the two following methods:
- `len(TerminalSteps)` Returns the number of agents requesting a decision since
the last call to `env.step()`.
- `TerminalSteps[agent_id]` Returns a `TerminalStep`
for the Agent with the `agent_id` unique identifier.
A `TerminalStep` has the following fields:
- `obs` is a list of numpy arrays observations collected by the agent.
(Each array has one less dimension than the arrays in `TerminalSteps`)
- `reward` is a float. Corresponds to the rewards collected by the agent
since the last simulation step.
- `done` is a bool. Is true if the Agent was terminated during the last
simulation step.
- `agent_id` is an int and an unique identifier for the corresponding Agent.
- `max_step` is a bool. Is true if the Agent reached its maximum number of
steps during the last simulation step.
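
Likewise, assuming `terminal_steps` came from the same `env.get_steps()` call, the final data
of each terminated agent can be read as follows (a minimal sketch):

```python
for agent_id in terminal_steps:
    final_step = terminal_steps[agent_id]
    # max_step is True if the episode was interrupted at the maximum step count.
    print(agent_id, final_step.reward, final_step.max_step)
```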
#### BehaviorSpec
An Agent behavior can either have discrete or continuous actions. To check which type
An `AgentGroupSpec` has the following fields :
A `BehaviorSpec` has the following fields :
BatchedStepResult and StepResult.
DecisionSteps, DecisionStep, TerminalSteps and TerminalStep.
- `action_type` is the type of data of the action. it can be discrete or
continuous. If discrete, the action tensors are expected to be `np.int32`. If
continuous, the actions are expected to be `np.float32`.
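
As a quick illustration, assuming `spec` was obtained from `env.get_behavior_spec(behavior_name)`,
the observation shapes and action type can be inspected like this (a minimal sketch):

```python
print(spec.observation_shapes)            # e.g. [(8,), (84, 84, 3)]
if spec.is_action_discrete():
    print("number of discrete action branches:", spec.action_size)
else:
    print("continuous action size:", spec.action_size)
```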

### Communicating additional information with the Environment
In addition to the means of communicating between Unity and python described above,
we also provide methods for sharing agent-agnostic information. These
additional methods are referred to as side channels. ML-Agents includes two ready-made

gym-unity/README.md (15 changes)


information on the gym interface, see [here](https://github.com/openai/gym).
We provide a gym wrapper and instructions for using it with existing machine
learning algorithms which utilize gyms. Both wrappers provide interfaces on top
learning algorithms which utilize gym. Our wrapper provides interfaces on top
of our `UnityEnvironment` class, which is the default way of interfacing with a
Unity environment via Python.

or by running the following from the `/gym-unity` directory of the repository:
```sh
pip install .
pip install -e .
```
## Using the Gym Wrapper

```python
from gym_unity.envs import UnityEnv
env = UnityEnv(environment_filename, worker_id, use_visual, uint8_visual, multiagent)
env = UnityEnv(environment_filename, worker_id, use_visual, uint8_visual)
```
* `environment_filename` refers to the path to the Unity environment.

(0-255). Many common Gym environments (e.g. Atari) do this. By default they
will be floats (0.0-1.0). Defaults to `False`.
* `multiagent` refers to whether you intend to launch an environment which
contains more than one agent. Defaults to `False`.
* `flatten_branched` will flatten a branched discrete action space into a Gym Discrete.
Otherwise, it will be converted into a MultiDiscrete. Defaults to `False`.
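
A minimal single-agent rollout with the wrapper might then look like the sketch below; the
environment path is a placeholder and random actions stand in for a real policy:

```python
from gym_unity.envs import UnityEnv

env = UnityEnv("path/to/your/EnvironmentBuild", worker_id=0, use_visual=False)
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()        # random placeholder policy
    obs, reward, done, info = env.step(action)
env.close()
```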

## Limitations
* It is only possible to use an environment with a single Agent.
* It is only possible to use an environment with a **single** Agent.
* The `BatchedStepResult` output from the environment can still be accessed from the
`info` provided by `env.step(action)`.
* The `TerminalSteps` or `DecisionSteps` output from the environment can still be
accessed from the `info` provided by `env.step(action)`.
* Stacked vector observations are not supported.
* Environment registration for use with `gym.make()` is currently not supported.

gym-unity/gym_unity/envs/__init__.py (342 changes)


from gym import error, spaces
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.base_env import BatchedStepResult
from mlagents_envs.base_env import DecisionSteps, TerminalSteps
from mlagents_envs import logging_util

logger = logging_util.get_logger(__name__)
logging_util.set_log_level(logging_util.INFO)
GymSingleStepResult = Tuple[np.ndarray, float, bool, Dict]
GymMultiStepResult = Tuple[List[np.ndarray], List[float], List[bool], Dict]
GymStepResult = Union[GymSingleStepResult, GymMultiStepResult]
GymStepResult = Tuple[np.ndarray, float, bool, Dict]
Multi-agent environments use lists for object types, as done here:
https://github.com/openai/multiagent-particle-envs
"""
def __init__(

use_visual: bool = False,
uint8_visual: bool = False,
multiagent: bool = False,
flatten_branched: bool = False,
no_graphics: bool = False,
allow_multiple_visual_obs: bool = False,

:param worker_id: Worker number for environment.
:param use_visual: Whether to use visual observation or vector observation.
:param uint8_visual: Return visual observations as uint8 (0-255) matrices instead of float (0.0-1.0).
:param multiagent: Whether to run in multi-agent mode (lists of obs, reward, done).
:param flatten_branched: If True, turn branched discrete action spaces into a Discrete space rather than
MultiDiscrete.
:param no_graphics: Whether to run the Unity simulator in no-graphics mode

)
# Take a single step so that the brain information will be sent over
if not self._env.get_agent_groups():
if not self._env.get_behavior_names():
self.agent_mapper = AgentIdIndexMapper()
self._previous_step_result: BatchedStepResult = None
self._multiagent = multiagent
self._previous_decision_step: DecisionSteps = None
self._flattener = None
# Hidden flag used by Atari environments to determine if the game is over
self.game_over = False

if len(self._env.get_agent_groups()) != 1:
if len(self._env.get_behavior_names()) != 1:
"There can only be one brain in a UnityEnvironment "
"There can only be one behavior in a UnityEnvironment "
self.brain_name = self._env.get_agent_groups()[0]
self.name = self.brain_name
self.group_spec = self._env.get_agent_group_spec(self.brain_name)
self.name = self._env.get_behavior_names()[0]
self.group_spec = self._env.get_behavior_spec(self.name)
if use_visual and self._get_n_vis_obs() == 0:
raise UnityGymException(

# Check for number of agents in scene.
self._env.reset()
step_result = self._env.get_step_result(self.brain_name)
self._check_agents(step_result.n_agents())
self._previous_step_result = step_result
self.agent_mapper.set_initial_agents(list(self._previous_step_result.agent_id))
decision_steps, _ = self._env.get_steps(self.name)
self._check_agents(len(decision_steps))
self._previous_decision_step = decision_steps
# Set observation and action spaces
if self.group_spec.is_action_discrete():

def reset(self) -> Union[List[np.ndarray], np.ndarray]:
"""Resets the state of the environment and returns an initial observation.
In the case of multi-agent environments, this is a list.
step_result = self._step(True)
n_agents = step_result.n_agents()
self._env.reset()
decision_step, _ = self._env.get_steps(self.name)
n_agents = len(decision_step)
if not self._multiagent:
res: GymStepResult = self._single_step(step_result)
else:
res = self._multi_step(step_result)
res: GymStepResult = self._single_step(decision_step)
return res[0]
def step(self, action: List[Any]) -> GymStepResult:

Accepts an action and returns a tuple (observation, reward, done, info).
In the case of multi-agent environments, these are lists.
Args:
action (object/list): an action provided by the environment
Returns:

info (dict): contains auxiliary diagnostic information, including BatchedStepResult.
info (dict): contains auxiliary diagnostic information.
# Use random actions for all other agents in environment.
if self._multiagent:
if not isinstance(action, list):
raise UnityGymException(
"The environment was expecting `action` to be a list."
)
if len(action) != self._n_agents:
raise UnityGymException(
"The environment was expecting a list of {} actions.".format(
self._n_agents
)
)
else:
if self._flattener is not None:
# Action space is discrete and flattened - we expect a list of scalars
action = [self._flattener.lookup_action(_act) for _act in action]
action = np.array(action)
else:
if self._flattener is not None:
# Translate action into list
action = self._flattener.lookup_action(action)
if self._flattener is not None:
# Translate action into list
action = self._flattener.lookup_action(action)
action = np.array(action).reshape((self._n_agents, spec.action_size))
action = self._sanitize_action(action)
self._env.set_actions(self.brain_name, action)
action = np.array(action).reshape((1, spec.action_size))
self._env.set_actions(self.name, action)
step_result = self._step()
n_agents = step_result.n_agents()
self._check_agents(n_agents)
if not self._multiagent:
single_res = self._single_step(step_result)
self.game_over = single_res[2]
return single_res
self._env.step()
decision_step, terminal_step = self._env.get_steps(self.name)
if len(terminal_step) != 0:
# The agent is done
self.game_over = True
return self._single_step(terminal_step)
multi_res = self._multi_step(step_result)
self.game_over = all(multi_res[2])
return multi_res
return self._single_step(decision_step)
def _single_step(self, info: BatchedStepResult) -> GymSingleStepResult:
def _single_step(self, info: Union[DecisionSteps, TerminalSteps]) -> GymStepResult:
if self.use_visual:
visual_obs = self._get_vis_obs_list(info)

"The Agent does not have vector observations and the environment was not setup "
+ "to use visual observations."
)
done = isinstance(info, TerminalSteps)
return (
default_observation,
info.reward[0],
info.done[0],
{"batched_step_result": info},
)
return (default_observation, info.reward[0], done, {"step": info})
def _preprocess_single(self, single_visual_obs: np.ndarray) -> np.ndarray:
if self.uint8_visual:

def _multi_step(self, info: BatchedStepResult) -> GymMultiStepResult:
if self.use_visual:
self.visual_obs = self._preprocess_multi(self._get_vis_obs_list(info))
default_observation = self.visual_obs
else:
default_observation = self._get_vector_obs(info)
return (
list(default_observation),
list(info.reward),
list(info.done),
{"batched_step_result": info},
)
def _get_n_vis_obs(self) -> int:
result = 0
for shape in self.group_spec.observation_shapes:

return shape
return None
def _get_vis_obs_list(self, step_result: BatchedStepResult) -> List[np.ndarray]:
def _get_vis_obs_list(
self, step_result: Union[DecisionSteps, TerminalSteps]
) -> List[np.ndarray]:
result: List[np.ndarray] = []
for obs in step_result.obs:
if len(obs.shape) == 4:

def _get_vector_obs(self, step_result: BatchedStepResult) -> np.ndarray:
def _get_vector_obs(
self, step_result: Union[DecisionSteps, TerminalSteps]
) -> np.ndarray:
result: List[np.ndarray] = []
for obs in step_result.obs:
if len(obs.shape) == 2:

result += shape[0]
return result
def _preprocess_multi(
self, multiple_visual_obs: List[np.ndarray]
) -> List[np.ndarray]:
if self.uint8_visual:
return [
(255.0 * _visual_obs).astype(np.uint8)
for _visual_obs in multiple_visual_obs
]
else:
return multiple_visual_obs
def render(self, mode="rgb_array"):
return self.visual_obs

return
def _check_agents(self, n_agents: int) -> None:
if not self._multiagent and n_agents > 1:
raise UnityGymException(
"The environment was launched as a single-agent environment, however "
"there is more than one agent in the scene."
)
elif self._multiagent and n_agents <= 1:
raise UnityGymException(
"The environment was launched as a mutli-agent environment, however "
"there is only one agent in the scene."
)
if self._n_agents == -1:
self._n_agents = n_agents
logger.info("{} agents within environment.".format(n_agents))
elif self._n_agents != n_agents:
if self._n_agents > 1:
"The number of agents in the environment has changed since "
"initialization. This is not supported."
"There can only be one Agent in the environment but {n_agents} were detected."
def _sanitize_info(self, step_result: BatchedStepResult) -> BatchedStepResult:
n_extra_agents = step_result.n_agents() - self._n_agents
if n_extra_agents < 0:
# In this case, some Agents did not request a decision when expected
raise UnityGymException(
"The number of agents in the scene does not match the expected number."
)
if step_result.n_agents() - sum(step_result.done) != self._n_agents:
raise UnityGymException(
"The number of agents in the scene does not match the expected number."
)
for index, agent_id in enumerate(step_result.agent_id):
if step_result.done[index]:
self.agent_mapper.mark_agent_done(agent_id, step_result.reward[index])
# Set the new AgentDone flags to True
# Note that the corresponding agent_id that gets marked done will be different
# than the original agent that was done, but this is OK since the gym interface
# only cares about the ordering.
for index, agent_id in enumerate(step_result.agent_id):
if not self._previous_step_result.contains_agent(agent_id):
if step_result.done[index]:
# If the Agent is already done (e.g. it ended its episode twice in one step)
# Don't try to register it here.
continue
# Register this agent, and get the reward of the previous agent that
# was in its index, so that we can return it to the gym.
last_reward = self.agent_mapper.register_new_agent_id(agent_id)
step_result.done[index] = True
step_result.reward[index] = last_reward
self._previous_step_result = step_result # store the new original
# Get a permutation of the agent IDs so that a given ID stays in the same
# index as where it was first seen.
new_id_order = self.agent_mapper.get_id_permutation(list(step_result.agent_id))
_mask: Optional[List[np.array]] = None
if step_result.action_mask is not None:
_mask = []
for mask_index in range(len(step_result.action_mask)):
_mask.append(step_result.action_mask[mask_index][new_id_order])
new_obs: List[np.array] = []
for obs_index in range(len(step_result.obs)):
new_obs.append(step_result.obs[obs_index][new_id_order])
return BatchedStepResult(
obs=new_obs,
reward=step_result.reward[new_id_order],
done=step_result.done[new_id_order],
max_step=step_result.max_step[new_id_order],
agent_id=step_result.agent_id[new_id_order],
action_mask=_mask,
)
def _sanitize_action(self, action: np.array) -> np.array:
sanitized_action = np.zeros(
(self._previous_step_result.n_agents(), self.group_spec.action_size)
)
for index, agent_id in enumerate(self._previous_step_result.agent_id):
if not self._previous_step_result.done[index]:
array_index = self.agent_mapper.get_gym_index(agent_id)
sanitized_action[index, :] = action[array_index, :]
return sanitized_action
def _step(self, needs_reset: bool = False) -> BatchedStepResult:
if needs_reset:
self._env.reset()
else:
self._env.step()
info = self._env.get_step_result(self.brain_name)
# Two possible cases here:
# 1) all agents requested decisions (some of which might be done)
# 2) some Agents were marked Done in between steps.
# In case 2, we re-request decisions until all agents request a real decision.
while info.n_agents() - sum(info.done) < self._n_agents:
if not info.done.all():
raise UnityGymException(
"The environment does not have the expected amount of agents. "
+ "Some agents did not request decisions at the same time."
)
for agent_id, reward in zip(info.agent_id, info.reward):
self.agent_mapper.mark_agent_done(agent_id, reward)
self._env.step()
info = self._env.get_step_result(self.brain_name)
return self._sanitize_info(info)
@property
def metadata(self):
return {"render.modes": ["rgb_array"]}

:return: The List containing the branched actions.
"""
return self.action_lookup[action]
class AgentIdIndexMapper:
def __init__(self) -> None:
self._agent_id_to_gym_index: Dict[int, int] = {}
self._done_agents_index_to_last_reward: Dict[int, float] = {}
def set_initial_agents(self, agent_ids: List[int]) -> None:
"""
Provide the initial list of agent ids for the mapper
"""
for idx, agent_id in enumerate(agent_ids):
self._agent_id_to_gym_index[agent_id] = idx
def mark_agent_done(self, agent_id: int, reward: float) -> None:
"""
Declare the agent done with the corresponding final reward.
"""
if agent_id in self._agent_id_to_gym_index:
gym_index = self._agent_id_to_gym_index.pop(agent_id)
self._done_agents_index_to_last_reward[gym_index] = reward
else:
# Agent was never registered in the first place (e.g. EndEpisode called multiple times)
pass
def register_new_agent_id(self, agent_id: int) -> float:
"""
Adds the new agent ID and returns the reward to use for the previous agent in this index
"""
# Any free index is OK here.
free_index, last_reward = self._done_agents_index_to_last_reward.popitem()
self._agent_id_to_gym_index[agent_id] = free_index
return last_reward
def get_id_permutation(self, agent_ids: List[int]) -> List[int]:
"""
Get the permutation from new agent ids to the order that preserves the positions of previous agents.
The result is a list with each integer from 0 to len(_agent_id_to_gym_index)-1
appearing exactly once.
"""
# Map the new agent ids to their index
new_agent_ids_to_index = {
agent_id: idx for idx, agent_id in enumerate(agent_ids)
}
# Make the output list. We don't write to it sequentially, so start with dummy values.
new_permutation = [-1] * len(self._agent_id_to_gym_index)
# For each agent ID, find the new index of the agent, and write it in the original index.
for agent_id, original_index in self._agent_id_to_gym_index.items():
new_permutation[original_index] = new_agent_ids_to_index[agent_id]
return new_permutation
def get_gym_index(self, agent_id: int) -> int:
"""
Get the gym index for the current agent.
"""
return self._agent_id_to_gym_index[agent_id]
class AgentIdIndexMapperSlow:
"""
Reference implementation of AgentIdIndexMapper.
The operations are O(N^2) so it shouldn't be used for large numbers of agents.
See AgentIdIndexMapper for method descriptions
"""
def __init__(self) -> None:
self._gym_id_order: List[int] = []
self._done_agents_index_to_last_reward: Dict[int, float] = {}
def set_initial_agents(self, agent_ids: List[int]) -> None:
self._gym_id_order = list(agent_ids)
def mark_agent_done(self, agent_id: int, reward: float) -> None:
try:
gym_index = self._gym_id_order.index(agent_id)
self._done_agents_index_to_last_reward[gym_index] = reward
self._gym_id_order[gym_index] = -1
except ValueError:
# Agent was never registered in the first place (e.g. EndEpisode called multiple times)
pass
def register_new_agent_id(self, agent_id: int) -> float:
original_index = self._gym_id_order.index(-1)
self._gym_id_order[original_index] = agent_id
reward = self._done_agents_index_to_last_reward.pop(original_index)
return reward
def get_id_permutation(self, agent_ids):
new_id_order = []
for agent_id in self._gym_id_order:
new_id_order.append(agent_ids.index(agent_id))
return new_id_order
def get_gym_index(self, agent_id: int) -> int:
return self._gym_id_order.index(agent_id)

gym-unity/gym_unity/tests/test_gym.py (198 changes)


import numpy as np
from gym import spaces
from gym_unity.envs import (
UnityEnv,
UnityGymException,
AgentIdIndexMapper,
AgentIdIndexMapperSlow,
from gym_unity.envs import UnityEnv
from mlagents_envs.base_env import (
BehaviorSpec,
ActionType,
DecisionSteps,
TerminalSteps,
from mlagents_envs.base_env import AgentGroupSpec, ActionType, BatchedStepResult
mock_step = create_mock_vector_step_result()
setup_mock_unityenvironment(mock_env, mock_spec, mock_step)
mock_decision_step, mock_terminal_step = create_mock_vector_steps(mock_spec)
setup_mock_unityenvironment(
mock_env, mock_spec, mock_decision_step, mock_terminal_step
)
env = UnityEnv(" ", use_visual=False, multiagent=False)
env = UnityEnv(" ", use_visual=False)
assert isinstance(env, UnityEnv)
assert isinstance(env.reset(), np.ndarray)
actions = env.action_space.sample()

@mock.patch("gym_unity.envs.UnityEnvironment")
def test_multi_agent(mock_env):
mock_spec = create_mock_group_spec()
mock_step = create_mock_vector_step_result(num_agents=2)
setup_mock_unityenvironment(mock_env, mock_spec, mock_step)
with pytest.raises(UnityGymException):
UnityEnv(" ", multiagent=False)
env = UnityEnv(" ", use_visual=False, multiagent=True)
assert isinstance(env.reset(), list)
actions = [env.action_space.sample() for i in range(env.number_agents)]
obs, rew, done, info = env.step(actions)
assert isinstance(obs, list)
assert isinstance(rew, list)
assert isinstance(done, list)
assert isinstance(info, dict)
@mock.patch("gym_unity.envs.UnityEnvironment")
mock_step = create_mock_vector_step_result(num_agents=1)
setup_mock_unityenvironment(mock_env, mock_spec, mock_step)
mock_decision_step, mock_terminal_step = create_mock_vector_steps(
mock_spec, num_agents=1
)
setup_mock_unityenvironment(
mock_env, mock_spec, mock_decision_step, mock_terminal_step
)
env = UnityEnv(" ", use_visual=False, multiagent=False, flatten_branched=True)
env = UnityEnv(" ", use_visual=False, flatten_branched=True)
assert isinstance(env.action_space, spaces.Discrete)
assert env.action_space.n == 12
assert env._flattener.lookup_action(0) == [0, 0, 0]

env = UnityEnv(" ", use_visual=False, multiagent=False, flatten_branched=False)
env = UnityEnv(" ", use_visual=False, flatten_branched=False)
assert isinstance(env.action_space, spaces.MultiDiscrete)

mock_spec = create_mock_group_spec(number_visual_observations=1)
mock_step = create_mock_vector_step_result(number_visual_observations=1)
setup_mock_unityenvironment(mock_env, mock_spec, mock_step)
mock_decision_step, mock_terminal_step = create_mock_vector_steps(
mock_spec, number_visual_observations=1
)
setup_mock_unityenvironment(
mock_env, mock_spec, mock_decision_step, mock_terminal_step
)
env = UnityEnv(" ", use_visual=True, multiagent=False, uint8_visual=use_uint8)
env = UnityEnv(" ", use_visual=True, uint8_visual=use_uint8)
assert isinstance(env, UnityEnv)
assert isinstance(env.reset(), np.ndarray)
actions = env.action_space.sample()

assert isinstance(info, dict)
@mock.patch("gym_unity.envs.UnityEnvironment")
def test_sanitize_action_shuffled_id(mock_env):
mock_spec = create_mock_group_spec(
vector_action_space_type="discrete", vector_action_space_size=[2, 2, 3]
)
mock_step = create_mock_vector_step_result(num_agents=5)
mock_step.agent_id = np.array(range(5))
setup_mock_unityenvironment(mock_env, mock_spec, mock_step)
env = UnityEnv(" ", use_visual=False, multiagent=True)
shuffled_step_result = create_mock_vector_step_result(num_agents=5)
shuffled_order = [4, 2, 3, 1, 0]
shuffled_step_result.reward = np.array(shuffled_order)
shuffled_step_result.agent_id = np.array(shuffled_order)
sanitized_result = env._sanitize_info(shuffled_step_result)
for expected_reward, reward in zip(range(5), sanitized_result.reward):
assert expected_reward == reward
for expected_agent_id, agent_id in zip(range(5), sanitized_result.agent_id):
assert expected_agent_id == agent_id
@mock.patch("gym_unity.envs.UnityEnvironment")
def test_sanitize_action_one_agent_done(mock_env):
mock_spec = create_mock_group_spec(
vector_action_space_type="discrete", vector_action_space_size=[2, 2, 3]
)
mock_step = create_mock_vector_step_result(num_agents=5)
mock_step.agent_id = np.array(range(5))
setup_mock_unityenvironment(mock_env, mock_spec, mock_step)
env = UnityEnv(" ", use_visual=False, multiagent=True)
received_step_result = create_mock_vector_step_result(num_agents=6)
received_step_result.agent_id = np.array(range(6))
# agent #3 (id = 2) is Done
received_step_result.done = np.array([False] * 2 + [True] + [False] * 3)
sanitized_result = env._sanitize_info(received_step_result)
for expected_agent_id, agent_id in zip([0, 1, 5, 3, 4], sanitized_result.agent_id):
assert expected_agent_id == agent_id
@mock.patch("gym_unity.envs.UnityEnvironment")
def test_sanitize_action_new_agent_done(mock_env):
mock_spec = create_mock_group_spec(
vector_action_space_type="discrete", vector_action_space_size=[2, 2, 3]
)
mock_step = create_mock_vector_step_result(num_agents=3)
mock_step.agent_id = np.array(range(5))
setup_mock_unityenvironment(mock_env, mock_spec, mock_step)
env = UnityEnv(" ", use_visual=False, multiagent=True)
received_step_result = create_mock_vector_step_result(num_agents=7)
received_step_result.agent_id = np.array(range(7))
# agent #3 (id = 2) is Done
# so is the "new" agent (id = 5)
done = [False] * 7
done[2] = True
done[5] = True
received_step_result.done = np.array(done)
sanitized_result = env._sanitize_info(received_step_result)
for expected_agent_id, agent_id in zip([0, 1, 6, 3, 4], sanitized_result.agent_id):
assert expected_agent_id == agent_id
@mock.patch("gym_unity.envs.UnityEnvironment")
def test_sanitize_action_single_agent_multiple_done(mock_env):
mock_spec = create_mock_group_spec(
vector_action_space_type="discrete", vector_action_space_size=[2, 2, 3]
)
mock_step = create_mock_vector_step_result(num_agents=1)
mock_step.agent_id = np.array(range(1))
setup_mock_unityenvironment(mock_env, mock_spec, mock_step)
env = UnityEnv(" ", use_visual=False, multiagent=False)
received_step_result = create_mock_vector_step_result(num_agents=3)
received_step_result.agent_id = np.array(range(3))
# original agent (id = 0) is Done
# so is the "new" agent (id = 1)
done = [True, True, False]
received_step_result.done = np.array(done)
sanitized_result = env._sanitize_info(received_step_result)
for expected_agent_id, agent_id in zip([2], sanitized_result.agent_id):
assert expected_agent_id == agent_id
# Helper methods

obs_shapes = [(vector_observation_space_size,)]
for _ in range(number_visual_observations):
obs_shapes += [(8, 8, 3)]
return AgentGroupSpec(obs_shapes, act_type, vector_action_space_size)
return BehaviorSpec(obs_shapes, act_type, vector_action_space_size)
def create_mock_vector_step_result(num_agents=1, number_visual_observations=0):
def create_mock_vector_steps(specs, num_agents=1, number_visual_observations=0):
:BehaviorSpecs specs: The BehaviorSpecs for this mock
:int num_agents: Number of "agents" to imitate in your BatchedStepResult values.
"""
obs = [np.array([num_agents * [1, 2, 3]]).reshape(num_agents, 3)]

done = np.array(num_agents * [False])
return BatchedStepResult(obs, rewards, done, done, agents, None)
return DecisionSteps(obs, rewards, agents, None), TerminalSteps.empty(specs)
def setup_mock_unityenvironment(mock_env, mock_spec, mock_result):
def setup_mock_unityenvironment(mock_env, mock_spec, mock_decision, mock_termination):
"""
Takes a mock UnityEnvironment and adds the appropriate properties, defined by the mock
GroupSpec and BatchedStepResult.

:Mock mock_result: A BatchedStepResult object that will be returned at each step and reset.
:Mock mock_decision: A DecisionSteps object that will be returned at each step and reset.
:Mock mock_termination: A TerminationSteps object that will be returned at each step and reset.
mock_env.return_value.get_agent_groups.return_value = ["MockBrain"]
mock_env.return_value.get_agent_group_spec.return_value = mock_spec
mock_env.return_value.get_step_result.return_value = mock_result
@pytest.mark.parametrize("mapper_cls", [AgentIdIndexMapper, AgentIdIndexMapperSlow])
def test_agent_id_index_mapper(mapper_cls):
mapper = mapper_cls()
initial_agent_ids = [1001, 1002, 1003, 1004]
mapper.set_initial_agents(initial_agent_ids)
# Mark some agents as done with their last rewards.
mapper.mark_agent_done(1001, 42.0)
mapper.mark_agent_done(1004, 1337.0)
# Make sure we can handle an unknown agent id being marked done.
# This can happen when an agent ends an episode on the same step it starts.
mapper.mark_agent_done(9999, -1.0)
# Now add new agents, and get the rewards of the agent they replaced.
old_reward1 = mapper.register_new_agent_id(2001)
old_reward2 = mapper.register_new_agent_id(2002)
# Order of the rewards doesn't matter
assert {old_reward1, old_reward2} == {42.0, 1337.0}
new_agent_ids = [1002, 1003, 2001, 2002]
permutation = mapper.get_id_permutation(new_agent_ids)
# Make sure it's actually a permutation - needs to contain 0..N-1 with no repeats.
assert set(permutation) == set(range(0, 4))
# For initial agents that were in the initial group, they need to be in the same slot.
# Agents that were added later can appear in any free slot.
permuted_ids = [new_agent_ids[i] for i in permutation]
for idx, agent_id in enumerate(initial_agent_ids):
if agent_id in permuted_ids:
assert permuted_ids[idx] == agent_id
mock_env.return_value.get_behavior_names.return_value = ["MockBrain"]
mock_env.return_value.get_behavior_spec.return_value = mock_spec
mock_env.return_value.get_steps.return_value = (mock_decision, mock_termination)

ml-agents-envs/mlagents_envs/base_env.py (249 changes)


"""
Python Environment API for the ML-Agents toolkit
The aim of this API is to expose groups of similar Agents evolving in Unity
The aim of this API is to expose Agents evolving in a simulation
There can be multiple groups of similar Agents (same observations and actions
spaces) in the simulation. These groups are identified by a agent_group that
corresponds to a single group of Agents in the simulation.
This API supports multi-agent scenarios and groups similar Agents (same
observations, actions spaces and behavior) together. These groups of Agents are
identified by their BehaviorName.
batched manner. When retrieving the state of a group of Agents, said state
contains the data for the whole group. Agents in these groups are identified
by a unique int identifier that allows tracking of Agents across simulation
steps. Note that there is no guarantee that the number or order of the Agents
in the state will be consistent across simulation steps.
batched manner. Agents are identified by a unique AgentId identifier that
allows tracking of Agents across simulation steps. Note that there is no
guarantee that the number or order of the Agents in the state will be
consistent across simulation steps.
A simulation step corresponds to moving the simulation forward until at least
one agent in the simulation sends its observations to Python again. Since
Agents can request decisions at different frequencies, a simulation step does

from abc import ABC, abstractmethod
from typing import List, NamedTuple, Tuple, Optional, Union, Dict
from collections.abc import Mapping
from typing import List, NamedTuple, Tuple, Optional, Union, Dict, Iterator, Any
AgentGroup = str
BehaviorName = str
class StepResult(NamedTuple):
class DecisionStep(NamedTuple):
- obs is a list of numpy arrays observations collected by the group of
agent.
- obs is a list of numpy arrays observations collected by the agent.
- done is a bool. Is true if the Agent was terminated during the last
simulation step.
- max_step is a bool. Is true if the Agent reached its maximum number of
steps during the last simulation step.
- agent_id is an int and an unique identifier for the corresponding Agent.
- action_mask is an optional list of one dimensional array of booleans.
Only available in multi-discrete action space type.

obs: List[np.ndarray]
reward: float
done: bool
max_step: bool
class BatchedStepResult:
class DecisionSteps(Mapping):
Contains the data a group of similar Agents collected since the last
Contains the data a batch of similar Agents collected since the last
agents and the batch size of the BatchedStepResult are not fixed across
agents and the batch size of the DecisionSteps are not fixed across
- obs is a list of numpy arrays observations collected by the group of
agent. Each obs has one extra dimension compared to StepResult: the first
dimension of the array corresponds to the batch size of
the group.
- obs is a list of numpy arrays of observations collected by the batch of
agents. Each obs has one extra dimension compared to DecisionStep: the
first dimension of the array corresponds to the batch size.
- done is an array of booleans of length batch size. Is true if the
associated Agent was terminated during the last simulation step.
- max_step is an array of booleans of length batch size. Is true if the
associated Agent reached its maximum number of steps during the last
simulation step.
- agent_id is an int vector of length batch size containing unique
identifier for the corresponding Agent. This is used to track Agents
across simulation steps.

this simulation step.
"""
def __init__(self, obs, reward, done, max_step, agent_id, action_mask):
def __init__(self, obs, reward, agent_id, action_mask):
self.done: np.ndarray = done
self.max_step: np.ndarray = max_step
self.agent_id: np.ndarray = agent_id
self.action_mask: Optional[List[np.ndarray]] = action_mask
self._agent_id_to_index: Optional[Dict[AgentId, int]] = None

"""
:returns: A Dict that maps agent_id to the index of those agents in
this BatchedStepResult.
this DecisionSteps.
"""
if self._agent_id_to_index is None:
self._agent_id_to_index = {}

def contains_agent(self, agent_id: AgentId) -> bool:
return agent_id in self.agent_id_to_index
def __len__(self) -> int:
return len(self.agent_id)
def get_agent_step_result(self, agent_id: AgentId) -> StepResult:
def __getitem__(self, agent_id: AgentId) -> DecisionStep:
returns the step result for a specific agent.
returns the DecisionStep for a specific agent.
:returns: obs, reward, done, agent_id and optional action mask for a
specific agent
:returns: The DecisionStep
if not self.contains_agent(agent_id):
raise IndexError(
"get_agent_step_result failed. agent_id {} is not present in the BatchedStepResult".format(
agent_id
)
if agent_id not in self.agent_id_to_index:
raise KeyError(
"agent_id {} is not present in the DecisionSteps".format(agent_id)
)
agent_index = self._agent_id_to_index[agent_id] # type: ignore
agent_obs = []

agent_mask = []
for mask in self.action_mask:
agent_mask.append(mask[agent_index])
return StepResult(
return DecisionStep(
done=self.done[agent_index],
max_step=self.max_step[agent_index],
def __iter__(self) -> Iterator[Any]:
yield from self.agent_id
def empty(spec: "AgentGroupSpec") -> "BatchedStepResult":
def empty(spec: "BehaviorSpec") -> "DecisionSteps":
Returns an empty BatchedStepResult.
:param spec: The AgentGroupSpec for the BatchedStepResult
Returns an empty DecisionSteps.
:param spec: The BehaviorSpec for the DecisionSteps
return BatchedStepResult(
return DecisionSteps(
done=np.zeros(0, dtype=np.bool),
max_step=np.zeros(0, dtype=np.bool),
def n_agents(self) -> int:
class TerminalStep(NamedTuple):
"""
Contains the data a single Agent collected when its episode ended.
- obs is a list of numpy arrays observations collected by the agent.
- reward is a float. Corresponds to the rewards collected by the agent
since the last simulation step.
- max_step is a bool. Is true if the Agent reached its maximum number of
steps during the last simulation step.
- agent_id is an int and an unique identifier for the corresponding Agent.
"""
obs: List[np.ndarray]
reward: float
max_step: bool
agent_id: AgentId
class TerminalSteps(Mapping):
"""
Contains the data a batch of Agents collected when their episode
terminated. All Agents present in the TerminalSteps have ended their
episode.
- obs is a list of numpy arrays of observations collected by the batch of
agents. Each obs has one extra dimension compared to DecisionStep: the
first dimension of the array corresponds to the batch size.
- reward is a float vector of length batch size. Corresponds to the
rewards collected by each agent since the last simulation step.
- max_step is an array of booleans of length batch size. Is true if the
associated Agent reached its maximum number of steps during the last
simulation step.
- agent_id is an int vector of length batch size containing unique
identifier for the corresponding Agent. This is used to track Agents
across simulation steps.
"""
def __init__(self, obs, reward, max_step, agent_id):
self.obs: List[np.ndarray] = obs
self.reward: np.ndarray = reward
self.max_step: np.ndarray = max_step
self.agent_id: np.ndarray = agent_id
self._agent_id_to_index: Optional[Dict[AgentId, int]] = None
@property
def agent_id_to_index(self) -> Dict[AgentId, int]:
"""
:returns: A Dict that maps agent_id to the index of those agents in
this TerminalSteps.
"""
if self._agent_id_to_index is None:
self._agent_id_to_index = {}
for a_idx, a_id in enumerate(self.agent_id):
self._agent_id_to_index[a_id] = a_idx
return self._agent_id_to_index
def __len__(self) -> int:
def __getitem__(self, agent_id: AgentId) -> TerminalStep:
"""
returns the TerminalStep for a specific agent.
:param agent_id: The id of the agent
:returns: obs, reward, done, agent_id and optional action mask for a
specific agent
"""
if agent_id not in self.agent_id_to_index:
raise KeyError(
"agent_id {} is not present in the TerminalSteps".format(agent_id)
)
agent_index = self._agent_id_to_index[agent_id] # type: ignore
agent_obs = []
for batched_obs in self.obs:
agent_obs.append(batched_obs[agent_index])
return TerminalStep(
obs=agent_obs,
reward=self.reward[agent_index],
max_step=self.max_step[agent_index],
agent_id=agent_id,
)
def __iter__(self) -> Iterator[Any]:
yield from self.agent_id
@staticmethod
def empty(spec: "BehaviorSpec") -> "TerminalSteps":
"""
Returns an empty TerminalSteps.
:param spec: The BehaviorSpec for the TerminalSteps
"""
obs: List[np.ndarray] = []
for shape in spec.observation_shapes:
obs += [np.zeros((0,) + shape, dtype=np.float32)]
return TerminalSteps(
obs=obs,
reward=np.zeros(0, dtype=np.float32),
max_step=np.zeros(0, dtype=np.bool),
agent_id=np.zeros(0, dtype=np.int32),
)
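A short sketch of the Mapping interface and the empty() helper, using a made-up BehaviorSpec (no Unity process needed):

from mlagents_envs.base_env import ActionType, BehaviorSpec, TerminalSteps

spec = BehaviorSpec([(8,)], ActionType.CONTINUOUS, 2)  # made-up shapes and action size
terminal_steps = TerminalSteps.empty(spec)
print(len(terminal_steps))            # 0: no agent ended an episode
for agent_id in terminal_steps:       # iterates over agent ids (none here)
    step = terminal_steps[agent_id]   # a TerminalStep NamedTuple
    print(agent_id, step.reward, step.max_step)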
class ActionType(Enum):
DISCRETE = 0

class AgentGroupSpec(NamedTuple):
class BehaviorSpec(NamedTuple):
spaces for a group of Agents.
spaces for a group of Agents under the same behavior.
the ordering of the BatchedStepResult and StepResult.
the ordering of the DecisionSteps and TerminalSteps.
- action_type is the type of the action data. It can be discrete or
continuous. If discrete, the action tensors are expected to be int32. If
continuous, the actions are expected to be float32.

def is_action_discrete(self) -> bool:
"""
Returns true if the Agent group uses discrete actions
Returns true if this Behavior uses discrete actions
Returns true if the Agent group uses continuous actions
Returns true if this Behavior uses continuous actions
"""
return self.action_type == ActionType.CONTINUOUS
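A quick sketch of querying a BehaviorSpec directly (shapes and branches below are made up):

import numpy as np
from mlagents_envs.base_env import ActionType, BehaviorSpec

spec = BehaviorSpec([(8,), (84, 84, 3)], ActionType.DISCRETE, (3, 2))
assert spec.is_action_discrete()
assert spec.discrete_action_branches == (3, 2)
assert spec.action_size == 2                   # number of discrete branches
empty = spec.create_empty_action(4)            # zero actions for 4 agents
assert empty.shape == (4, 2) and empty.dtype == np.int32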

pass
@abstractmethod
def get_agent_groups(self) -> List[AgentGroup]:
def get_behavior_names(self) -> List[BehaviorName]:
Returns the list of the agent group names present in the environment.
Agents grouped under the same group name have the same action and
observation specs, and are expected to behave similarly in the environment.
Returns the list of the behavior names present in the environment.
Agents grouped under the same behavior name have the same action and
observation specs, and are expected to behave similarly in the
environment.
:return: the list of agent group names.
:return: the list of BehaviorNames present in the environment.
def set_actions(self, agent_group: AgentGroup, action: np.ndarray) -> None:
def set_actions(self, behavior_name: BehaviorName, action: np.ndarray) -> None:
the step result.
:param agent_group: The name of the group the agents are part of
the DecisionSteps.
:param behavior_name: The name of the behavior the agents are part of
:param action: A two dimensional np.ndarray corresponding to the action
(either int or float)
"""

def set_action_for_agent(
self, agent_group: AgentGroup, agent_id: AgentId, action: np.ndarray
self, behavior_name: BehaviorName, agent_id: AgentId, action: np.ndarray
:param agent_group: The name of the group the agent is part of
:param behavior_name: The name of the behavior the agent is part of
:param action: A two dimensional np.ndarray corresponding to the action
:param action: A one dimensional np.ndarray corresponding to the action
def get_step_result(self, agent_group: AgentGroup) -> BatchedStepResult:
def get_steps(
self, behavior_name: BehaviorName
) -> Tuple[DecisionSteps, TerminalSteps]:
Retrieves the observations of the agents that requested a step in the
Retrieves the steps of the agents that requested a step in the
:param agent_group: The name of the group the agents are part of
:return: A BatchedStepResult NamedTuple containing the observations,
the rewards and the done flags for this group of agents.
:param behavior_name: The name of the behavior the agents are part of
:return: A tuple containing :
- A DecisionSteps NamedTuple containing the observations,
the rewards, the agent ids and the action masks for the Agents
of the specified behavior. These Agents need an action this step.
- A TerminalSteps NamedTuple containing the observations,
rewards, agent ids and max_step flags of the agents that had their
episode terminated last step.
def get_agent_group_spec(self, agent_group: AgentGroup) -> AgentGroupSpec:
def get_behavior_spec(self, behavior_name: BehaviorName) -> BehaviorSpec:
Get the AgentGroupSpec corresponding to the agent group name
:param agent_group: The name of the group the agents are part of
:return: A AgentGroupSpec corresponding to that agent group name
Get the BehaviorSpec corresponding to the behavior name
:param behavior_name: The name of the behavior the agents are part of
:return: A BehaviorSpec corresponding to that behavior
"""
pass
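Putting the renamed BaseEnv methods together, a minimal interaction loop might look like the sketch below; the player path is a placeholder and zero actions stand in for a real policy:

import numpy as np
from mlagents_envs.environment import UnityEnvironment

env = UnityEnvironment(file_name="path/to/UnityPlayer")  # placeholder build
env.reset()
behavior_name = env.get_behavior_names()[0]
spec = env.get_behavior_spec(behavior_name)

for _ in range(10):
    decision_steps, terminal_steps = env.get_steps(behavior_name)
    dtype = np.float32 if spec.is_action_continuous() else np.int32
    action = np.zeros((len(decision_steps), spec.action_size), dtype=dtype)
    env.set_actions(behavior_name, action)
    env.step()
env.close()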

102
ml-agents-envs/mlagents_envs/environment.py


import numpy as np
import os
import subprocess
from typing import Dict, List, Optional, Any
from typing import Dict, List, Optional, Any, Tuple
import mlagents_envs

from mlagents_envs.base_env import (
BaseEnv,
BatchedStepResult,
AgentGroupSpec,
AgentGroup,
DecisionSteps,
TerminalSteps,
BehaviorSpec,
BehaviorName,
AgentId,
)
from mlagents_envs.timers import timed, hierarchical_timer

)
from mlagents_envs.communicator_objects.command_pb2 import STEP, RESET
from mlagents_envs.rpc_utils import (
agent_group_spec_from_proto,
batched_step_result_from_proto,
)
from mlagents_envs.rpc_utils import behavior_spec_from_proto, steps_from_proto
from mlagents_envs.communicator_objects.unity_rl_input_pb2 import UnityRLInputProto
from mlagents_envs.communicator_objects.unity_rl_output_pb2 import UnityRLOutputProto

f"Connected to Unity environment with package version {aca_params.package_version} "
f"and communication version {aca_params.communication_version}"
)
self._env_state: Dict[str, BatchedStepResult] = {}
self._env_specs: Dict[str, AgentGroupSpec] = {}
self._env_state: Dict[str, Tuple[DecisionSteps, TerminalSteps]] = {}
self._env_specs: Dict[str, BehaviorSpec] = {}
self._update_group_specs(aca_output)
self._update_behavior_specs(aca_output)
@staticmethod
def get_communicator(worker_id, base_port, timeout_wait):

f'"chmod -R 755 {launch_string}"'
) from perm
def _update_group_specs(self, output: UnityOutputProto) -> None:
def _update_behavior_specs(self, output: UnityOutputProto) -> None:
init_output = output.rl_initialization_output
for brain_param in init_output.brain_parameters:
# Each BrainParameter in the rl_initialization_output should have at least one AgentInfo

agent = agent_infos.value[0]
new_spec = agent_group_spec_from_proto(brain_param, agent)
new_spec = behavior_spec_from_proto(brain_param, agent)
self._env_specs[brain_param.brain_name] = new_spec
logger.info(f"Connected new brain:\n{brain_param.brain_name}")

for brain_name in self._env_specs.keys():
if brain_name in output.agentInfos:
agent_info_list = output.agentInfos[brain_name].value
self._env_state[brain_name] = batched_step_result_from_proto(
self._env_state[brain_name] = steps_from_proto(
self._env_state[brain_name] = BatchedStepResult.empty(
self._env_specs[brain_name]
self._env_state[brain_name] = (
DecisionSteps.empty(self._env_specs[brain_name]),
TerminalSteps.empty(self._env_specs[brain_name]),
)
self._parse_side_channel_message(self.side_channels, output.side_channel)

if outputs is None:
raise UnityCommunicationException("Communicator has stopped.")
self._update_group_specs(outputs)
self._update_behavior_specs(outputs)
rl_output = outputs.rl_output
self._update_state(rl_output)
self._is_first_message = False

if group_name not in self._env_actions:
n_agents = 0
if group_name in self._env_state:
n_agents = self._env_state[group_name].n_agents()
n_agents = len(self._env_state[group_name][0])
self._env_actions[group_name] = self._env_specs[
group_name
].create_empty_action(n_agents)

if outputs is None:
raise UnityCommunicationException("Communicator has stopped.")
self._update_group_specs(outputs)
self._update_behavior_specs(outputs)
def get_agent_groups(self) -> List[AgentGroup]:
def get_behavior_names(self):
def _assert_group_exists(self, agent_group: str) -> None:
if agent_group not in self._env_specs:
def _assert_behavior_exists(self, behavior_name: str) -> None:
if behavior_name not in self._env_specs:
"in the environment".format(agent_group)
"in the environment".format(behavior_name)
def set_actions(self, agent_group: AgentGroup, action: np.ndarray) -> None:
self._assert_group_exists(agent_group)
if agent_group not in self._env_state:
def set_actions(self, behavior_name: BehaviorName, action: np.ndarray) -> None:
self._assert_behavior_exists(behavior_name)
if behavior_name not in self._env_state:
spec = self._env_specs[agent_group]
spec = self._env_specs[behavior_name]
expected_shape = (self._env_state[agent_group].n_agents(), spec.action_size)
expected_shape = (len(self._env_state[behavior_name][0]), spec.action_size)
"The group {0} needs an input of dimension {1} but received input of dimension {2}".format(
agent_group, expected_shape, action.shape
"The behavior {0} needs an input of dimension {1} but received input of dimension {2}".format(
behavior_name, expected_shape, action.shape
self._env_actions[agent_group] = action
self._env_actions[behavior_name] = action
self, agent_group: AgentGroup, agent_id: AgentId, action: np.ndarray
self, behavior_name: BehaviorName, agent_id: AgentId, action: np.ndarray
self._assert_group_exists(agent_group)
if agent_group not in self._env_state:
self._assert_behavior_exists(behavior_name)
if behavior_name not in self._env_state:
spec = self._env_specs[agent_group]
spec = self._env_specs[behavior_name]
"The Agent {0} in group {1} needs an input of dimension {2} but received input of dimension {3}".format(
agent_id, agent_group, expected_shape, action.shape
f"The Agent {0} with BehaviorName {1} needs an input of dimension "
f"{2} but received input of dimension {3}".format(
agent_id, behavior_name, expected_shape, action.shape
)
)
expected_type = np.float32 if spec.is_action_continuous() else np.int32

if agent_group not in self._env_actions:
self._env_actions[agent_group] = spec.create_empty_action(
self._env_state[agent_group].n_agents()
if behavior_name not in self._env_actions:
self._env_actions[behavior_name] = spec.create_empty_action(
len(self._env_state[behavior_name][0])
index = np.where(self._env_state[agent_group].agent_id == agent_id)[0][0]
index = np.where(self._env_state[behavior_name][0].agent_id == agent_id)[0][
0
]
except IndexError as ie:
raise IndexError(
"agent_id {} is did not request a decision at the previous step".format(

self._env_actions[agent_group][index] = action
self._env_actions[behavior_name][index] = action
def get_step_result(self, agent_group: AgentGroup) -> BatchedStepResult:
self._assert_group_exists(agent_group)
return self._env_state[agent_group]
def get_steps(
self, behavior_name: BehaviorName
) -> Tuple[DecisionSteps, TerminalSteps]:
self._assert_behavior_exists(behavior_name)
return self._env_state[behavior_name]
def get_agent_group_spec(self, agent_group: AgentGroup) -> AgentGroupSpec:
self._assert_group_exists(agent_group)
return self._env_specs[agent_group]
def get_behavior_spec(self, behavior_name: BehaviorName) -> BehaviorSpec:
self._assert_behavior_exists(behavior_name)
return self._env_specs[behavior_name]
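As a rough per-agent variant (reusing env, behavior_name and spec from the loop sketch above), actions can also be set one agent at a time:

decision_steps, terminal_steps = env.get_steps(behavior_name)
for agent_id in decision_steps.agent_id:
    # one-dimensional zero action for a single agent of this behavior
    env.set_action_for_agent(
        behavior_name, agent_id, np.zeros(spec.action_size, dtype=np.float32)
    )
env.step()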
def close(self):
"""

) -> UnityInputProto:
rl_in = UnityRLInputProto()
for b in vector_action:
n_agents = self._env_state[b].n_agents()
n_agents = len(self._env_state[b][0])
if n_agents == 0:
continue
for i in range(n_agents):

99
ml-agents-envs/mlagents_envs/rpc_utils.py


from mlagents_envs.base_env import AgentGroupSpec, ActionType, BatchedStepResult
from mlagents_envs.base_env import (
BehaviorSpec,
ActionType,
DecisionSteps,
TerminalSteps,
)
from mlagents_envs.exception import UnityObservationException
from mlagents_envs.timers import hierarchical_timer, timed
from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto

from PIL import Image
def agent_group_spec_from_proto(
def behavior_spec_from_proto(
) -> AgentGroupSpec:
) -> BehaviorSpec:
Converts brain parameter and agent info proto to AgentGroupSpec object.
Converts brain parameter and agent info proto to BehaviorSpec object.
:return: AgentGroupSpec object.
:return: BehaviorSpec object.
"""
observation_shape = [tuple(obs.shape) for obs in agent_info.observations]
action_type = (

] = brain_param_proto.vector_action_size[0]
else:
action_shape = tuple(brain_param_proto.vector_action_size)
return AgentGroupSpec(observation_shape, action_type, action_shape)
return BehaviorSpec(observation_shape, action_type, action_shape)
@timed

@timed
def batched_step_result_from_proto(
def steps_from_proto(
group_spec: AgentGroupSpec,
) -> BatchedStepResult:
obs_list: List[np.ndarray] = []
for obs_index, obs_shape in enumerate(group_spec.observation_shapes):
behavior_spec: BehaviorSpec,
) -> Tuple[DecisionSteps, TerminalSteps]:
decision_agent_info_list = [
agent_info for agent_info in agent_info_list if not agent_info.done
]
terminal_agent_info_list = [
agent_info for agent_info in agent_info_list if agent_info.done
]
decision_obs_list: List[np.ndarray] = []
terminal_obs_list: List[np.ndarray] = []
for obs_index, obs_shape in enumerate(behavior_spec.observation_shapes):
obs_list.append(
_process_visual_observation(obs_index, obs_shape, agent_info_list)
decision_obs_list.append(
_process_visual_observation(
obs_index, obs_shape, decision_agent_info_list
)
)
terminal_obs_list.append(
_process_visual_observation(
obs_index, obs_shape, terminal_agent_info_list
)
obs_list.append(
_process_vector_observation(obs_index, obs_shape, agent_info_list)
decision_obs_list.append(
_process_vector_observation(
obs_index, obs_shape, decision_agent_info_list
)
)
terminal_obs_list.append(
_process_vector_observation(
obs_index, obs_shape, terminal_agent_info_list
)
rewards = np.array(
[agent_info.reward for agent_info in agent_info_list], dtype=np.float32
decision_rewards = np.array(
[agent_info.reward for agent_info in decision_agent_info_list], dtype=np.float32
)
terminal_rewards = np.array(
[agent_info.reward for agent_info in terminal_agent_info_list], dtype=np.float32
_raise_on_nan_and_inf(rewards, "rewards")
_raise_on_nan_and_inf(decision_rewards, "rewards")
_raise_on_nan_and_inf(terminal_rewards, "rewards")
done = np.array([agent_info.done for agent_info in agent_info_list], dtype=np.bool)
[agent_info.max_step_reached for agent_info in agent_info_list], dtype=np.bool
[agent_info.max_step_reached for agent_info in terminal_agent_info_list],
dtype=np.bool,
agent_id = np.array(
[agent_info.id for agent_info in agent_info_list], dtype=np.int32
decision_agent_id = np.array(
[agent_info.id for agent_info in decision_agent_info_list], dtype=np.int32
)
terminal_agent_id = np.array(
[agent_info.id for agent_info in terminal_agent_info_list], dtype=np.int32
if group_spec.is_action_discrete():
if any([agent_info.action_mask is not None] for agent_info in agent_info_list):
n_agents = len(agent_info_list)
a_size = np.sum(group_spec.discrete_action_branches)
if behavior_spec.is_action_discrete():
if any(
agent_info.action_mask is not None
for agent_info in decision_agent_info_list
):
n_agents = len(decision_agent_info_list)
a_size = np.sum(behavior_spec.discrete_action_branches)
for agent_index, agent_info in enumerate(agent_info_list):
for agent_index, agent_info in enumerate(decision_agent_info_list):
if agent_info.action_mask is not None:
if len(agent_info.action_mask) == a_size:
mask_matrix[agent_index, :] = [

action_mask = (1 - mask_matrix).astype(np.bool)
indices = _generate_split_indices(group_spec.discrete_action_branches)
indices = _generate_split_indices(behavior_spec.discrete_action_branches)
return BatchedStepResult(obs_list, rewards, done, max_step, agent_id, action_mask)
return (
DecisionSteps(
decision_obs_list, decision_rewards, decision_agent_id, action_mask
),
TerminalSteps(terminal_obs_list, terminal_rewards, max_step, terminal_agent_id),
)
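To see the split in isolation, here is a rough sketch that hand-builds a few AgentInfoProto messages and runs them through steps_from_proto (shapes, rewards and done flags are made up):

from mlagents_envs.base_env import ActionType, BehaviorSpec
from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto
from mlagents_envs.communicator_objects.observation_pb2 import ObservationProto
from mlagents_envs.rpc_utils import steps_from_proto

spec = BehaviorSpec([(3,)], ActionType.CONTINUOUS, 2)
ap_list = []
for i in range(4):
    obs = ObservationProto(
        float_data=ObservationProto.FloatData(data=[0.0, 0.0, 0.0]), shape=[3]
    )
    ap_list.append(
        AgentInfoProto(reward=float(i), done=(i % 2 == 0), id=i, observations=[obs])
    )
decision_steps, terminal_steps = steps_from_proto(ap_list, spec)
assert sorted(terminal_steps.agent_id) == [0, 2]   # done agents end up here
assert sorted(decision_steps.agent_id) == [1, 3]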
def _generate_split_indices(dims):

43
ml-agents-envs/mlagents_envs/tests/test_envs.py


import numpy as np
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.base_env import BatchedStepResult
from mlagents_envs.base_env import DecisionSteps, TerminalSteps
from mlagents_envs.exception import UnityEnvironmentException, UnityActionException
from mlagents_envs.mock_communicator import MockCommunicator

discrete_action=False, visual_inputs=0
)
env = UnityEnvironment(" ")
assert env.get_agent_groups() == ["RealFakeBrain"]
assert env.get_behavior_names() == ["RealFakeBrain"]
env.close()

discrete_action=False, visual_inputs=0
)
env = UnityEnvironment(" ")
spec = env.get_agent_group_spec("RealFakeBrain")
spec = env.get_behavior_spec("RealFakeBrain")
batched_step_result = env.get_step_result("RealFakeBrain")
decision_steps, terminal_steps = env.get_steps("RealFakeBrain")
assert isinstance(batched_step_result, BatchedStepResult)
assert len(spec.observation_shapes) == len(batched_step_result.obs)
n_agents = batched_step_result.n_agents()
for shape, obs in zip(spec.observation_shapes, batched_step_result.obs):
assert isinstance(decision_steps, DecisionSteps)
assert isinstance(terminal_steps, TerminalSteps)
assert len(spec.observation_shapes) == len(decision_steps.obs)
assert len(spec.observation_shapes) == len(terminal_steps.obs)
n_agents = len(decision_steps)
for shape, obs in zip(spec.observation_shapes, decision_steps.obs):
assert (n_agents,) + shape == obs.shape
n_agents = len(terminal_steps)
for shape, obs in zip(spec.observation_shapes, terminal_steps.obs):
assert (n_agents,) + shape == obs.shape

discrete_action=False, visual_inputs=0
)
env = UnityEnvironment(" ")
spec = env.get_agent_group_spec("RealFakeBrain")
spec = env.get_behavior_spec("RealFakeBrain")
batched_step_result = env.get_step_result("RealFakeBrain")
n_agents = batched_step_result.n_agents()
decision_steps, terminal_steps = env.get_steps("RealFakeBrain")
n_agents = len(decision_steps)
env.set_actions(
"RealFakeBrain", np.zeros((n_agents, spec.action_size), dtype=np.float32)
)

"RealFakeBrain",
np.zeros((n_agents - 1, spec.action_size), dtype=np.float32),
)
batched_step_result = env.get_step_result("RealFakeBrain")
n_agents = batched_step_result.n_agents()
decision_steps, terminal_steps = env.get_steps("RealFakeBrain")
n_agents = len(decision_steps)
env.set_actions(
"RealFakeBrain", -1 * np.ones((n_agents, spec.action_size), dtype=np.float32)
)

assert isinstance(batched_step_result, BatchedStepResult)
assert len(spec.observation_shapes) == len(batched_step_result.obs)
for shape, obs in zip(spec.observation_shapes, batched_step_result.obs):
assert isinstance(decision_steps, DecisionSteps)
assert isinstance(terminal_steps, TerminalSteps)
assert len(spec.observation_shapes) == len(decision_steps.obs)
assert len(spec.observation_shapes) == len(terminal_steps.obs)
for shape, obs in zip(spec.observation_shapes, decision_steps.obs):
assert not batched_step_result.done[0]
assert batched_step_result.done[2]
assert 0 in decision_steps
assert 2 in terminal_steps
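The membership checks above work because DecisionSteps and TerminalSteps are Mappings keyed by agent id; a standalone sketch with made-up data and no Unity process:

import numpy as np
from mlagents_envs.base_env import DecisionSteps

decision_steps = DecisionSteps(
    [np.zeros((2, 3), dtype=np.float32)],     # obs: batch of 2 vector observations
    np.array([0.0, 1.0], dtype=np.float32),   # reward
    np.array([0, 2], dtype=np.int32),         # agent_id
    None,                                     # action_mask
)
assert 0 in decision_steps and 2 in decision_steps
assert 1 not in decision_steps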
@mock.patch("mlagents_envs.environment.UnityEnvironment.executable_launcher")

170
ml-agents-envs/mlagents_envs/tests/test_rpc_utils.py


AgentInfoActionPairProto,
)
from mlagents_envs.communicator_objects.agent_action_pb2 import AgentActionProto
from mlagents_envs.base_env import AgentGroupSpec, ActionType, BatchedStepResult
from mlagents_envs.base_env import (
BehaviorSpec,
ActionType,
DecisionSteps,
TerminalSteps,
)
agent_group_spec_from_proto,
behavior_spec_from_proto,
batched_step_result_from_proto,
steps_from_proto,
)
from PIL import Image

ap = AgentInfoProto()
ap.reward = float("inf") if infinite_rewards else agent_index
ap.done = agent_index % 2 == 0
ap.max_step_reached = agent_index % 2 == 1
ap.max_step_reached = agent_index % 4 == 0
ap.id = agent_index
ap.action_mask.extend([True, False] * 5)
obs_proto_list = []

return obs_proto
def proto_from_batched_step_result(
batched_step_result: BatchedStepResult
def proto_from_steps(
decision_steps: DecisionSteps, terminal_steps: TerminalSteps
for agent_id in batched_step_result.agent_id:
agent_id_index = batched_step_result.agent_id_to_index[agent_id]
reward = batched_step_result.reward[agent_id_index]
done = batched_step_result.done[agent_id_index]
max_step_reached = batched_step_result.max_step[agent_id_index]
# Take care of the DecisionSteps first
for agent_id in decision_steps.agent_id:
agent_id_index = decision_steps.agent_id_to_index[agent_id]
reward = decision_steps.reward[agent_id_index]
done = False
max_step_reached = False
if batched_step_result.action_mask is not None:
if decision_steps.action_mask is not None:
for _branch in batched_step_result.action_mask:
for _branch in decision_steps.action_mask:
for all_observations_of_type in batched_step_result.obs:
for all_observations_of_type in decision_steps.obs:
observation = all_observations_of_type[agent_id_index]
if len(observation.shape) == 3:
observations.append(generate_uncompressed_proto_obs(observation))

compression_type=NONE,
)
)
agent_info_proto = AgentInfoProto(
reward=reward,
done=done,

observations=observations,
)
agent_info_protos.append(agent_info_proto)
# Take care of the TerminalSteps second
for agent_id in terminal_steps.agent_id:
agent_id_index = terminal_steps.agent_id_to_index[agent_id]
reward = terminal_steps.reward[agent_id_index]
done = True
max_step_reached = terminal_steps.max_step[agent_id_index]
final_observations: List[ObservationProto] = []
for all_observations_of_type in terminal_steps.obs:
observation = all_observations_of_type[agent_id_index]
if len(observation.shape) == 3:
final_observations.append(generate_uncompressed_proto_obs(observation))
else:
final_observations.append(
ObservationProto(
float_data=ObservationProto.FloatData(data=observation),
shape=[len(observation)],
compression_type=NONE,
)
)
agent_info_proto = AgentInfoProto(
reward=reward,
done=done,
id=agent_id,
max_step_reached=max_step_reached,
action_mask=None,
observations=final_observations,
)
agent_info_protos.append(agent_info_proto)
# The arguments here are the BatchedStepResult and actions for a single agent name
def proto_from_batched_step_result_and_action(
batched_step_result: BatchedStepResult, actions: np.ndarray
# The arguments here are the DecisionSteps, TerminalSteps and actions for a single agent name
def proto_from_steps_and_action(
decision_steps: DecisionSteps, terminal_steps: TerminalSteps, actions: np.ndarray
agent_info_protos = proto_from_batched_step_result(batched_step_result)
agent_info_protos = proto_from_steps(decision_steps, terminal_steps)
agent_action_protos = [
AgentActionProto(vector_actions=action) for action in actions
]

def test_batched_step_result_from_proto():
n_agents = 10
shapes = [(3,), (4,)]
group_spec = AgentGroupSpec(shapes, ActionType.CONTINUOUS, 3)
spec = BehaviorSpec(shapes, ActionType.CONTINUOUS, 3)
result = batched_step_result_from_proto(ap_list, group_spec)
assert list(result.reward) == list(range(n_agents))
assert list(result.agent_id) == list(range(n_agents))
for index in range(n_agents):
assert result.done[index] == (index % 2 == 0)
assert result.max_step[index] == (index % 2 == 1)
assert list(result.obs[0].shape) == [n_agents] + list(shapes[0])
assert list(result.obs[1].shape) == [n_agents] + list(shapes[1])
decision_steps, terminal_steps = steps_from_proto(ap_list, spec)
for agent_id in range(n_agents):
if agent_id in decision_steps:
# we set the reward equal to the agent id in generate_list_agent_proto
assert decision_steps[agent_id].reward == agent_id
elif agent_id in terminal_steps:
assert terminal_steps[agent_id].reward == agent_id
else:
raise Exception("Missing agent from the steps")
# We sort the AgentId since they are split between DecisionSteps and TerminalSteps
combined_agent_id = list(decision_steps.agent_id) + list(terminal_steps.agent_id)
combined_agent_id.sort()
assert combined_agent_id == list(range(n_agents))
for agent_id in range(n_agents):
assert (agent_id in terminal_steps) == (agent_id % 2 == 0)
if agent_id in terminal_steps:
assert terminal_steps[agent_id].max_step == (agent_id % 4 == 0)
assert decision_steps.obs[0].shape[1] == shapes[0][0]
assert decision_steps.obs[1].shape[1] == shapes[1][0]
assert terminal_steps.obs[0].shape[1] == shapes[0][0]
assert terminal_steps.obs[1].shape[1] == shapes[1][0]
group_spec = AgentGroupSpec(shapes, ActionType.DISCRETE, (7, 3))
behavior_spec = BehaviorSpec(shapes, ActionType.DISCRETE, (7, 3))
result = batched_step_result_from_proto(ap_list, group_spec)
masks = result.action_mask
decision_steps, terminal_steps = steps_from_proto(ap_list, behavior_spec)
masks = decision_steps.action_mask
assert masks[0].shape == (n_agents, 7)
assert masks[1].shape == (n_agents, 3)
assert masks[0].shape == (n_agents / 2, 7) # half agents are done
assert masks[1].shape == (n_agents / 2, 3) # half agents are done
assert masks[0][0, 0]
assert not masks[1][0, 0]
assert masks[1][0, 1]

n_agents = 10
shapes = [(3,), (4,)]
group_spec = AgentGroupSpec(shapes, ActionType.DISCRETE, (10,))
behavior_spec = BehaviorSpec(shapes, ActionType.DISCRETE, (10,))
result = batched_step_result_from_proto(ap_list, group_spec)
masks = result.action_mask
decision_steps, terminal_steps = steps_from_proto(ap_list, behavior_spec)
masks = decision_steps.action_mask
assert masks[0].shape == (n_agents, 10)
assert masks[0].shape == (n_agents / 2, 10)
assert masks[0][0, 0]

group_spec = AgentGroupSpec(shapes, ActionType.DISCRETE, (2, 2, 6))
behavior_spec = BehaviorSpec(shapes, ActionType.DISCRETE, (2, 2, 6))
result = batched_step_result_from_proto(ap_list, group_spec)
masks = result.action_mask
decision_steps, terminal_steps = steps_from_proto(ap_list, behavior_spec)
masks = decision_steps.action_mask
assert masks[0].shape == (n_agents, 2)
assert masks[1].shape == (n_agents, 2)
assert masks[2].shape == (n_agents, 6)
assert masks[0].shape == (n_agents / 2, 2)
assert masks[1].shape == (n_agents / 2, 2)
assert masks[2].shape == (n_agents / 2, 6)
assert masks[0][0, 0]

group_spec = AgentGroupSpec(shapes, ActionType.CONTINUOUS, 10)
behavior_spec = BehaviorSpec(shapes, ActionType.CONTINUOUS, 10)
result = batched_step_result_from_proto(ap_list, group_spec)
masks = result.action_mask
decision_steps, terminal_steps = steps_from_proto(ap_list, behavior_spec)
masks = decision_steps.action_mask
def test_agent_group_spec_from_proto():
def test_agent_behavior_spec_from_proto():
group_spec = agent_group_spec_from_proto(bp, agent_proto)
assert group_spec.is_action_discrete()
assert not group_spec.is_action_continuous()
assert group_spec.observation_shapes == [(3,), (4,)]
assert group_spec.discrete_action_branches == (5, 4)
assert group_spec.action_size == 2
behavior_spec = behavior_spec_from_proto(bp, agent_proto)
assert behavior_spec.is_action_discrete()
assert not behavior_spec.is_action_continuous()
assert behavior_spec.observation_shapes == [(3,), (4,)]
assert behavior_spec.discrete_action_branches == (5, 4)
assert behavior_spec.action_size == 2
group_spec = agent_group_spec_from_proto(bp, agent_proto)
assert not group_spec.is_action_discrete()
assert group_spec.is_action_continuous()
assert group_spec.action_size == 6
behavior_spec = behavior_spec_from_proto(bp, agent_proto)
assert not behavior_spec.is_action_discrete()
assert behavior_spec.is_action_continuous()
assert behavior_spec.action_size == 6
group_spec = AgentGroupSpec(shapes, ActionType.CONTINUOUS, 3)
behavior_spec = BehaviorSpec(shapes, ActionType.CONTINUOUS, 3)
batched_step_result_from_proto(ap_list, group_spec)
steps_from_proto(ap_list, behavior_spec)
group_spec = AgentGroupSpec(shapes, ActionType.CONTINUOUS, 3)
behavior_spec = BehaviorSpec(shapes, ActionType.CONTINUOUS, 3)
batched_step_result_from_proto(ap_list, group_spec)
steps_from_proto(ap_list, behavior_spec)
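A pure-numpy sketch of the per-branch mask splitting that the assertions above rely on (branch sizes are illustrative):

import numpy as np

branches = (7, 3)
flat_mask = np.zeros((4, sum(branches)), dtype=bool)   # 4 agents, 10 mask bits
split_points = np.cumsum(branches)[:-1]                # split after column 7
per_branch = np.split(flat_mask, split_points, axis=1)
assert per_branch[0].shape == (4, 7)
assert per_branch[1].shape == (4, 3)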

179
ml-agents/mlagents/trainers/agent_processor.py


import sys
from typing import List, Dict, Deque, TypeVar, Generic, Tuple, Any
from typing import List, Dict, Deque, TypeVar, Generic, Tuple, Any, Union
from mlagents_envs.base_env import BatchedStepResult, StepResult
from mlagents_envs.base_env import (
DecisionSteps,
DecisionStep,
TerminalSteps,
TerminalStep,
)
from mlagents_envs.side_channel.stats_side_channel import StatsAggregationMethod
from mlagents.trainers.trajectory import Trajectory, AgentExperience
from mlagents.trainers.policy.tf_policy import TFPolicy

:param stats_category: The category under which to write the stats. Usually, this comes from the Trainer.
"""
self.experience_buffers: Dict[str, List[AgentExperience]] = defaultdict(list)
self.last_step_result: Dict[str, Tuple[StepResult, int]] = {}
self.last_step_result: Dict[str, Tuple[DecisionStep, int]] = {}
# last_take_action_outputs stores the action a_t taken before the current observation s_(t+1), while
# grabbing previous_action from the policy grabs the action PRIOR to that, a_(t-1).
self.last_take_action_outputs: Dict[str, ActionInfoOutputs] = {}

def add_experiences(
self,
batched_step_result: BatchedStepResult,
decision_steps: DecisionSteps,
terminal_steps: TerminalSteps,
:param batched_step_result: current BatchedStepResult.
:param decision_steps: current DecisionSteps.
:param terminal_steps: current TerminalSteps.
:param previous_action: The outputs of the Policy's get_action method.
"""
take_action_outputs = previous_action.outputs

if global_id in self.last_step_result: # Don't store if agent just reset
self.last_take_action_outputs[global_id] = take_action_outputs
for _id in batched_step_result.agent_id: # Assume agent_id is 1-D
local_id = int(
_id
) # Needed for mypy to pass since ndarray has no content type
curr_agent_step = batched_step_result.get_agent_step_result(local_id)
# Iterate over all the terminal steps
for terminal_step in terminal_steps.values():
local_id = terminal_step.agent_id
stored_agent_step, idx = self.last_step_result.get(global_id, (None, None))
stored_take_action_outputs = self.last_take_action_outputs.get(
global_id, None
self._process_step(
terminal_step, global_id, terminal_steps.agent_id_to_index[local_id]
if stored_agent_step is not None and stored_take_action_outputs is not None:
# We know the step is from the same worker, so use the local agent id.
obs = stored_agent_step.obs
if not stored_agent_step.done:
if self.policy.use_recurrent:
memory = self.policy.retrieve_memories([global_id])[0, :]
else:
memory = None
done = curr_agent_step.done
max_step = curr_agent_step.max_step
# Add the outputs of the last eval
action = stored_take_action_outputs["action"][idx]
if self.policy.use_continuous_act:
action_pre = stored_take_action_outputs["pre_action"][idx]
else:
action_pre = None
action_probs = stored_take_action_outputs["log_probs"][idx]
action_mask = stored_agent_step.action_mask
prev_action = self.policy.retrieve_previous_action([global_id])[
0, :
]
experience = AgentExperience(
obs=obs,
reward=curr_agent_step.reward,
done=done,
action=action,
action_probs=action_probs,
action_pre=action_pre,
action_mask=action_mask,
prev_action=prev_action,
max_step=max_step,
memory=memory,
)
# Add the value outputs if needed
self.experience_buffers[global_id].append(experience)
self.episode_rewards[global_id] += curr_agent_step.reward
if (
curr_agent_step.done
or (
len(self.experience_buffers[global_id])
>= self.max_trajectory_length
)
) and len(self.experience_buffers[global_id]) > 0:
# Make next AgentExperience
next_obs = curr_agent_step.obs
trajectory = Trajectory(
steps=self.experience_buffers[global_id],
agent_id=global_id,
next_obs=next_obs,
behavior_id=self.behavior_id,
)
for traj_queue in self.trajectory_queues:
traj_queue.put(trajectory)
self.experience_buffers[global_id] = []
if curr_agent_step.done:
# Record episode length for agents which have had at least
# 1 step. Done after reset ignored.
self.stats_reporter.add_stat(
"Environment/Episode Length",
self.episode_steps.get(global_id, 0),
)
elif not curr_agent_step.done:
self.episode_steps[global_id] += 1
# Index is needed to grab from last_take_action_outputs
self.last_step_result[global_id] = (
curr_agent_step,
batched_step_result.agent_id_to_index[_id],
# Iterate over all the decision steps
for ongoing_step in decision_steps.values():
local_id = ongoing_step.agent_id
global_id = get_global_agent_id(worker_id, local_id)
self._process_step(
ongoing_step, global_id, decision_steps.agent_id_to_index[local_id]
# Delete all done agents, regardless of if they had a 0-length episode.
if curr_agent_step.done:
self._clean_agent_data(global_id)
for _gid in action_global_agent_ids:
# If the ID doesn't have a last step result, the agent just reset,

self.policy.save_previous_action(
[_gid], take_action_outputs["action"]
)
def _process_step(
self, step: Union[TerminalStep, DecisionStep], global_id: str, index: int
) -> None:
terminated = isinstance(step, TerminalStep)
stored_decision_step, idx = self.last_step_result.get(global_id, (None, None))
stored_take_action_outputs = self.last_take_action_outputs.get(global_id, None)
if not terminated:
# Index is needed to grab from last_take_action_outputs
self.last_step_result[global_id] = (step, index)
# This state is the consequence of a past action
if stored_decision_step is not None and stored_take_action_outputs is not None:
obs = stored_decision_step.obs
if self.policy.use_recurrent:
memory = self.policy.retrieve_memories([global_id])[0, :]
else:
memory = None
done = terminated  # True only when the step came from TerminalSteps
max_step = step.max_step if terminated else False
# Add the outputs of the last eval
action = stored_take_action_outputs["action"][idx]
if self.policy.use_continuous_act:
action_pre = stored_take_action_outputs["pre_action"][idx]
else:
action_pre = None
action_probs = stored_take_action_outputs["log_probs"][idx]
action_mask = stored_decision_step.action_mask
prev_action = self.policy.retrieve_previous_action([global_id])[0, :]
experience = AgentExperience(
obs=obs,
reward=step.reward,
done=done,
action=action,
action_probs=action_probs,
action_pre=action_pre,
action_mask=action_mask,
prev_action=prev_action,
max_step=max_step,
memory=memory,
)
# Add the value outputs if needed
self.experience_buffers[global_id].append(experience)
self.episode_rewards[global_id] += step.reward
if not terminated:
self.episode_steps[global_id] += 1
# if the trajectory is too long, we truncate it
if (
len(self.experience_buffers[global_id]) >= self.max_trajectory_length
or terminated
):
# Make next AgentExperience
next_obs = step.obs
trajectory = Trajectory(
steps=self.experience_buffers[global_id],
agent_id=global_id,
next_obs=next_obs,
behavior_id=self.behavior_id,
)
for traj_queue in self.trajectory_queues:
traj_queue.put(trajectory)
self.experience_buffers[global_id] = []
if terminated:
# Record episode length.
self.stats_reporter.add_stat(
"Environment/Episode Length", self.episode_steps.get(global_id, 0)
)
self._clean_agent_data(global_id)
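The terminal/decision dispatch in _process_step boils down to an isinstance check; a small standalone sketch with a dummy step:

import numpy as np
from mlagents_envs.base_env import TerminalStep

step = TerminalStep(
    obs=[np.zeros(4, dtype=np.float32)], reward=0.5, max_step=True, agent_id=3
)
terminated = isinstance(step, TerminalStep)
done = terminated                          # only terminal steps mark the episode done
max_step = step.max_step if terminated else False
print(done, max_step)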
def _clean_agent_data(self, global_id: str) -> None:
"""

16
ml-agents/mlagents/trainers/brain_conversion_utils.py


from mlagents.trainers.brain import BrainParameters, CameraResolution
from mlagents_envs.base_env import AgentGroupSpec
from mlagents_envs.base_env import BehaviorSpec
def group_spec_to_brain_parameters(
name: str, group_spec: AgentGroupSpec
def behavior_spec_to_brain_parameters(
name: str, behavior_spec: BehaviorSpec
[shape[0] for shape in group_spec.observation_shapes if len(shape) == 1]
[shape[0] for shape in behavior_spec.observation_shapes if len(shape) == 1]
vis_sizes = [shape for shape in group_spec.observation_shapes if len(shape) == 3]
vis_sizes = [shape for shape in behavior_spec.observation_shapes if len(shape) == 3]
if group_spec.is_action_discrete():
a_size += list(group_spec.discrete_action_branches)
if behavior_spec.is_action_discrete():
a_size += list(behavior_spec.discrete_action_branches)
a_size += [group_spec.action_size]
a_size += [behavior_spec.action_size]
vector_action_space_type = 1
return BrainParameters(
name, int(vec_size), cam_res, a_size, [], vector_action_space_type
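A hedged usage sketch of the renamed converter (behavior name and shapes below are made up):

from mlagents_envs.base_env import ActionType, BehaviorSpec
from mlagents.trainers.brain_conversion_utils import behavior_spec_to_brain_parameters

spec = BehaviorSpec([(8,), (84, 84, 3)], ActionType.DISCRETE, (3, 2))
brain_params = behavior_spec_to_brain_parameters("MyBehavior", spec)
print(brain_params.vector_action_space_size)   # expected: [3, 2]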

59
ml-agents/mlagents/trainers/demo_loader.py


import numpy as np
from mlagents.trainers.buffer import AgentBuffer
from mlagents.trainers.brain import BrainParameters
from mlagents.trainers.brain_conversion_utils import group_spec_to_brain_parameters
from mlagents.trainers.brain_conversion_utils import behavior_spec_to_brain_parameters
from mlagents_envs.rpc_utils import (
agent_group_spec_from_proto,
batched_step_result_from_proto,
)
from mlagents_envs.base_env import AgentGroupSpec
from mlagents_envs.rpc_utils import behavior_spec_from_proto, steps_from_proto
from mlagents_envs.base_env import BehaviorSpec
from mlagents_envs.communicator_objects.brain_parameters_pb2 import BrainParametersProto
from mlagents_envs.communicator_objects.demonstration_meta_pb2 import (
DemonstrationMetaProto,

@timed
def make_demo_buffer(
pair_infos: List[AgentInfoActionPairProto],
group_spec: AgentGroupSpec,
behavior_spec: BehaviorSpec,
sequence_length: int,
) -> AgentBuffer:
# Create and populate buffer using experiences

if idx > len(pair_infos) - 2:
break
next_pair_info = pair_infos[idx + 1]
current_step_info = batched_step_result_from_proto(
[current_pair_info.agent_info], group_spec
current_decision_step, current_terminal_step = steps_from_proto(
[current_pair_info.agent_info], behavior_spec
next_step_info = batched_step_result_from_proto(
[next_pair_info.agent_info], group_spec
next_decision_step, next_terminal_step = steps_from_proto(
[next_pair_info.agent_info], behavior_spec
)
previous_action = (
np.array(pair_infos[idx].action_info.vector_actions, dtype=np.float32) * 0

pair_infos[idx - 1].action_info.vector_actions, dtype=np.float32
)
curr_agent_id = current_step_info.agent_id[0]
current_agent_step_info = current_step_info.get_agent_step_result(curr_agent_id)
next_agent_id = next_step_info.agent_id[0]
next_agent_step_info = next_step_info.get_agent_step_result(next_agent_id)
next_done = len(next_terminal_step) == 1
next_reward = 0
if len(next_terminal_step) == 1:
next_reward = next_terminal_step.reward[0]
else:
next_reward = next_decision_step.reward[0]
current_obs = None
if len(current_terminal_step) == 1:
current_obs = list(current_terminal_step.values())[0].obs
else:
current_obs = list(current_decision_step.values())[0].obs
demo_raw_buffer["done"].append(next_agent_step_info.done)
demo_raw_buffer["rewards"].append(next_agent_step_info.reward)
split_obs = SplitObservations.from_observations(current_agent_step_info.obs)
demo_raw_buffer["done"].append(next_done)
demo_raw_buffer["rewards"].append(next_reward)
split_obs = SplitObservations.from_observations(current_obs)
if next_step_info.done:
if next_done:
demo_raw_buffer.resequence_and_append(
demo_processed_buffer, batch_size=None, training_length=sequence_length
)

:param sequence_length: Length of trajectories to fill buffer.
:return:
"""
group_spec, info_action_pair, _ = load_demonstration(file_path)
demo_buffer = make_demo_buffer(info_action_pair, group_spec, sequence_length)
brain_params = group_spec_to_brain_parameters("DemoBrain", group_spec)
behavior_spec, info_action_pair, _ = load_demonstration(file_path)
demo_buffer = make_demo_buffer(info_action_pair, behavior_spec, sequence_length)
brain_params = behavior_spec_to_brain_parameters("DemoBrain", behavior_spec)
return brain_params, demo_buffer

# First 32 bytes of file dedicated to meta-data.
file_paths = get_demo_files(file_path)
group_spec = None
behavior_spec = None
brain_param_proto = None
info_action_pairs = []
total_expected = 0

if obs_decoded > 1:
agent_info_action = AgentInfoActionPairProto()
agent_info_action.ParseFromString(data[pos : pos + next_pos])
if group_spec is None:
group_spec = agent_group_spec_from_proto(
if behavior_spec is None:
behavior_spec = behavior_spec_from_proto(
brain_param_proto, agent_info_action.agent_info
)
info_action_pairs.append(agent_info_action)

obs_decoded += 1
if not group_spec:
if not behavior_spec:
return group_spec, info_action_pairs, total_expected
return behavior_spec, info_action_pairs, total_expected
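End to end, the loader is typically driven through demo_to_buffer; a rough sketch in which the .demo path is a placeholder:

from mlagents.trainers.demo_loader import demo_to_buffer

brain_params, demo_buffer = demo_to_buffer("demos/ExpertPush.demo", 1)  # placeholder path
print(brain_params.brain_name, demo_buffer.num_experiences)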
def write_delimited(f, message):

36
ml-agents/mlagents/trainers/env_manager.py


from abc import ABC, abstractmethod
from typing import List, Dict, NamedTuple, Iterable, Tuple
from mlagents_envs.base_env import BatchedStepResult, AgentGroupSpec, AgentGroup
from mlagents_envs.base_env import (
DecisionSteps,
TerminalSteps,
BehaviorSpec,
BehaviorName,
)
from mlagents.trainers.brain import BrainParameters
from mlagents.trainers.policy.tf_policy import TFPolicy
from mlagents.trainers.agent_processor import AgentManager, AgentManagerQueue

AllStepResult = Dict[AgentGroup, BatchedStepResult]
AllGroupSpec = Dict[AgentGroup, AgentGroupSpec]
AllStepResult = Dict[BehaviorName, Tuple[DecisionSteps, TerminalSteps]]
AllGroupSpec = Dict[BehaviorName, BehaviorSpec]
logger = get_logger(__name__)

current_all_step_result: AllStepResult
worker_id: int
brain_name_to_action_info: Dict[AgentGroup, ActionInfo]
brain_name_to_action_info: Dict[BehaviorName, ActionInfo]
def name_behavior_ids(self) -> Iterable[AgentGroup]:
def name_behavior_ids(self) -> Iterable[BehaviorName]:
return self.current_all_step_result.keys()
@staticmethod

class EnvManager(ABC):
def __init__(self):
self.policies: Dict[AgentGroup, TFPolicy] = {}
self.agent_managers: Dict[AgentGroup, AgentManager] = {}
self.policies: Dict[BehaviorName, TFPolicy] = {}
self.agent_managers: Dict[BehaviorName, AgentManager] = {}
def set_policy(self, brain_name: AgentGroup, policy: TFPolicy) -> None:
def set_policy(self, brain_name: BehaviorName, policy: TFPolicy) -> None:
def set_agent_manager(self, brain_name: AgentGroup, manager: AgentManager) -> None:
def set_agent_manager(
self, brain_name: BehaviorName, manager: AgentManager
) -> None:
self.agent_managers[brain_name] = manager
@abstractmethod

@property
@abstractmethod
def external_brains(self) -> Dict[AgentGroup, BrainParameters]:
def external_brains(self) -> Dict[BehaviorName, BrainParameters]:
def get_properties(self) -> Dict[AgentGroup, float]:
def get_properties(self) -> Dict[BehaviorName, float]:
pass
@abstractmethod

)
)
continue
decision_steps, terminal_steps = step_info.current_all_step_result[
name_behavior_id
]
step_info.current_all_step_result[name_behavior_id],
decision_steps,
terminal_steps,
step_info.worker_id,
step_info.brain_name_to_action_info.get(
name_behavior_id, ActionInfo.empty()

10
ml-agents/mlagents/trainers/policy/nn_policy.py


from typing import Any, Dict, Optional, List
from mlagents.tf_utils import tf
from mlagents_envs.timers import timed
from mlagents_envs.base_env import BatchedStepResult
from mlagents_envs.base_env import DecisionSteps
from mlagents.trainers.brain import BrainParameters
from mlagents.trainers.models import EncoderType
from mlagents.trainers.models import ModelUtils

@timed
def evaluate(
self, batched_step_result: BatchedStepResult, global_agent_ids: List[str]
self, decision_requests: DecisionSteps, global_agent_ids: List[str]
:param batched_step_result: BatchedStepResult object containing inputs.
:param decision_requests: DecisionSteps object containing inputs.
self.batch_size_ph: batched_step_result.n_agents(),
self.batch_size_ph: len(decision_requests),
self.sequence_length_ph: 1,
}
if self.use_recurrent:

)
feed_dict[self.memory_in] = self.retrieve_memories(global_agent_ids)
feed_dict = self.fill_eval_dict(feed_dict, batched_step_result)
feed_dict = self.fill_eval_dict(feed_dict, decision_requests)
run_out = self._execute_model(feed_dict, self.inference_dict)
return run_out

4
ml-agents/mlagents/trainers/policy/policy.py


from abc import ABC, abstractmethod
from mlagents_envs.base_env import BatchedStepResult
from mlagents_envs.base_env import DecisionSteps
from mlagents.trainers.action_info import ActionInfo

self, batched_step_result: BatchedStepResult, worker_id: int = 0
self, decision_requests: DecisionSteps, worker_id: int = 0
) -> ActionInfo:
pass

25
ml-agents/mlagents/trainers/policy/tf_policy.py


from mlagents.trainers.action_info import ActionInfo
from mlagents.trainers.trajectory import SplitObservations
from mlagents.trainers.brain_conversion_utils import get_global_agent_id
from mlagents_envs.base_env import BatchedStepResult
from mlagents_envs.base_env import DecisionSteps
from mlagents.trainers.models import ModelUtils

self.sess.run(self.assign_ops, feed_dict=feed_dict)
def evaluate(
self, batched_step_result: BatchedStepResult, global_agent_ids: List[str]
self, decision_requests: DecisionSteps, global_agent_ids: List[str]
:param batched_step_result: BatchedStepResult input to network.
:param decision_requests: DecisionSteps input to network.
self, batched_step_result: BatchedStepResult, worker_id: int = 0
self, decision_requests: DecisionSteps, worker_id: int = 0
:param batched_step_result: A dictionary of brain names and BatchedStepResult from environment.
:param decision_requests: A dictionary of brain names and DecisionSteps from environment.
the BatchedStepResult came from. Used to construct a globally unique id for each agent.
the DecisionSteps came from. Used to construct a globally unique id for each agent.
if batched_step_result.n_agents() == 0:
if len(decision_requests) == 0:
for agent_id in batched_step_result.agent_id
for agent_id in decision_requests.agent_id
batched_step_result, global_agent_ids
decision_requests, global_agent_ids
)
self.save_memories(global_agent_ids, run_out.get("memory_out"))

outputs=run_out,
agent_ids=batched_step_result.agent_id,
agent_ids=decision_requests.agent_id,
)
def update(self, mini_batch, num_sequences):

feed_dict[self.vector_in] = vec_vis_obs.vector_observations
if not self.use_continuous_act:
mask = np.ones(
(
batched_step_result.n_agents(),
np.sum(self.brain.vector_action_space_size),
),
(len(batched_step_result), np.sum(self.brain.vector_action_space_size)),
dtype=np.float32,
)
if batched_step_result.action_mask is not None:

26
ml-agents/mlagents/trainers/simple_env_manager.py


from typing import Dict, List
from mlagents_envs.base_env import BaseEnv, AgentGroup
from mlagents_envs.base_env import BaseEnv, BehaviorName
from mlagents.trainers.brain_conversion_utils import group_spec_to_brain_parameters
from mlagents.trainers.brain_conversion_utils import behavior_spec_to_brain_parameters
class SimpleEnvManager(EnvManager):

return [step_info]
def _reset_env(
self, config: Dict[AgentGroup, float] = None
self, config: Dict[BehaviorName, float] = None
) -> List[EnvironmentStep]: # type: ignore
if config is not None:
for k, v in config.items():

return [self.previous_step]
@property
def external_brains(self) -> Dict[AgentGroup, BrainParameters]:
def external_brains(self) -> Dict[BehaviorName, BrainParameters]:
for brain_name in self.env.get_agent_groups():
result[brain_name] = group_spec_to_brain_parameters(
brain_name, self.env.get_agent_group_spec(brain_name)
for brain_name in self.env.get_behavior_names():
result[brain_name] = behavior_spec_to_brain_parameters(
brain_name, self.env.get_behavior_spec(brain_name)
def get_properties(self) -> Dict[AgentGroup, float]:
def get_properties(self) -> Dict[BehaviorName, float]:
return self.shared_float_properties.get_property_dict_copy()
def close(self):

def _take_step(self, last_step: EnvironmentStep) -> Dict[AgentGroup, ActionInfo]:
def _take_step(self, last_step: EnvironmentStep) -> Dict[BehaviorName, ActionInfo]:
for brain_name, step_info in last_step.current_all_step_result.items():
for brain_name, step_tuple in last_step.current_all_step_result.items():
step_info,
step_tuple[0],
0, # As there is only one worker, we assign the worker_id to 0.
)
return all_action_info

for brain_name in self.env.get_agent_groups():
all_step_result[brain_name] = self.env.get_step_result(brain_name)
for brain_name in self.env.get_behavior_names():
all_step_result[brain_name] = self.env.get_steps(brain_name)
return all_step_result

24
ml-agents/mlagents/trainers/subprocess_env_manager.py


from multiprocessing import Process, Pipe, Queue
from multiprocessing.connection import Connection
from queue import Empty as EmptyQueueException
from mlagents_envs.base_env import BaseEnv, AgentGroup
from mlagents_envs.base_env import BaseEnv, BehaviorName
from mlagents_envs.logging_util import get_logger
from mlagents.trainers.env_manager import EnvManager, EnvironmentStep, AllStepResult
from mlagents_envs.timers import (

StatsAggregationMethod,
)
from mlagents_envs.side_channel.side_channel import SideChannel
from mlagents.trainers.brain_conversion_utils import group_spec_to_brain_parameters
from mlagents.trainers.brain_conversion_utils import behavior_spec_to_brain_parameters
logger = get_logger(__name__)

def _generate_all_results() -> AllStepResult:
all_step_result: AllStepResult = {}
for brain_name in env.get_agent_groups():
all_step_result[brain_name] = env.get_step_result(brain_name)
for brain_name in env.get_behavior_names():
all_step_result[brain_name] = env.get_steps(brain_name)
for brain_name in env.get_agent_groups():
result[brain_name] = group_spec_to_brain_parameters(
brain_name, env.get_agent_group_spec(brain_name)
for brain_name in env.get_behavior_names():
result[brain_name] = behavior_spec_to_brain_parameters(
brain_name, env.get_behavior_spec(brain_name)
)
return result

return list(map(lambda ew: ew.previous_step, self.env_workers))
@property
def external_brains(self) -> Dict[AgentGroup, BrainParameters]:
def external_brains(self) -> Dict[BehaviorName, BrainParameters]:
def get_properties(self) -> Dict[AgentGroup, float]:
def get_properties(self) -> Dict[BehaviorName, float]:
self.env_workers[0].send(EnvironmentCommand.GET_PROPERTIES)
return self.env_workers[0].recv().payload

return step_infos
@timed
def _take_step(self, last_step: EnvironmentStep) -> Dict[AgentGroup, ActionInfo]:
def _take_step(self, last_step: EnvironmentStep) -> Dict[BehaviorName, ActionInfo]:
for brain_name, batch_step_result in last_step.current_all_step_result.items():
for brain_name, step_tuple in last_step.current_all_step_result.items():
batch_step_result, last_step.worker_id
step_tuple[0], last_step.worker_id
)
return all_action_info

42
ml-agents/mlagents/trainers/tests/mock_brain.py


from unittest import mock
from typing import List
from typing import List, Tuple
from mlagents_envs.base_env import BatchedStepResult
from mlagents_envs.base_env import (
DecisionSteps,
TerminalSteps,
BehaviorSpec,
ActionType,
)
def create_mock_brainparams(

return mock_brain()
def create_mock_batchedstep(
def create_mock_steps(
num_agents: int = 1,
num_vector_observations: int = 0,
num_vis_observations: int = 0,

) -> BatchedStepResult:
) -> Tuple[DecisionSteps, TerminalSteps]:
Creates a mock BatchedStepResult with observations. Imitates constant
vector/visual observations, rewards, dones, and agents.
Creates a mock Tuple[DecisionSteps, TerminalSteps] with observations.
Imitates constant vector/visual observations, rewards, dones, and agents.
:int num_agents: Number of "agents" to imitate.
:int num_vector_observations: Number of "observations" in your observation space

:bool done: Whether all the agents in the batch are done
"""
if action_shape is None:
action_shape = [2]

]
reward = np.array(num_agents * [1.0], dtype=np.float32)
done = np.array(num_agents * [done], dtype=np.bool)
return BatchedStepResult(obs_list, reward, done, max_step, agent_id, action_mask)
behavior_spec = BehaviorSpec(
[(84, 84, 3)] * num_vis_observations + [(num_vector_observations, 0, 0)],
ActionType.DISCRETE if discrete else ActionType.CONTINUOUS,
action_shape if discrete else action_shape[0],
)
if done:
return (
DecisionSteps.empty(behavior_spec),
TerminalSteps(obs_list, reward, max_step, agent_id),
)
else:
return (
DecisionSteps(obs_list, reward, agent_id, action_mask),
TerminalSteps.empty(behavior_spec),
)
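A short sketch of how the tests call this helper (keyword names taken from the calls in test_agent_processor.py):

from mlagents.trainers.tests import mock_brain as mb

decision_steps, terminal_steps = mb.create_mock_steps(
    num_agents=2, num_vector_observations=8, action_shape=[2]
)
assert len(decision_steps) == 2
assert len(terminal_steps) == 0   # no agent is done by default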
def create_batchedstep_from_brainparams(
def create_steps_from_brainparams(
) -> BatchedStepResult:
return create_mock_batchedstep(
) -> Tuple[DecisionSteps, TerminalSteps]:
return create_mock_steps(
num_agents=num_agents,
num_vector_observations=brain_params.vector_observation_space_size,
num_vis_observations=brain_params.number_visual_observations,

137
ml-agents/mlagents/trainers/tests/simple_test_envs.py


from mlagents_envs.base_env import (
BaseEnv,
AgentGroupSpec,
BatchedStepResult,
BehaviorSpec,
DecisionSteps,
TerminalSteps,
from mlagents_envs.tests.test_rpc_utils import proto_from_batched_step_result_and_action
from mlagents_envs.tests.test_rpc_utils import proto_from_steps_and_action
from mlagents_envs.communicator_objects.agent_info_action_pair_pb2 import (
AgentInfoActionPairProto,
)

self.vis_obs_size = vis_obs_size
self.vec_obs_size = vec_obs_size
action_type = ActionType.DISCRETE if use_discrete else ActionType.CONTINUOUS
self.group_spec = AgentGroupSpec(
self.behavior_spec = BehaviorSpec(
self._make_obs_spec(),
action_type,
tuple(2 for _ in range(action_size)) if use_discrete else action_size,

self.positions: Dict[str, List[float]] = {}
self.step_count: Dict[str, float] = {}
self.random = random.Random(str(self.group_spec))
self.random = random.Random(str(self.behavior_spec))
self.step_result: Dict[str, BatchedStepResult] = {}
self.step_result: Dict[str, Tuple[DecisionSteps, TerminalSteps]] = {}
self.agent_id: Dict[str, int] = {}
self.step_size = step_size # defines the difficulty of the test

obs.append(np.ones((1,) + self.vis_obs_size, dtype=np.float32) * value)
return obs
def get_agent_groups(self):
def get_behavior_names(self):
def get_agent_group_spec(self, name):
return self.group_spec
def get_behavior_spec(self, behavior_name):
return self.behavior_spec
def set_action_for_agent(self, name, id, data):
def set_action_for_agent(self, behavior_name, agent_id, action):
def set_actions(self, name, data):
self.action[name] = data
def set_actions(self, behavior_name, action):
self.action[behavior_name] = action
def get_step_result(self, name):
return self.step_result[name]
def get_steps(self, behavior_name):
return self.step_result[behavior_name]
def _take_action(self, name: str) -> bool:
deltas = []

def _make_batched_step(
self, name: str, done: bool, reward: float
) -> BatchedStepResult:
) -> Tuple[DecisionSteps, TerminalSteps]:
m_done = np.array([done], dtype=np.bool)
decision_step = DecisionSteps(m_vector_obs, m_reward, m_agent_id, action_mask)
terminal_step = TerminalSteps.empty(self.behavior_spec)
m_vector_obs,
m_reward,
m_done,
m_agent_id,
action_mask,
) = self._construct_reset_step(
m_vector_obs,
new_vector_obs,
m_reward,
m_done,
m_agent_id,
action_mask,
name,
new_reward,
new_done,
new_agent_id,
new_action_mask,
) = self._construct_reset_step(name)
decision_step = DecisionSteps(
new_vector_obs, new_reward, new_agent_id, new_action_mask
return BatchedStepResult(
m_vector_obs,
m_reward,
m_done,
np.zeros(m_done.shape, dtype=bool),
m_agent_id,
action_mask,
)
terminal_step = TerminalSteps(
m_vector_obs, m_reward, np.array([False], dtype=np.bool), m_agent_id
)
return (decision_step, terminal_step)
self,
vector_obs: List[np.ndarray],
new_vector_obs: List[np.ndarray],
reward: np.ndarray,
done: np.ndarray,
agent_id: np.ndarray,
action_mask: List[np.ndarray],
name: str,
) -> Tuple[List[np.ndarray], np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
self, name: str
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
m_vector_obs = [
np.concatenate((old, new), axis=0)
for old, new in zip(vector_obs, new_vector_obs)
]
m_reward = np.concatenate((reward, new_reward), axis=0)
m_done = np.concatenate((done, new_done), axis=0)
m_agent_id = np.concatenate((agent_id, new_agent_id), axis=0)
if action_mask is not None:
action_mask = [
np.concatenate((old, new), axis=0)
for old, new in zip(action_mask, new_action_mask)
]
return m_vector_obs, m_reward, m_done, m_agent_id, action_mask
return new_reward, new_done, new_agent_id, new_action_mask
def step(self) -> None:
assert all(action is not None for action in self.action.values())

def _make_batched_step(
self, name: str, done: bool, reward: float
) -> BatchedStepResult:
) -> Tuple[DecisionSteps, TerminalSteps]:
m_done = np.array([done], dtype=np.bool)
decision_step = DecisionSteps(m_vector_obs, m_reward, m_agent_id, action_mask)
terminal_step = TerminalSteps.empty(self.behavior_spec)
if done:
self._reset_agent(name)
recurrent_obs_val = (

(
m_vector_obs,
m_reward,
m_done,
m_agent_id,
action_mask,
) = self._construct_reset_step(
m_vector_obs,
new_vector_obs,
m_reward,
m_done,
m_agent_id,
action_mask,
name,
new_reward,
new_done,
new_agent_id,
new_action_mask,
) = self._construct_reset_step(name)
decision_step = DecisionSteps(
new_vector_obs, new_reward, new_agent_id, new_action_mask
return BatchedStepResult(
m_vector_obs,
m_reward,
m_done,
np.zeros(m_done.shape, dtype=bool),
m_agent_id,
action_mask,
)
terminal_step = TerminalSteps(
m_vector_obs, m_reward, np.array([False], dtype=np.bool), m_agent_id
)
return (decision_step, terminal_step)
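For reference, the test environments above can be driven exactly like a real one; a sketch assuming the SimpleEnvironment helper defined in this file:

from mlagents.trainers.tests.simple_test_envs import SimpleEnvironment

env = SimpleEnvironment(["test_brain"], use_discrete=False)
env.reset()
behavior_name = env.get_behavior_names()[0]
spec = env.get_behavior_spec(behavior_name)
decision_steps, terminal_steps = env.get_steps(behavior_name)
env.set_actions(behavior_name, spec.create_empty_action(len(decision_steps)))
env.step()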
class RecordEnvironment(SimpleEnvironment):

def step(self) -> None:
super().step()
for name in self.names:
self.demonstration_protos[
name
] += proto_from_batched_step_result_and_action(
self.step_result[name], self.action[name]
self.demonstration_protos[name] += proto_from_steps_and_action(
self.step_result[name][0], self.step_result[name][1], self.action[name]
)
self.demonstration_protos[name] = self.demonstration_protos[name][
-self.n_demos :

54
ml-agents/mlagents/trainers/tests/test_agent_processor.py


"pre_action": [0.1, 0.1],
"log_probs": [0.1, 0.1],
}
mock_step = mb.create_mock_batchedstep(
mock_decision_steps, mock_terminal_steps = mb.create_mock_steps(
num_agents=2,
num_vector_observations=8,
action_shape=[2],

action=[0.1, 0.1],
value=[0.1, 0.1],
outputs=fake_action_outputs,
-     agent_ids=mock_step.agent_id,
+     agent_ids=mock_decision_steps.agent_id,
- processor.add_experiences(mock_step, 0, ActionInfo.empty())
+ processor.add_experiences(
+     mock_decision_steps, mock_terminal_steps, 0, ActionInfo.empty()
+ )
- processor.add_experiences(mock_step, 0, fake_action_info)
+ processor.add_experiences(
+     mock_decision_steps, mock_terminal_steps, 0, fake_action_info
+ )
# Assert that two trajectories have been added to the Trainer
assert len(tqueue.put.call_args_list) == 2

# Assert that the AgentProcessor is empty
assert len(processor.experience_buffers[0]) == 0
- # Test empty BatchedStepResult
- mock_step = mb.create_mock_batchedstep(
+ # Test empty steps
+ mock_decision_steps, mock_terminal_steps = mb.create_mock_steps(
- processor.add_experiences(mock_step, 0, ActionInfo([], [], {}, []))
+ processor.add_experiences(
+     mock_decision_steps, mock_terminal_steps, 0, ActionInfo([], [], {}, [])
+ )
# Assert that the AgentProcessor is still empty
assert len(processor.experience_buffers[0]) == 0

"pre_action": [0.1],
"log_probs": [0.1],
}
- mock_step = mb.create_mock_batchedstep(
+ mock_decision_step, mock_terminal_step = mb.create_mock_steps(
- mock_done_step = mb.create_mock_batchedstep(
+ mock_done_decision_step, mock_done_terminal_step = mb.create_mock_steps(
num_agents=1,
num_vector_observations=8,
action_shape=[2],

action=[0.1],
value=[0.1],
outputs=fake_action_outputs,
-     agent_ids=mock_step.agent_id,
+     agent_ids=mock_decision_step.agent_id,
- processor.add_experiences(mock_step, 0, ActionInfo.empty())
+ processor.add_experiences(
+     mock_decision_step, mock_terminal_step, 0, ActionInfo.empty()
+ )
# Run 3 trajectories, with different workers (to simulate different agents)
add_calls = []

-         processor.add_experiences(mock_step, _ep, fake_action_info)
+         processor.add_experiences(
+             mock_decision_step, mock_terminal_step, _ep, fake_action_info
+         )
-         processor.add_experiences(mock_done_step, _ep, fake_action_info)
+         processor.add_experiences(
+             mock_done_decision_step, mock_done_terminal_step, _ep, fake_action_info
+         )
# Make sure we don't add experiences from the prior agents after the done
remove_calls.append(mock.call([get_global_agent_id(_ep, 0)]))

assert len(processor.last_step_result.keys()) == 0
# check that steps with immediate dones don't add to dicts
- processor.add_experiences(mock_done_step, 0, ActionInfo.empty())
+ processor.add_experiences(
+     mock_done_decision_step, mock_done_terminal_step, 0, ActionInfo.empty()
+ )
assert len(processor.experience_buffers.keys()) == 0
assert len(processor.last_take_action_outputs.keys()) == 0
assert len(processor.episode_steps.keys()) == 0

"pre_action": [0.1],
"log_probs": [0.1],
}
- mock_step = mb.create_mock_batchedstep(
+ mock_decision_step, mock_terminal_step = mb.create_mock_steps(
num_agents=1,
num_vector_observations=8,
action_shape=[2],

action=[0.1],
value=[0.1],
outputs=fake_action_outputs,
-     agent_ids=mock_step.agent_id,
+     agent_ids=mock_decision_step.agent_id,
- processor.add_experiences(mock_step, 0, ActionInfo.empty())
+ processor.add_experiences(
+     mock_decision_step, mock_terminal_step, 0, ActionInfo.empty()
+ )
- processor.add_experiences(mock_step, _ep, fake_action_info)
+ processor.add_experiences(
+     mock_decision_step, mock_terminal_step, _ep, fake_action_info
+ )
# Make sure we don't add experiences from the prior agents after the done
# Call end episode
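As these tests reflect, AgentProcessor.add_experiences now receives the decision and terminal batches separately, along with the worker id and the previous ActionInfo. The stand-in below is only an illustration of that call shape and of how the two batches are typically consumed; it is not the trainer code itself.

    def add_experiences(decision_steps, terminal_steps, worker_id, previous_action):
        # Episodes that just ended are flushed first...
        for agent_id in terminal_steps.agent_id:
            print(worker_id, int(agent_id), "terminal reward:", terminal_steps[agent_id].reward)
        # ...then agents waiting for a new action are buffered for the next trajectory step.
        for agent_id in decision_steps.agent_id:
            print(worker_id, int(agent_id), "decision reward:", decision_steps[agent_id].reward)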

8
ml-agents/mlagents/trainers/tests/test_demo_loader.py


def test_load_demo():
path_prefix = os.path.dirname(os.path.abspath(__file__))
- group_spec, pair_infos, total_expected = load_demonstration(
+ behavior_spec, pair_infos, total_expected = load_demonstration(
- assert np.sum(group_spec.observation_shapes[0]) == 8
+ assert np.sum(behavior_spec.observation_shapes[0]) == 8
assert len(pair_infos) == total_expected
_, demo_buffer = demo_to_buffer(path_prefix + "/test.demo", 1)

def test_load_demo_dir():
path_prefix = os.path.dirname(os.path.abspath(__file__))
- group_spec, pair_infos, total_expected = load_demonstration(
+ behavior_spec, pair_infos, total_expected = load_demonstration(
- assert np.sum(group_spec.observation_shapes[0]) == 8
+ assert np.sum(behavior_spec.observation_shapes[0]) == 8
assert len(pair_infos) == total_expected
_, demo_buffer = demo_to_buffer(path_prefix + "/test_demo_dir", 1)
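For context, only the name of the loader's first return value changed. A minimal sketch of the updated calls (the path is a placeholder; a recorded .demo file and this branch's mlagents version are assumed):

    from mlagents.trainers.demo_loader import load_demonstration, demo_to_buffer

    behavior_spec, pair_infos, total_expected = load_demonstration("path/to/test.demo")
    print(behavior_spec.observation_shapes, len(pair_infos), total_expected)

    _, demo_buffer = demo_to_buffer("path/to/test.demo", 1)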

12
ml-agents/mlagents/trainers/tests/test_nn_policy.py


"""
Make sure two policies have the same output for the same input.
"""
- step = mb.create_batchedstep_from_brainparams(policy1.brain, num_agents=1)
- run_out1 = policy1.evaluate(step, list(step.agent_id))
- run_out2 = policy2.evaluate(step, list(step.agent_id))
+ decision_step, _ = mb.create_steps_from_brainparams(policy1.brain, num_agents=1)
+ run_out1 = policy1.evaluate(decision_step, list(decision_step.agent_id))
+ run_out2 = policy2.evaluate(decision_step, list(decision_step.agent_id))
np.testing.assert_array_equal(run_out2["log_probs"], run_out1["log_probs"])

policy = create_policy_mock(
dummy_config, use_rnn=rnn, use_discrete=discrete, use_visual=visual
)
- step = mb.create_batchedstep_from_brainparams(policy.brain, num_agents=NUM_AGENTS)
+ decision_step, terminal_step = mb.create_steps_from_brainparams(
+     policy.brain, num_agents=NUM_AGENTS
+ )
- run_out = policy.evaluate(step, list(step.agent_id))
+ run_out = policy.evaluate(decision_step, list(decision_step.agent_id))
if discrete:
run_out["action"].shape == (NUM_AGENTS, len(DISCRETE_ACTION_SPACE))
else:

24
ml-agents/mlagents/trainers/tests/test_policy.py


from mlagents.trainers.policy.tf_policy import TFPolicy
- from mlagents_envs.base_env import BatchedStepResult, AgentGroupSpec
+ from mlagents_envs.base_env import DecisionSteps, BehaviorSpec
from mlagents.trainers.action_info import ActionInfo
from unittest.mock import MagicMock
import numpy as np

test_seed = 3
policy = FakePolicy(test_seed, basic_mock_brain(), basic_params())
# Doesn't really matter what this is
- dummy_groupspec = AgentGroupSpec([(1,)], "continuous", 1)
- no_agent_step = BatchedStepResult.empty(dummy_groupspec)
+ dummy_groupspec = BehaviorSpec([(1,)], "continuous", 1)
+ no_agent_step = DecisionSteps.empty(dummy_groupspec)
result = policy.get_action(no_agent_step)
assert result == ActionInfo.empty()

policy = FakePolicy(test_seed, basic_mock_brain(), basic_params())
policy.evaluate = MagicMock(return_value={})
policy.save_memories = MagicMock()
- step_with_agents = BatchedStepResult(
-     [],
-     np.array([], dtype=np.float32),
-     np.array([False], dtype=np.bool),
-     np.array([], dtype=np.bool),
-     np.array([0]),
-     None,
+ step_with_agents = DecisionSteps(
+     [], np.array([], dtype=np.float32), np.array([0]), None
)
result = policy.get_action(step_with_agents, worker_id=0)
assert result == ActionInfo(None, None, {}, [0])
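A small, hedged illustration of the containers used in this test (the policy itself is mocked, so only the step objects are shown); exact enum and class names may differ slightly between mlagents_envs versions.

    from mlagents_envs.base_env import ActionType, BehaviorSpec, DecisionSteps

    spec = BehaviorSpec([(1,)], ActionType.CONTINUOUS, 1)  # one vector observation of size 1
    empty_steps = DecisionSteps.empty(spec)                # no agents awaiting a decision
    print(len(empty_steps))                                # -> 0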

"value": np.array([1.1], dtype=np.float32),
}
policy.evaluate = MagicMock(return_value=policy_eval_out)
- step_with_agents = BatchedStepResult(
-     [],
-     np.array([], dtype=np.float32),
-     np.array([False], dtype=np.bool),
-     np.array([], dtype=np.bool),
-     np.array([0]),
-     None,
+ step_with_agents = DecisionSteps(
+     [], np.array([], dtype=np.float32), np.array([0]), None
)
result = policy.get_action(step_with_agents)
expected = ActionInfo(

4
ml-agents/mlagents/trainers/tests/test_simple_rl.py


env = SimpleEnvironment(
[BRAIN_NAME], use_discrete=use_discrete, action_size=2, step_size=0.8
)
override_vals = {"buffer_init_steps": 2000, "max_steps": 4000}
override_vals = {"buffer_init_steps": 2000, "max_steps": 10000}
config = generate_config(SAC_CONFIG, override_vals)
_check_environment_trains(env, config, success_threshold=0.8)

step_size=0.2,
)
override_vals = {
"max_steps": 1000,
"max_steps": 500,
"learning_rate": 3.0e-4,
"behavioral_cloning": {"demo_path": demo_path, "strength": 1.0, "steps": 1000},
"reward_signals": {

5
ml-agents/mlagents/trainers/tests/test_subprocess_env_manager.py


agent_manager_mock = mock.Mock()
env_manager.set_agent_manager(brain_name, agent_manager_mock)
- step_info_dict = {brain_name: Mock()}
+ step_info_dict = {brain_name: (Mock(), Mock())}
env_stats = {
"averaged": (1.0, StatsAggregationMethod.AVERAGE),
"most_recent": (2.0, StatsAggregationMethod.MOST_RECENT),

env_manager._step.assert_called_once()
agent_manager_mock.add_experiences.assert_called_once_with(
-     step_info.current_all_step_result[brain_name],
+     step_info.current_all_step_result[brain_name][0],
+     step_info.current_all_step_result[brain_name][1],
0,
step_info.brain_name_to_action_info[brain_name],
)

27
ml-agents/tests/yamato/scripts/run_gym.py


import argparse
import numpy as np
from gym_unity.envs import UnityEnv

Run the gym test using the specified environment
:param env_name: Name of the Unity environment binary to launch
"""
- multi_env = UnityEnv(
-     env_name, worker_id=1, use_visual=False, multiagent=True, no_graphics=True
- )
+ env = UnityEnv(env_name, worker_id=1, use_visual=False, no_graphics=True)
- print(str(multi_env))
+ print(str(env))
- initial_observations = multi_env.reset()
+ initial_observations = env.reset()
- if len(multi_env.observation_space.shape) == 1:
+ if len(env.observation_space.shape) == 1:
-     print("Agent observations look like: \n{}".format(initial_observations[0]))
+     print("Agent observations look like: \n{}".format(initial_observations))
- multi_env.reset()
+ env.reset()
-         actions = [
-             multi_env.action_space.sample()
-             for agent in range(multi_env.number_agents)
-         ]
-         observations, rewards, dones, info = multi_env.step(actions)
-         episode_rewards += np.mean(rewards)
-         done = dones[0]
+         actions = env.action_space.sample()
+         obs, reward, done, _ = env.step(actions)
+         episode_rewards += reward
- multi_env.close()
+ env.close()
if __name__ == "__main__":
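With the multi-agent branch removed from the gym wrapper, a full episode loop now looks like the sketch below. The binary path and worker id are placeholders; a built Unity player is required to actually run it.

    from gym_unity.envs import UnityEnv

    env = UnityEnv("path/to/UnityPlayer", worker_id=2, use_visual=False, no_graphics=True)
    try:
        obs = env.reset()
        done = False
        episode_reward = 0.0
        while not done:
            action = env.action_space.sample()
            obs, reward, done, _ = env.step(action)
            episode_reward += reward
        print("Total reward this episode: {}".format(episode_reward))
    finally:
        env.close()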

29
ml-agents/tests/yamato/scripts/run_llapi.py


env.reset()
# Set the default brain to work with
- group_name = env.get_agent_groups()[0]
- group_spec = env.get_agent_group_spec(group_name)
+ group_name = env.get_behavior_names()[0]
+ group_spec = env.get_behavior_spec(group_name)
- step_result = env.get_step_result(group_name)
+ decision_steps, terminal_steps = env.get_steps(group_name)
# Examine the number of observations per Agent
print("Number of observations : ", len(group_spec.observation_shapes))

print("Is there a visual observation ?", vis_obs)
# Examine the state space for the first observation for the first agent
print("First Agent observation looks like: \n{}".format(step_result.obs[0][0]))
print(
"First Agent observation looks like: \n{}".format(decision_steps.obs[0][0])
)
step_result = env.get_step_result(group_name)
decision_steps, terminal_steps = env.get_steps(group_name)
tracked_agent = -1
-     step_result.n_agents(), group_spec.action_size
+     len(decision_steps), group_spec.action_size
)
elif group_spec.is_action_discrete():

np.random.randint(
-     0, branch_size[i], size=(step_result.n_agents())
+     0, branch_size[i], size=(len(decision_steps))
)
for i in range(len(branch_size))
]

action = None
if tracked_agent == -1 and len(decision_steps) > 1:
tracked_agent = decision_steps.agent_id[0]
-         step_result = env.get_step_result(group_name)
-         episode_rewards += step_result.reward[0]
-         done = step_result.done[0]
+         decision_steps, terminal_steps = env.get_steps(group_name)
+         done = False
+         if tracked_agent in decision_steps:
+             episode_rewards += decision_steps[tracked_agent].reward
+         if tracked_agent in terminal_steps:
+             episode_rewards += terminal_steps[tracked_agent].reward
+             done = True
print("Total reward this episode: {}".format(episode_rewards))
finally:
env.close()
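A condensed, hedged version of the low-level loop above, assuming a continuous-action behavior and using a placeholder binary path; it strings together the new get_behavior_names / get_behavior_spec / get_steps calls end to end.

    import numpy as np
    from mlagents_envs.environment import UnityEnvironment

    env = UnityEnvironment(file_name="path/to/UnityPlayer", no_graphics=True)
    try:
        env.reset()
        behavior_name = env.get_behavior_names()[0]
        spec = env.get_behavior_spec(behavior_name)
        decision_steps, terminal_steps = env.get_steps(behavior_name)
        # One random continuous action per agent that requested a decision.
        action = np.random.uniform(-1.0, 1.0, size=(len(decision_steps), spec.action_size))
        env.set_actions(behavior_name, action)
        env.step()
        decision_steps, terminal_steps = env.get_steps(behavior_name)
        print(len(decision_steps), "agents deciding,", len(terminal_steps), "agents terminated")
    finally:
        env.close()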

18
ml-agents/tests/yamato/standalone_build_tests.py


import sys
import argparse
- def main():
+ def main(scene_path):
-     returncode = run_standalone_build(base_path, verbose=True)
+     executable_name = None
+     if scene_path is not None:
+         executable_name = scene_path.strip(".unity")
+         executable_name = executable_name.split("/")[-1]
+         executable_name = "testPlayer-" + executable_name
+     returncode = run_standalone_build(
+         base_path, verbose=True, output_path=executable_name, scene_path=scene_path
+     )
if returncode == 0:
print("Test run SUCCEEDED!")

if __name__ == "__main__":
-     main()
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--scene", default=None)
+     args = parser.parse_args()
+     main(args.scene)
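One caveat worth flagging in the new name derivation: str.strip(".unity") removes any of those characters from both ends of the string rather than stripping the ".unity" suffix. That happens to work for names like Basic, but a scene name ending in one of those letters would be truncated. A suffix-safe sketch (hypothetical helper, not part of the diff):

    import os

    def executable_name_from_scene(scene_path: str) -> str:
        # Drop the directory and the ".unity" extension, then add the test prefix.
        base = os.path.splitext(os.path.basename(scene_path))[0]
        return "testPlayer-" + base

    print(executable_name_from_scene("Project/Assets/ML-Agents/Examples/Basic/Scenes/Basic.unity"))
    # -> testPlayer-Basic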

7
ml-agents/tests/yamato/yamato_utils.py


def run_standalone_build(
-     base_path: str, verbose: bool = False, output_path: str = None
+     base_path: str,
+     verbose: bool = False,
+     output_path: str = None,
+     scene_path: str = None,
) -> int:
"""
Run BuildStandalonePlayerOSX test to produce a player. The location defaults to Project/testPlayer.

test_args += ["-logfile", "-"]
if output_path is not None:
test_args += ["--mlagents-build-output-path", output_path]
if scene_path is not None:
test_args += ["--mlagents-build-scene-path", scene_path]
print(f"{' '.join(test_args)} ...")
timeout = 30 * 60 # 30 minutes, just in case
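An illustrative call to the extended helper. The paths and output name are placeholders, and the import line depends on how the yamato test scripts are laid out on the caller's side.

    from yamato_utils import run_standalone_build  # assumed local import

    returncode = run_standalone_build(
        base_path=".",
        verbose=True,
        output_path="testPlayer-Basic",
        scene_path="Project/Assets/ML-Agents/Examples/Basic/Scenes/Basic.unity",
    )
    if returncode != 0:
        raise RuntimeError("Standalone build failed")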
