Browse code

Merge branch 'master' into develop-demo-load-seek

/develop/demo-load-seek
GitHub 5 years ago
Current commit
e3af96ca
75 files changed, with 1471 insertions and 689 deletions
1. Project/Assets/ML-Agents/Examples/3DBall/Scripts/Ball3DAgent.cs (2)
2. Project/Assets/ML-Agents/Examples/3DBall/Scripts/Ball3DHardAgent.cs (2)
3. Project/Assets/ML-Agents/Examples/Bouncer/Scripts/BouncerAgent.cs (2)
4. Project/Assets/ML-Agents/Examples/GridWorld/Scripts/GridArea.cs (2)
5. Project/Assets/ML-Agents/Examples/Tennis/Scripts/TennisAgent.cs (2)
6. Project/Assets/ML-Agents/Examples/Walker/Scripts/WalkerAgent.cs (2)
7. README.md (108)
8. com.unity.ml-agents/CHANGELOG.md (2)
9. com.unity.ml-agents/Documentation~/com.unity.ml-agents.md (93)
10. com.unity.ml-agents/Editor/RayPerceptionSensorComponentBaseEditor.cs (10)
11. com.unity.ml-agents/Runtime/Academy.cs (2)
12. com.unity.ml-agents/Runtime/Agent.cs (33)
13. com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs (48)
14. com.unity.ml-agents/Runtime/Inference/ModelRunner.cs (2)
15. com.unity.ml-agents/Runtime/Policies/BehaviorParameters.cs (32)
16. com.unity.ml-agents/Runtime/Policies/BrainParameters.cs (14)
17. com.unity.ml-agents/Runtime/Sensors/CameraSensor.cs (19)
18. com.unity.ml-agents/Runtime/Sensors/CameraSensorComponent.cs (78)
19. com.unity.ml-agents/Runtime/Sensors/RayPerceptionSensorComponent3D.cs (8)
20. com.unity.ml-agents/Runtime/Sensors/RayPerceptionSensorComponentBase.cs (31)
21. com.unity.ml-agents/Runtime/Sensors/RenderTextureSensor.cs (28)
22. com.unity.ml-agents/Runtime/Sensors/RenderTextureSensorComponent.cs (55)
23. com.unity.ml-agents/Runtime/SideChannels/FloatPropertiesChannel.cs (37)
24. config/sac_trainer_config.yaml (2)
25. config/trainer_config.yaml (4)
26. docs/Getting-Started-with-Balance-Ball.md (2)
27. docs/Installation.md (160)
28. docs/Learning-Environment-Best-Practices.md (5)
29. docs/Learning-Environment-Create-New.md (4)
30. docs/Learning-Environment-Design-Agents.md (9)
31. docs/Learning-Environment-Examples.md (4)
32. docs/Limitations.md (35)
33. docs/Migrating.md (4)
34. docs/Readme.md (2)
35. docs/Using-Docker.md (9)
36. docs/Using-Virtual-Environment.md (12)
37. docs/images/unity_package_manager_window.png (951)
38. docs/localized/KR/docs/Installation.md (2)
39. docs/localized/zh-CN/docs/Installation.md (2)
40. ml-agents-envs/mlagents_envs/communicator.py (4)
41. ml-agents-envs/mlagents_envs/environment.py (2)
42. ml-agents-envs/mlagents_envs/exception.py (5)
43. ml-agents-envs/mlagents_envs/rpc_communicator.py (3)
44. ml-agents-envs/mlagents_envs/rpc_utils.py (3)
45. ml-agents/mlagents/trainers/brain.py (4)
46. ml-agents/mlagents/trainers/components/reward_signals/gail/signal.py (3)
47. ml-agents/mlagents/trainers/components/reward_signals/reward_signal_factory.py (4)
48. ml-agents/mlagents/trainers/demo_loader.py (2)
49. ml-agents/mlagents/trainers/ghost/trainer.py (7)
50. ml-agents/mlagents/trainers/learn.py (13)
51. ml-agents/mlagents/trainers/models.py (3)
52. ml-agents/mlagents/trainers/policy/nn_policy.py (5)
53. ml-agents/mlagents/trainers/policy/tf_policy.py (3)
54. ml-agents/mlagents/trainers/ppo/optimizer.py (5)
55. ml-agents/mlagents/trainers/ppo/trainer.py (3)
56. ml-agents/mlagents/trainers/sac/network.py (6)
57. ml-agents/mlagents/trainers/sac/optimizer.py (14)
58. ml-agents/mlagents/trainers/sac/trainer.py (3)
59. ml-agents/mlagents/trainers/trainer/rl_trainer.py (3)
60. ml-agents/mlagents/trainers/trainer/trainer.py (10)
61. com.unity.ml-agents/Editor/CameraSensorComponentEditor.cs (48)
62. com.unity.ml-agents/Editor/CameraSensorComponentEditor.cs.meta (11)
63. com.unity.ml-agents/Editor/RenderTextureSensorComponentEditor.cs (43)
64. com.unity.ml-agents/Editor/RenderTextureSensorComponentEditor.cs.meta (11)
65. com.unity.ml-agents/Tests/Editor/Sensor/RenderTextureSenorTests.cs (34)
66. com.unity.ml-agents/Tests/Editor/Sensor/RenderTextureSenorTests.cs.meta (11)
67. com.unity.ml-agents/Tests/Editor/Sensor/RenderTextureSensorComponentTests.cs (42)
68. com.unity.ml-agents/Tests/Editor/Sensor/RenderTextureSensorComponentTests.cs.meta (11)
69. ml-agents/mlagents/logging_util.py (10)
70. com.unity.ml-agents/Documentation~/TableOfContents.md (3)
71. com.unity.ml-agents/README.md.meta (7)
72. com.unity.ml-agents/README.md (5)
73. /com.unity.ml-agents/Documentation~/com.unity.ml-agents.md (0)
74. /docs/localized/KR/docs/Installation-Anaconda-Windows.md (0)
75. /docs/Installation-Anaconda-Windows.md (0)

Project/Assets/ML-Agents/Examples/3DBall/Scripts/Ball3DAgent.cs (2)


[Header("Specific to Ball3D")]
public GameObject ball;
Rigidbody m_BallRb;
IFloatProperties m_ResetParams;
FloatPropertiesChannel m_ResetParams;
public override void InitializeAgent()
{

Project/Assets/ML-Agents/Examples/3DBall/Scripts/Ball3DHardAgent.cs (2)


[Header("Specific to Ball3DHard")]
public GameObject ball;
Rigidbody m_BallRb;
IFloatProperties m_ResetParams;
FloatPropertiesChannel m_ResetParams;
public override void InitializeAgent()
{

Project/Assets/ML-Agents/Examples/Bouncer/Scripts/BouncerAgent.cs (2)


int m_NumberJumps = 20;
int m_JumpLeft = 20;
IFloatProperties m_ResetParams;
FloatPropertiesChannel m_ResetParams;
public override void InitializeAgent()
{

Project/Assets/ML-Agents/Examples/GridWorld/Scripts/GridArea.cs (2)


public GameObject trueAgent;
IFloatProperties m_ResetParameters;
FloatPropertiesChannel m_ResetParameters;
Camera m_AgentCam;

Project/Assets/ML-Agents/Examples/Tennis/Scripts/TennisAgent.cs (2)


Rigidbody m_AgentRb;
Rigidbody m_BallRb;
float m_InvertMult;
IFloatProperties m_ResetParams;
FloatPropertiesChannel m_ResetParams;
// Looks for the scoreboard based on the name of the gameObjects.
// Do not modify the names of the Score GameObjects

Project/Assets/ML-Agents/Examples/Walker/Scripts/WalkerAgent.cs (2)


Rigidbody m_ChestRb;
Rigidbody m_SpineRb;
IFloatProperties m_ResetParams;
FloatPropertiesChannel m_ResetParams;
public override void InitializeAgent()
{

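These six example-agent diffs make the same mechanical swap: the removed `IFloatProperties` interface field becomes the concrete `FloatPropertiesChannel`. A minimal sketch of the resulting pattern, assuming this commit's API (the `ScaledAgent` class and the `"scale"` key are hypothetical):

```csharp
using UnityEngine;
using MLAgents;
using MLAgents.SideChannels;

public class ScaledAgent : Agent
{
    // Concrete channel type; the IFloatProperties interface is gone.
    FloatPropertiesChannel m_ResetParams;

    public override void InitializeAgent()
    {
        m_ResetParams = Academy.Instance.FloatProperties;
    }

    public override void AgentReset()
    {
        // Read a reset parameter pushed from Python, with a default fallback.
        var scale = m_ResetParams.GetPropertyWithDefault("scale", 1.0f);
        transform.localScale = new Vector3(scale, scale, scale);
    }
}
```
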
README.md (108)


used for multiple purposes, including controlling NPC behavior (in a variety of
settings such as multi-agent and adversarial), automated testing of game builds
and evaluating different game design decisions pre-release. The ML-Agents
toolkit is mutually beneficial for both game developers and AI researchers as it
Toolkit is mutually beneficial for both game developers and AI researchers as it
provides a central platform where advances in AI can be evaluated on Unity’s
rich environments and then made accessible to the wider research and game
developer communities.

* Unity environment control from Python
* 10+ sample Unity environments
* 15+ sample Unity environments
* Two deep reinforcement learning algorithms,
[Proximal Policy Optimization](https://github.com/Unity-Technologies/ml-agents/tree/latest_release/docs/Training-PPO.md)
(PPO) and [Soft Actor-Critic](https://github.com/Unity-Technologies/ml-agents/tree/latest_release/docs/Training-SAC.md)

* Built-in support for Imitation Learning
* Flexible agent control with On Demand Decision Making
* Visualizing network outputs within the environment
* Simplified set-up with Docker
## Documentation
## Releases & Documentation
**Our latest, stable release is 0.14.1. Click
[here](https://github.com/Unity-Technologies/ml-agents/tree/latest_release/docs/Readme.md) to
get started with the latest release of ML-Agents.**
The table below lists all our releases, including our `master` branch which is under active
development and may be unstable. A few helpful guidelines:
* The docs links in the table below include installation and usage instructions specific to each
release. Remember to always use the documentation that corresponds to the release version you're
using.
* See the [GitHub releases](https://github.com/Unity-Technologies/ml-agents/releases) for more
details of the changes between versions.
* If you have used an earlier version of the ML-Agents Toolkit, we strongly recommend our
[guide on migrating from earlier versions](docs/Migrating.md).
* For more information, in addition to installation and usage instructions, see
the [documentation for the latest release](https://github.com/Unity-Technologies/ml-agents/tree/latest_release/docs/Readme.md).
* If you are a researcher interested in a discussion of Unity as an AI platform, see a pre-print of our [reference paper on Unity and the ML-Agents Toolkit](https://arxiv.org/abs/1809.02627). Also, see below for instructions on citing this paper.
* If you have used an earlier version of the ML-Agents toolkit, we strongly
recommend our [guide on migrating from earlier versions](docs/Migrating.md).
| **Version** | **Release Date** | **Source** | **Documentation** | **Download** |
|:-------:|:------:|:-------------:|:-------:|:------------:|
| **master** (unstable) | -- | [source](https://github.com/Unity-Technologies/ml-agents/tree/master) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/master/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/master.zip) |
| **0.14.1** (latest stable release) | February 26, 2020 | **[source](https://github.com/Unity-Technologies/ml-agents/tree/latest_release)** | **[docs](https://github.com/Unity-Technologies/ml-agents/tree/latest_release/docs/Readme.md)** | **[download](https://github.com/Unity-Technologies/ml-agents/archive/latest_release.zip)** |
| **0.14.0** | February 13, 2020 | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.14.0) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.14.0/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.14.0.zip) |
| **0.13.1** | January 21, 2020 | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.13.1) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.13.1/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.13.1.zip) |
| **0.13.0** | January 8, 2020 | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.13.0) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.13.0/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.13.0.zip) |
| **0.12.1** | December 11, 2019 | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.12.1) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.12.1/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.12.1.zip) |
| **0.12.0** | December 2, 2019 | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.12.0) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.12.0/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.12.0.zip) |
| **0.11.0** | November 4, 2019 | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.11.0) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.11.0/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.11.0.zip) |
| **0.10.1** | October 9, 2019 | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.10.1) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.10.1/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.10.1.zip) |
| **0.10.0** | September 30, 2019 | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.10.0) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.10.0/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.10.0.zip) |
## Citation
If you are a researcher interested in a discussion of Unity as an AI platform, see a pre-print
of our [reference paper on Unity and the ML-Agents Toolkit](https://arxiv.org/abs/1809.02627).
If you use Unity or the ML-Agents Toolkit to conduct research, we ask that you cite the following
paper as a reference:
Juliani, A., Berges, V., Vckay, E., Gao, Y., Henry, H., Mattar, M., Lange, D. (2018). Unity: A General Platform for Intelligent Agents. *arXiv preprint arXiv:1809.02627.* https://github.com/Unity-Technologies/ml-agents.
* (February 28, 2020) [Training intelligent adversaries using self-play with ML-Agents](https://blogs.unity3d.com/2020/02/28/training-intelligent-adversaries-using-self-play-with-ml-agents/)
* (November 11, 2019) [Training your agents 7 times faster with ML-Agents](https://blogs.unity3d.com/2019/11/11/training-your-agents-7-times-faster-with-ml-agents/)
* (October 21, 2019) [The AI@Unity interns help shape the world](https://blogs.unity3d.com/2019/10/21/the-aiunity-interns-help-shape-the-world/)
* (April 15, 2019) [Unity ML-Agents Toolkit v0.8: Faster training on real games](https://blogs.unity3d.com/2019/04/15/unity-ml-agents-toolkit-v0-8-faster-training-on-real-games/)
* (March 1, 2019) [Unity ML-Agents Toolkit v0.7: A leap towards cross-platform inference](https://blogs.unity3d.com/2019/03/01/unity-ml-agents-toolkit-v0-7-a-leap-towards-cross-platform-inference/)
* (December 17, 2018) [ML-Agents Toolkit v0.6: Improved usability of Brains and Imitation Learning](https://blogs.unity3d.com/2018/12/17/ml-agents-toolkit-v0-6-improved-usability-of-brains-and-imitation-learning/)
* (October 2, 2018) [Puppo, The Corgi: Cuteness Overload with the Unity ML-Agents Toolkit](https://blogs.unity3d.com/2018/10/02/puppo-the-corgi-cuteness-overload-with-the-unity-ml-agents-toolkit/)
* (September 11, 2018) [ML-Agents Toolkit v0.5, new resources for AI researchers available now](https://blogs.unity3d.com/2018/09/11/ml-agents-toolkit-v0-5-new-resources-for-ai-researchers-available-now/)
* (June 26, 2018) [Solving sparse-reward tasks with Curiosity](https://blogs.unity3d.com/2018/06/26/solving-sparse-reward-tasks-with-curiosity/)
* (June 19, 2018) [Unity ML-Agents Toolkit v0.4 and Udacity Deep Reinforcement Learning Nanodegree](https://blogs.unity3d.com/2018/06/19/unity-ml-agents-toolkit-v0-4-and-udacity-deep-reinforcement-learning-nanodegree/)
* (May 24, 2018) [Imitation Learning in Unity: The Workflow](https://blogs.unity3d.com/2018/05/24/imitation-learning-in-unity-the-workflow/)
* (March 15, 2018) [ML-Agents Toolkit v0.3 Beta released: Imitation Learning, feedback-driven features, and more](https://blogs.unity3d.com/2018/03/15/ml-agents-v0-3-beta-released-imitation-learning-feedback-driven-features-and-more/)
* (December 11, 2017) [Using Machine Learning Agents in a real game: a beginner’s guide](https://blogs.unity3d.com/2017/12/11/using-machine-learning-agents-in-a-real-game-a-beginners-guide/)
* (December 8, 2017) [Introducing ML-Agents Toolkit v0.2: Curriculum Learning, new environments, and more](https://blogs.unity3d.com/2017/12/08/introducing-ml-agents-v0-2-curriculum-learning-new-environments-and-more/)
* (September 19, 2017) [Introducing: Unity Machine Learning Agents Toolkit](https://blogs.unity3d.com/2017/09/19/introducing-unity-machine-learning-agents/)
* [Using Machine Learning Agents in a real game: a beginner’s guide](https://blogs.unity3d.com/2017/12/11/using-machine-learning-agents-in-a-real-game-a-beginners-guide/)
* [Post](https://blogs.unity3d.com/2018/02/28/introducing-the-winners-of-the-first-ml-agents-challenge/)
announcing the winners of our
[first ML-Agents Challenge](https://connect.unity.com/challenges/ml-agents-1)
* [Post](https://blogs.unity3d.com/2018/01/23/designing-safer-cities-through-simulations/)
overviewing how Unity can be leveraged as a simulator to design safer cities.
In addition to our own documentation, here are some additional, relevant articles:

## Community and Feedback
The ML-Agents toolkit is an open-source project and we encourage and welcome
The ML-Agents Toolkit is an open-source project and we encourage and welcome
For problems with the installation and setup of the ML-Agents toolkit, or
For problems with the installation and setup of the ML-Agents Toolkit, or
If you run into any other problems using the ML-Agents toolkit, or have a specific
If you run into any other problems using the ML-Agents Toolkit, or have a specific
Your opinion matters a great deal to us. Only by hearing your thoughts on the Unity ML-Agents Toolkit can we continue
to improve and grow. Please take a few minutes to [let us know about it](https://github.com/Unity-Technologies/ml-agents/issues/1454).
Your opinion matters a great deal to us. Only by hearing your thoughts on the Unity ML-Agents
Toolkit can we continue to improve and grow. Please take a few minutes to
[let us know about it](https://github.com/Unity-Technologies/ml-agents/issues/1454).
## Releases
The latest release is 0.14.1. Previous releases can be found below:
| **Version** | **Source** | **Documentation** | **Download** |
|:-------:|:------:|:-------------:|:-------:|
| **0.14.0** | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.14.0) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.14.0/docs) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.14.0.zip) |
| **0.13.1** | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.13.1) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.13.1/docs) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.13.1.zip) |
| **0.13.0** | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.13.0) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.13.0/docs) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.13.0.zip) |
| **0.12.1** | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.12.1) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.12.1/docs) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.12.1.zip) |
| **0.12.0** | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.12.0) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.12.0/docs) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.12.0.zip) |
| **0.11.0** | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.11.0) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.11.0/docs) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.11.0.zip) |
| **0.10.1** | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.10.1) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.10.1/docs) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.10.1.zip) |
| **0.10.0** | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.10.0) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.10.0/docs) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.10.0.zip) |
See the [GitHub releases](https://github.com/Unity-Technologies/ml-agents/releases) for more details of the changes
between versions.
Please note that the `master` branch is under active development, so the documentation there may differ from the code
of a previous release. Always use the documentation that corresponds to the release version you're using.
## Citation
If you use Unity or the ML-Agents Toolkit to conduct research, we ask that you cite the following paper as a reference:
Juliani, A., Berges, V., Vckay, E., Gao, Y., Henry, H., Mattar, M., Lange, D. (2018). Unity: A General Platform for Intelligent Agents. *arXiv preprint arXiv:1809.02627.* https://github.com/Unity-Technologies/ml-agents.

com.unity.ml-agents/CHANGELOG.md (2)


- The method `GetStepCount()` on the Agent class has been replaced with the property getter `StepCount`
- `RayPerceptionSensorComponent` and related classes now display the debug gizmos whenever the Agent is selected (not just Play mode).
- Most fields on `RayPerceptionSensorComponent` can now be changed while the editor is in Play mode. The exceptions to this are fields that affect the number of observations.
- Most fields on `CameraSensorComponent` and `RenderTextureSensorComponent` were changed to private and replaced by properties with the same name.
- The `IFloatProperties` interface has been removed.
## [0.14.1-preview] - 2020-02-25

com.unity.ml-agents/Documentation~/com.unity.ml-agents.md (93)


Please see the [ML-Agents README](https://github.com/Unity-Technologies/ml-agents/blob/master/README.md)
# About ML-Agents package (`com.unity.ml-agents`)
The Unity ML-Agents package contains the C# SDK for the
[Unity ML-Agents Toolkit](https://github.com/Unity-Technologies/ml-agents).
The package provides the ability for any Unity scene to be converted into a learning
environment where character behaviors can be trained using a variety of machine learning
algorithms. Additionally, it enables any trained behavior to be embedded back into the Unity
scene. More specifically, the package provides the following core functionalities:
* Define Agents: entities whose behavior will be learned. Agents are entities
that generate observations (through sensors), take actions and receive rewards from
the environment.
* Define Behaviors: entities that specify how an agent should act. Multiple agents can
share the same Behavior and a scene may have multiple Behaviors.
* Record demonstrations of an agent within the Editor. These demonstrations can be
valuable for training a behavior for that agent.
* Embed a trained behavior into the scene via the
[Unity Inference Engine](https://docs.unity3d.com/Packages/com.unity.barracuda@latest/index.html).
Thus an Agent can switch from a learning behavior to an inference behavior.
Note that this package does not contain the machine learning algorithms for training
behaviors. It relies on a Python package to orchestrate the training. This package
only enables instrumenting a Unity scene and setting it up for training, and then
embedding the trained model back into your Unity scene.
## Preview package
This package is available as a preview, so it is not ready for production use.
The features and documentation in this package might change before it is verified for release.
## Package contents
The following table describes the package folder structure:
|**Location**|**Description**|
|---|---|
|*Documentation~*|Contains the documentation for the Unity package.|
|*Editor*|Contains utilities for Editor windows and drawers.|
|*Plugins*|Contains third-party DLLs.|
|*Runtime*|Contains core C# APIs for integrating ML-Agents into your Unity scene. |
|*Tests*|Contains the unit tests for the package.|
<a name="Installation"></a>
## Installation
To install this package, follow the instructions in the
[Package Manager documentation](https://docs.unity3d.com/Manual/upm-ui-install.html).
To install the Python package to enable training behaviors, follow the instructions on our
[GitHub repository](https://github.com/Unity-Technologies/ml-agents/blob/latest_release/docs/Installation.md).
## Requirements
This version of the Unity ML-Agents package is compatible with the following versions of the Unity Editor:
* 2018.4 and later (recommended)
## Known limitations
### Headless Mode
If you enable Headless mode, you will not be able to collect visual observations
from your agents.
### Rendering Speed and Synchronization
Currently the speed of the game physics can only be increased to 100x real-time.
The Academy also moves in time with FixedUpdate() rather than Update(), so game
behavior implemented in Update() may be out of sync with the agent decision
making. See
[Execution Order of Event Functions](https://docs.unity3d.com/Manual/ExecutionOrder.html)
for more information.
You can control the frequency of Academy stepping by calling
`Academy.Instance.DisableAutomaticStepping()`, and then calling
`Academy.Instance.EnvironmentStep()`.
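A minimal sketch of the manual-stepping pattern described above (the `ManualStepper` component is hypothetical; the two `Academy` calls are the ones named in this section):

```csharp
using UnityEngine;
using MLAgents;

public class ManualStepper : MonoBehaviour
{
    void Awake()
    {
        // Stop the Academy from stepping itself during FixedUpdate.
        Academy.Instance.DisableAutomaticStepping();
    }

    void FixedUpdate()
    {
        // Advance agent decision-making exactly when this script decides to.
        Academy.Instance.EnvironmentStep();
    }
}
```
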
### Unity Inference Engine Models
Currently, only models created with our trainers are supported for running
ML-Agents with a neural network behavior.
## Helpful links
If you are new to the Unity ML-Agents package, or have a question after reading
the documentation, you can check out our
[GitHub Repository](https://github.com/Unity-Technologies/ml-agents), which
also includes a number of ways to
[connect with us](https://github.com/Unity-Technologies/ml-agents#community-and-feedback)
including our [ML-Agents Forum](https://forum.unity.com/forums/ml-agents.453/).

com.unity.ml-agents/Editor/RayPerceptionSensorComponentBaseEditor.cs (10)


EditorGUI.BeginChangeCheck();
EditorGUI.indentLevel++;
EditorGUILayout.PropertyField(so.FindProperty("m_SensorName"), true);
// Because the number of rays and the tags affect the observation shape,
// they are not editable during play mode.
// Don't allow certain fields to be modified during play mode.
// * SensorName affects the ordering of the Agent's observations
// * The number of tags and rays affects the size of the observations.
EditorGUILayout.PropertyField(so.FindProperty("m_SensorName"), true);
EditorGUILayout.PropertyField(so.FindProperty("m_DetectableTags"), true);
EditorGUILayout.PropertyField(so.FindProperty("m_RaysPerDirection"), true);
}

m_RequireSensorUpdate = true;
}
UpdateSensorIfDirty();
UpdateSensorIfDirty();
}

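A stand-alone sketch of the gating pattern these comments describe, assuming a hypothetical `MySensorComponent`; the real editor may gate its fields differently:

```csharp
using UnityEditor;
using UnityEngine;

[CustomEditor(typeof(MySensorComponent))] // hypothetical component
public class MySensorComponentEditor : Editor
{
    public override void OnInspectorGUI()
    {
        var so = serializedObject;
        so.Update();
        // Fields that change the observation shape stay locked in Play mode.
        EditorGUI.BeginDisabledGroup(Application.isPlaying);
        EditorGUILayout.PropertyField(so.FindProperty("m_RaysPerDirection"), true);
        EditorGUI.EndDisabledGroup();
        so.ApplyModifiedProperties();
    }
}
```
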
com.unity.ml-agents/Runtime/Academy.cs (2)


/// <summary>
/// Collection of float properties (indexed by a string).
/// </summary>
public IFloatProperties FloatProperties;
public FloatPropertiesChannel FloatProperties;
// Fields not provided in the Inspector.

com.unity.ml-agents/Runtime/Agent.cs (33)


}
/// <summary>
/// Updates the type of behavior for the agent.
/// </summary>
/// <param name="behaviorType"> The new behaviorType for the Agent
/// </param>
public void SetBehaviorType(BehaviorType behaviorType)
{
if (m_PolicyFactory.m_BehaviorType == behaviorType)
{
return;
}
m_PolicyFactory.m_BehaviorType = behaviorType;
m_Brain?.Dispose();
m_Brain = m_PolicyFactory.GeneratePolicy(Heuristic);
}
/// <summary>
/// Returns the current step counter (within the current episode).
/// </summary>
/// <returns>

// should stay the previous action before the Done(), so that it is properly recorded.
if (m_Action.vectorActions == null)
{
if (param.vectorActionSpaceType == SpaceType.Continuous)
{
m_Action.vectorActions = new float[param.vectorActionSize[0]];
m_Info.storedVectorActions = new float[param.vectorActionSize[0]];
}
else
{
m_Action.vectorActions = new float[param.vectorActionSize.Length];
m_Info.storedVectorActions = new float[param.vectorActionSize.Length];
}
m_Action.vectorActions = new float[param.numActions];
m_Info.storedVectorActions = new float[param.numActions];
}
}

{
Debug.LogWarning("Heuristic method called but not implemented. Returning placeholder actions.");
var param = m_PolicyFactory.brainParameters;
var actionSize = param.vectorActionSpaceType == SpaceType.Continuous ?
param.vectorActionSize[0] :
param.vectorActionSize.Length;
return new float[actionSize];
return new float[param.numActions];
}
/// <summary>

com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs (48)


"A side channel with type index {0} is already registered. You cannot register multiple " +
"side channels of the same id.", channelId));
}
var numMessages = m_CachedMessages.Count;
for (int i = 0; i < numMessages; i++)
{
var cachedMessage = m_CachedMessages.Dequeue();
if (channelId == cachedMessage.ChannelId)
{
sideChannel.OnMessageReceived(cachedMessage.Message);
}
else
{
m_CachedMessages.Enqueue(cachedMessage);
}
}
m_SideChannels.Add(channelId, sideChannel);
}

}
}
private struct CachedSideChannelMessage
{
public Guid ChannelId;
public byte[] Message;
}
private static Queue<CachedSideChannelMessage> m_CachedMessages = new Queue<CachedSideChannelMessage>();
/// <summary>
/// Separates the data received from Python into individual messages for each registered side channel.
/// </summary>

{
while (m_CachedMessages.Count != 0)
{
var cachedMessage = m_CachedMessages.Dequeue();
if (sideChannels.ContainsKey(cachedMessage.ChannelId))
{
sideChannels[cachedMessage.ChannelId].OnMessageReceived(cachedMessage.Message);
}
else
{
Debug.Log(string.Format(
"Unknown side channel data received. Channel Id is "
+ ": {0}", cachedMessage.ChannelId));
}
}
if (dataReceived.Length == 0)
{
return;

}
else
{
Debug.Log(string.Format(
"Unknown side channel data received. Channel Id is "
+ ": {0}", channelId));
// Don't recognize this ID, but cache it in case the SideChannel that can handle
// it is registered before the next call to ProcessSideChannelData.
m_CachedMessages.Enqueue(new CachedSideChannelMessage
{
ChannelId = channelId,
Message = message
});
}
}
}

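The hunks above replace the old "Unknown side channel data received" drop with a cache: messages addressed to an unregistered channel are queued and replayed once `RegisterSideChannel` is eventually called. A sketch of a custom channel that benefits from that ordering guarantee (the class, the GUID, and the string encoding are hypothetical; it assumes this era's `SideChannel` base class with a protected-settable `ChannelId` and `OnMessageReceived(byte[])`):

```csharp
using System;
using System.Text;
using UnityEngine;
using MLAgents.SideChannels;

public class StringLogChannel : SideChannel
{
    public StringLogChannel()
    {
        ChannelId = new Guid("6f8e9a3c-3c6b-4f8e-9a3c-0123456789ab");
    }

    public override void OnMessageReceived(byte[] data)
    {
        // Any message Python sent to this ID before registration is
        // replayed here as soon as the channel is registered.
        Debug.Log(Encoding.ASCII.GetString(data));
    }
}
```
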
com.unity.ml-agents/Runtime/Inference/ModelRunner.cs (2)


m_VisualObservationsInitialized = true;
}
Profiler.BeginSample("LearningBrain.DecideAction");
Profiler.BeginSample("ModelRunner.DecideAction");
Profiler.BeginSample($"MLAgents.{m_Model.name}.GenerateTensors");
// Prepare the input tensors to be fed into the engine

com.unity.ml-agents/Runtime/Policies/BehaviorParameters.cs (32)


namespace MLAgents.Policies
{
/// <summary>
/// Defines what type of behavior the Agent will be using
/// - Default : The Agent will use the remote process for decision making.
/// If unavailable, it will use inference, and if no model is provided, it will use
/// the heuristic.
/// - HeuristicOnly : The Agent will always use its heuristic
/// - InferenceOnly : The Agent will always use inference with the provided
/// neural network model.
/// </summary>
[Serializable]
public enum BehaviorType
{
Default,
HeuristicOnly,
InferenceOnly
}
/// <summary>
/// The Factory to generate policies.
/// </summary>

[Serializable]
enum BehaviorType
{
Default,
HeuristicOnly,
InferenceOnly
}
[HideInInspector]
[SerializeField]

// and will always have its default value
// This field is set in the custom editor.
#pragma warning disable 0649
BehaviorType m_BehaviorType;
internal BehaviorType m_BehaviorType;
#pragma warning restore 0649
[HideInInspector]
[SerializeField]

/// The team ID for this behavior.
/// </summary>
[HideInInspector]
[SerializeField]
[FormerlySerializedAs("m_TeamID")]
[HideInInspector, SerializeField, FormerlySerializedAs("m_TeamID")]
public int TeamId;
[FormerlySerializedAs("m_useChildSensors")]

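Many of the hunks in this commit follow the renaming recipe seen here: a public field becomes an `m_`-prefixed serialized backing field with a property in front of it, and `[FormerlySerializedAs]` keeps scenes saved under the old field name loading correctly. A self-contained illustration (the component and field names are hypothetical):

```csharp
using UnityEngine;
using UnityEngine.Serialization;

public class RenamedFieldExample : MonoBehaviour
{
    // Still deserializes data saved under the old name "speed",
    // stays hidden in the Inspector, and is exposed via a property.
    [HideInInspector, SerializeField, FormerlySerializedAs("speed")]
    float m_Speed = 1f;

    public float speed
    {
        get { return m_Speed; }
        set { m_Speed = value; }
    }
}
```
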
com.unity.ml-agents/Runtime/Policies/BrainParameters.cs (14)


/// </summary>
public SpaceType vectorActionSpaceType = SpaceType.Discrete;
public int numActions
{
get {
switch (vectorActionSpaceType) {
case SpaceType.Discrete:
return vectorActionSize.Length;
case SpaceType.Continuous:
return vectorActionSize[0];
default:
return 0;
}
}
}
/// <summary>
/// Deep clones the BrainParameter object.
/// </summary>

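The new `numActions` getter folds the discrete/continuous branching into `BrainParameters` itself, which is what lets the `Agent` hunks above collapse their if/else blocks. Hypothetical values showing what it returns (note the Migrating section below records that `BrainParameters` and `SpaceType` left the public API, so code like this now only compiles inside the package):

```csharp
using UnityEngine;
using MLAgents.Policies;

public static class NumActionsDemo
{
    public static void Run()
    {
        var bp = new BrainParameters
        {
            vectorActionSpaceType = SpaceType.Discrete,
            vectorActionSize = new[] { 3, 2, 5 } // three discrete branches
        };
        Debug.Assert(bp.numActions == 3); // one action per branch

        bp.vectorActionSpaceType = SpaceType.Continuous;
        bp.vectorActionSize = new[] { 4 }; // length of the action vector
        Debug.Assert(bp.numActions == 4);
    }
}
```
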
com.unity.ml-agents/Runtime/Sensors/CameraSensor.cs (19)


SensorCompressionType m_CompressionType;
/// <summary>
/// The Camera used for rendering the sensor observations.
/// </summary>
public Camera camera
{
get { return m_Camera; }
set { m_Camera = value; }
}
/// <summary>
/// The compression type used by the sensor.
/// </summary>
public SensorCompressionType compressionType
{
get { return m_CompressionType; }
set { m_CompressionType = value; }
}
/// <summary>
/// Creates and returns the camera sensor.
/// </summary>
/// <param name="camera">Camera object to capture images from.</param>

com.unity.ml-agents/Runtime/Sensors/CameraSensorComponent.cs (78)


using UnityEngine;
using UnityEngine.Serialization;
namespace MLAgents.Sensors
{

[AddComponentMenu("ML Agents/Camera Sensor", (int)MenuGroup.Sensors)]
public class CameraSensorComponent : SensorComponent
{
[HideInInspector, SerializeField, FormerlySerializedAs("camera")]
Camera m_Camera;
CameraSensor m_Sensor;
public new Camera camera;
public new Camera camera
{
get { return m_Camera; }
set { m_Camera = value; UpdateSensor(); }
}
[HideInInspector, SerializeField, FormerlySerializedAs("sensorName")]
string m_SensorName = "CameraSensor";
public string sensorName = "CameraSensor";
public string sensorName
{
get { return m_SensorName; }
internal set { m_SensorName = value; }
}
[HideInInspector, SerializeField, FormerlySerializedAs("width")]
int m_Width = 84;
/// Width of the generated image.
/// Width of the generated observation.
public int width = 84;
public int width
{
get { return m_Width; }
internal set { m_Width = value; }
}
[HideInInspector, SerializeField, FormerlySerializedAs("height")]
int m_Height = 84;
/// Height of the generated image.
/// Height of the generated observation.
public int height = 84;
public int height
{
get { return m_Height; }
internal set { m_Height = value; }
}
[HideInInspector, SerializeField, FormerlySerializedAs("grayscale")]
public bool m_Grayscale;
public bool grayscale;
public bool grayscale
{
get { return m_Grayscale; }
internal set { m_Grayscale = value; }
}
[HideInInspector, SerializeField, FormerlySerializedAs("compression")]
SensorCompressionType m_Compression = SensorCompressionType.PNG;
public SensorCompressionType compression = SensorCompressionType.PNG;
public SensorCompressionType compression
{
get { return m_Compression; }
set { m_Compression = value; UpdateSensor(); }
}
/// <summary>
/// Creates the <see cref="CameraSensor"/>

{
return new CameraSensor(camera, width, height, grayscale, sensorName, compression);
m_Sensor = new CameraSensor(m_Camera, m_Width, m_Height, grayscale, m_SensorName, compression);
return m_Sensor;
}
/// <summary>

public override int[] GetObservationShape()
{
return CameraSensor.GenerateShape(width, height, grayscale);
return CameraSensor.GenerateShape(m_Width, m_Height, grayscale);
}
/// <summary>
/// Update fields that are safe to change on the Sensor at runtime.
/// </summary>
internal void UpdateSensor()
{
if (m_Sensor != null)
{
m_Sensor.camera = m_Camera;
m_Sensor.compressionType = m_Compression;
}
}
}
}

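Because `camera` and `compression` are now properties whose setters call `UpdateSensor()`, changing them at runtime reaches the live `CameraSensor`; a plain public field could not do that. A small hypothetical tweak script:

```csharp
using UnityEngine;
using MLAgents.Sensors;

public class SensorTuner : MonoBehaviour
{
    void Start()
    {
        var sensorComponent = GetComponent<CameraSensorComponent>();
        // Both setters forward the change to the already-created sensor.
        sensorComponent.compression = SensorCompressionType.None;
        sensorComponent.camera = Camera.main;
    }
}
```
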
com.unity.ml-agents/Runtime/Sensors/RayPerceptionSensorComponent3D.cs (8)


[AddComponentMenu("ML Agents/Ray Perception Sensor 3D", (int)MenuGroup.Sensors)]
public class RayPerceptionSensorComponent3D : RayPerceptionSensorComponentBase
{
[HideInInspector]
[SerializeField]
[FormerlySerializedAs("startVerticalOffset")]
[HideInInspector, SerializeField, FormerlySerializedAs("startVerticalOffset")]
[Range(-10f, 10f)]
[Tooltip("Ray start is offset up or down by this amount.")]
float m_StartVerticalOffset;

set { m_StartVerticalOffset = value; UpdateSensor(); }
}
[HideInInspector]
[SerializeField]
[FormerlySerializedAs("endVerticalOffset")]
[HideInInspector, SerializeField, FormerlySerializedAs("endVerticalOffset")]
[Range(-10f, 10f)]
[Tooltip("Ray end is offset up or down by this amount.")]
float m_EndVerticalOffset;

com.unity.ml-agents/Runtime/Sensors/RayPerceptionSensorComponentBase.cs (31)


/// </summary>
public abstract class RayPerceptionSensorComponentBase : SensorComponent
{
[HideInInspector]
[SerializeField]
[FormerlySerializedAs("sensorName")]
[HideInInspector, SerializeField, FormerlySerializedAs("sensorName")]
string m_SensorName = "RayPerceptionSensor";
/// <summary>

internal set => m_SensorName = value;
}
[SerializeField]
[FormerlySerializedAs("detectableTags")]
[SerializeField, FormerlySerializedAs("detectableTags")]
[Tooltip("List of tags in the scene to compare against.")]
List<string> m_DetectableTags;

internal set => m_DetectableTags = value;
}
[HideInInspector]
[SerializeField]
[FormerlySerializedAs("raysPerDirection")]
[HideInInspector, SerializeField, FormerlySerializedAs("raysPerDirection")]
[Range(0, 50)]
[Tooltip("Number of rays to the left and right of center.")]
int m_RaysPerDirection = 3;

internal set => m_RaysPerDirection = value;
}
[HideInInspector]
[SerializeField]
[FormerlySerializedAs("maxRayDegrees")]
[HideInInspector, SerializeField, FormerlySerializedAs("maxRayDegrees")]
[Range(0, 180)]
[Tooltip("Cone size for rays. Using 90 degrees will cast rays to the left and right. " +
"Greater than 90 degrees will go backwards.")]

set { m_MaxRayDegrees = value; UpdateSensor(); }
}
[HideInInspector]
[SerializeField]
[FormerlySerializedAs("sphereCastRadius")]
[HideInInspector, SerializeField, FormerlySerializedAs("sphereCastRadius")]
[Range(0f, 10f)]
[Tooltip("Radius of sphere to cast. Set to zero for raycasts.")]
float m_SphereCastRadius = 0.5f;

set { m_SphereCastRadius = value; UpdateSensor(); }
}
[HideInInspector]
[SerializeField]
[FormerlySerializedAs("rayLength")]
[HideInInspector, SerializeField, FormerlySerializedAs("rayLength")]
[Range(1, 1000)]
[Tooltip("Length of the rays to cast.")]
float m_RayLength = 20f;

set { m_RayLength = value; UpdateSensor(); }
}
[HideInInspector]
[SerializeField]
[FormerlySerializedAs("rayLayerMask")]
[HideInInspector, SerializeField, FormerlySerializedAs("rayLayerMask")]
[Tooltip("Controls which layers the rays can hit.")]
LayerMask m_RayLayerMask = Physics.DefaultRaycastLayers;

set { m_RayLayerMask = value; UpdateSensor();}
}
[HideInInspector]
[SerializeField]
[FormerlySerializedAs("observationStacks")]
[HideInInspector, SerializeField, FormerlySerializedAs("observationStacks")]
[Range(1, 50)]
[Tooltip("Whether to stack previous observations. Using 1 means no previous observations.")]
int m_ObservationStacks = 1;

com.unity.ml-agents/Runtime/Sensors/RenderTextureSensor.cs (28)


SensorCompressionType m_CompressionType;
/// <summary>
/// The compression type used by the sensor.
/// </summary>
public SensorCompressionType compressionType
{
get { return m_CompressionType; }
set { m_CompressionType = value; }
}
/// <summary>
/// Initializes the sensor.
/// </summary>
/// <param name="renderTexture">The <see cref="RenderTexture"/> instance to wrap.</param>

var texture = ObservationToTexture(m_RenderTexture);
// TODO support more types here, e.g. JPG
var compressed = texture.EncodeToPNG();
UnityEngine.Object.Destroy(texture);
DestroyTexture(texture);
return compressed;
}
}

{
var texture = ObservationToTexture(m_RenderTexture);
var numWritten = Utilities.TextureToTensorProxy(texture, adapter, m_Grayscale);
UnityEngine.Object.Destroy(texture);
DestroyTexture(texture);
return numWritten;
}
}

texture2D.Apply();
RenderTexture.active = prevActiveRt;
return texture2D;
}
static void DestroyTexture(Texture2D texture)
{
if (Application.isEditor)
{
// Edit Mode tests complain if we use Destroy()
// TODO move to extension methods for UnityEngine.Object?
UnityEngine.Object.DestroyImmediate(texture);
}
else
{
UnityEngine.Object.Destroy(texture);
}
}
}
}

com.unity.ml-agents/Runtime/Sensors/RenderTextureSensorComponent.cs (55)


using UnityEngine;
using UnityEngine.Serialization;
namespace MLAgents.Sensors
{

[AddComponentMenu("ML Agents/Render Texture Sensor", (int)MenuGroup.Sensors)]
public class RenderTextureSensorComponent : SensorComponent
{
RenderTextureSensor m_Sensor;
public RenderTexture renderTexture;
[HideInInspector, SerializeField, FormerlySerializedAs("renderTexture")]
RenderTexture m_RenderTexture;
public RenderTexture renderTexture
{
get { return m_RenderTexture; }
set { m_RenderTexture = value; }
}
[HideInInspector, SerializeField, FormerlySerializedAs("sensorName")]
string m_SensorName = "RenderTextureSensor";
/// Name of the sensor.
/// Name of the generated <see cref="RenderTextureSensor"/>.
public string sensorName = "RenderTextureSensor";
public string sensorName
{
get { return m_SensorName; }
internal set { m_SensorName = value; }
}
[HideInInspector, SerializeField, FormerlySerializedAs("grayscale")]
public bool m_Grayscale;
public bool grayscale;
public bool grayscale
{
get { return m_Grayscale; }
internal set { m_Grayscale = value; }
}
[HideInInspector, SerializeField, FormerlySerializedAs("compression")]
SensorCompressionType m_Compression = SensorCompressionType.PNG;
public SensorCompressionType compression = SensorCompressionType.PNG;
public SensorCompressionType compression
{
get { return m_Compression; }
set { m_Compression = value; UpdateSensor(); }
}
return new RenderTextureSensor(renderTexture, grayscale, sensorName, compression);
m_Sensor = new RenderTextureSensor(renderTexture, grayscale, sensorName, compression);
return m_Sensor;
}
/// <inheritdoc/>

var height = renderTexture != null ? renderTexture.height : 0;
return new[] { height, width, grayscale ? 1 : 3 };
}
/// <summary>
/// Update fields that are safe to change on the Sensor at runtime.
/// </summary>
internal void UpdateSensor()
{
if (m_Sensor != null)
{
m_Sensor.compressionType = m_Compression;
}
}
}
}

com.unity.ml-agents/Runtime/SideChannels/FloatPropertiesChannel.cs (37)


namespace MLAgents.SideChannels
{
/// <summary>
/// Interface for managing a collection of float properties keyed by a string variable.
/// </summary>
public interface IFloatProperties
{
/// <summary>
/// Sets one of the float properties of the environment. This data will be sent to Python.
/// </summary>
/// <param name="key"> The string identifier of the property.</param>
/// <param name="value"> The float value of the property.</param>
void SetProperty(string key, float value);
/// <summary>
/// Get an Environment property with a default value. If there is a value for this property,
/// it will be returned, otherwise, the default value will be returned.
/// </summary>
/// <param name="key"> The string identifier of the property.</param>
/// <param name="defaultValue"> The default value of the property.</param>
/// <returns></returns>
float GetPropertyWithDefault(string key, float defaultValue);
/// <summary>
/// Registers an action to be performed every time the property is changed.
/// </summary>
/// <param name="key"> The string identifier of the property.</param>
/// <param name="action"> The action that ill be performed. Takes a float as input.</param>
void RegisterCallback(string key, Action<float> action);
/// <summary>
/// Returns a list of all the string identifiers of the properties currently present.
/// </summary>
/// <returns> The list of string identifiers </returns>
IList<string> ListProperties();
}
/// <summary>
public class FloatPropertiesChannel : SideChannel, IFloatProperties
public class FloatPropertiesChannel : SideChannel
{
Dictionary<string, float> m_FloatProperties = new Dictionary<string, float>();
Dictionary<string, Action<float>> m_RegisteredActions = new Dictionary<string, Action<float>>();

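With `IFloatProperties` gone, callers work against `FloatPropertiesChannel` directly; the channel's remaining surface is the same `SetProperty` / `GetPropertyWithDefault` / `RegisterCallback` trio documented above. A hypothetical listener using it end to end:

```csharp
using UnityEngine;
using MLAgents;

public class GravityListener : MonoBehaviour
{
    void Start()
    {
        var props = Academy.Instance.FloatProperties;
        // Run a callback whenever Python updates the "gravity" property.
        props.RegisterCallback("gravity",
            g => Physics.gravity = new Vector3(0f, -g, 0f));
        // Publish a value that the Python side can read back.
        props.SetProperty("ready", 1f);
    }
}
```
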
config/sac_trainer_config.yaml (2)


summary_freq: 20000
PushBlock:
max_steps: 1.5e7
max_steps: 2e6
init_entcoef: 0.05
hidden_units: 256
summary_freq: 60000

config/trainer_config.yaml (4)


Bouncer:
normalize: true
max_steps: 7.0e6
max_steps: 4.0e6
max_steps: 1.5e7
max_steps: 2.0e6
batch_size: 128
buffer_size: 2048
beta: 1.0e-2

docs/Getting-Started-with-Balance-Ball.md (2)


negative reward for dropping the ball. An Agent is also marked as done when it
drops the ball so that it will reset with a new ball for the next simulation
step.
* agent.Heuristic() - When the `Use Heuristic` checkbox is checked in the Behavior
* agent.Heuristic() - When the `Behavior Type` is set to `Heuristic Only` in the Behavior
Parameters of the Agent, the Agent will use the `Heuristic()` method to generate
the actions of the Agent. As such, the `Heuristic()` method returns an array of
floats. In the case of the Ball 3D Agent, the `Heuristic()` method converts the

docs/Installation.md (160)


# Installation
To install and use ML-Agents, you need to install Unity, clone this repository and
install Python with additional dependencies. Each of the subsections below
covers one of these steps, in addition to a Docker set-up.
The ML-Agents Toolkit contains several components:
* Unity package ([`com.unity.ml-agents`](../com.unity.ml-agents/)) contains the Unity C#
SDK that will be integrated into your Unity scene.
* Three Python packages:
* [`mlagents`](../ml-agents/) contains the machine learning algorithms that enable you
to train behaviors in your Unity scene. Most users of ML-Agents will only need to
directly install `mlagents`.
* [`mlagents_envs`](../ml-agents-envs/) contains a Python API to interact with a Unity
scene. It is a foundational layer that facilitates data messaging between the Unity scene
and the Python machine learning algorithms. Consequently, `mlagents` depends on `mlagents_envs`.
* [`gym_unity`](../gym-unity/) provides a Python-wrapper for your Unity scene that
supports the OpenAI Gym interface.
* Unity [Project](../Project/) that contains several
[example environments](Learning-Environment-Examples.md) that highlight the various features
of the toolkit to help you get started.
## Install **Unity 2018.4** or Later
Consequently, to install and use ML-Agents you will need to:
* Install Unity (2018.4 or later)
* Install Python (3.6.1 or higher)
* Clone this repository
* Install the `com.unity.ml-agents` Unity package
* Install the `mlagents` Python package
[Download](https://store.unity.com/download) and install Unity. If you would
like to use our Docker set-up (introduced later), make sure to select the _Linux
Build Support_ component when installing Unity.
### Install **Unity 2018.4** or Later
<p align="center">
<img src="images/unity_linux_build_support.png"
alt="Linux Build Support"
width="500" border="10" />
</p>
[Download](https://unity3d.com/get-unity/download) and install Unity. We strongly recommend
that you install Unity through the Unity Hub as it will enable you to manage multiple Unity
versions.
## Environment Setup
We now support a single mechanism for installing ML-Agents on Mac/Windows/Linux using Virtual
Environments. For more information on Virtual Environments and installation instructions,
follow this [guide](Using-Virtual-Environment.md).
### Install **Python 3.6.1** or Higher
Although we don't support the Anaconda installation path of ML-Agents for Windows, the previous guide
is still in the docs folder. Please refer to [Windows Installation (Deprecated)](Installation-Windows.md).
We recommend [installing](https://www.python.org/downloads/) Python 3.6 or 3.7. If your Python
environment doesn't include `pip3`, see these
[instructions](https://packaging.python.org/guides/installing-using-linux-tools/#installing-pip-setuptools-wheel-with-linux-package-managers)
on installing it.
Although we do not provide support for Anaconda installation on Windows, the previous
[Windows Anaconda Installation (Deprecated) guide](Installation-Anaconda-Windows.md)
is still available.
Once installed, you will want to clone the ML-Agents Toolkit GitHub repository.
Now that you have installed Unity and Python, you will need to clone the
ML-Agents Toolkit GitHub repository to install the Unity package (the Python
packages can be installed directly from PyPI, a Python package registry).
```sh
git clone --branch latest_release https://github.com/Unity-Technologies/ml-agents.git
```

The `com.unity.ml-agents` subdirectory contains the core code to add to your projects.
The `Project` subdirectory contains many [example environments](Learning-Environment-Examples.md)
to help you get started.
### Package Installation
The ML-Agents C# SDK is transitioning to a Unity Package. While we are working on getting it into the
official packages list, you can add the `com.unity.ml-agents` package to your project by
navigating to the menu `Window` -> `Package Manager`. In the package manager window click
on the `+` button.
<p align="center">
<img src="images/unity_package_manager_window.png"
alt="Linux Build Support"
width="500" border="10" />
</p>
### Install the `com.unity.ml-agents` Unity package
**NOTE:** In Unity 2018.4 it's on the bottom right of the packages list, and in Unity 2019.3 it's on the top left of the packages list.
Select `Add package from disk...` and navigate into the
The Unity ML-Agents C# SDK is a Unity Package. We are working on getting it added to the
official Unity package registry which will enable you to install the `com.unity.ml-agents` package
[directly from the registry](https://docs.unity3d.com/Manual/upm-ui-install.html) without cloning
this repository. Until then, you will need to
[install it from the local package](https://docs.unity3d.com/Manual/upm-ui-local.html) that you
just cloned. You can add the `com.unity.ml-agents` package to
your project by navigating to the menu `Window` -> `Package Manager`. In the package manager
window click on the `+` button. Select `Add package from disk...` and navigate into the
**NOTE:** In Unity 2018.4 it's on the bottom right of the packages list, and in Unity 2019.3 it's
on the top left of the packages list.
<img src="images/unity_package_manager_window.png"
alt="Unity Package Manager Window"
height="340" border="10" />
[image: package.json]
The `ml-agents` subdirectory contains a Python package which provides deep reinforcement
learning trainers to use with Unity environments.
The `ml-agents-envs` subdirectory contains a Python API to interface with Unity, which
the `ml-agents` package depends on.
The `gym-unity` subdirectory contains a package to interface with OpenAI Gym.
### Install Python and mlagents Package
In order to use the ML-Agents toolkit, you need Python 3.6.1 or higher.
[Download](https://www.python.org/downloads/) and install the latest version of Python if you do not already have it.
### Install the `mlagents` Python package
If your Python environment doesn't include `pip3`, see these
[instructions](https://packaging.python.org/guides/installing-using-linux-tools/#installing-pip-setuptools-wheel-with-linux-package-managers)
on installing it.
Installing the `mlagents` Python package involves installing other Python packages
that `mlagents` depends on. So you may run into installation issues if your machine
has older versions of any of those dependencies already installed. Consequently, our
supported path for installing `mlagents` is to leverage Python Virtual Environments.
Virtual Environments provide a mechanism for isolating the dependencies for each project
and are supported on Mac / Windows / Linux. We offer a dedicated
[guide on Virtual Environments](Using-Virtual-Environment.md).
To install the `mlagents` Python package, run from the command line:
To install the `mlagents` Python package, activate your virtual environment and run from the
command line:
Note that this will install `ml-agents` from PyPI, _not_ from the cloned repo.
Note that this will install `mlagents` from PyPI, _not_ from the cloned repo.
By installing the `mlagents` package, the dependencies listed in the [setup.py file](../ml-agents/setup.py) are also installed.
Some of the primary dependencies include:
- [TensorFlow](Background-TensorFlow.md) (Requires a CPU w/ AVX support)
- [Jupyter](Background-Jupyter.md)
**Notes:**
- We do not currently support Python 3.5 or lower.
- If you are using Anaconda and are having trouble with TensorFlow, please see
the following
[link](https://www.tensorflow.org/install/pip)
on how to install TensorFlow in an Anaconda environment.
By installing the `mlagents` package, the dependencies listed in the
[setup.py file](../ml-agents/setup.py) are also installed. These include
[TensorFlow](Background-TensorFlow.md) (Requires a CPU w/ AVX support) and
[Jupyter](Background-Jupyter.md).
### Installing for Development
#### Advanced: Installing for Development
If you intend to make modifications to `ml-agents` or `ml-agents-envs`, you should install
If you intend to make modifications to `mlagents` or `mlagents_envs`, you should install
`ml-agents` and `ml-agents-envs` separately. From the repo's root directory, run:
`mlagents` and `mlagents_envs` separately. From the repo's root directory, run:
cd ml-agents-envs
pip3 install -e ./
cd ..
cd ml-agents
pip3 install -e ./
pip3 install -e ./ml-agents-envs
pip3 install -e ./ml-agents
Running pip with the `-e` flag will let you make changes to the Python files directly and have those
reflected when you run `mlagents-learn`. It is important to install these packages in this order as the
`mlagents` package depends on `mlagents_envs`, and installing it in the other
Running pip with the `-e` flag will let you make changes to the Python files directly and have
those reflected when you run `mlagents-learn`. It is important to install these packages in this
order as the `mlagents` package depends on `mlagents_envs`, and installing it in the other
setting up the ML-Agents toolkit within Unity, running a pre-trained model, in
setting up the ML-Agents Toolkit within Unity, running a pre-trained model, in
addition to building and training environments.
## Help

docs/Learning-Environment-Best-Practices.md (5)


lessons which progressively increase in difficulty are presented to the agent
([learn more here](Training-Curriculum-Learning.md)).
* When possible, it is often helpful to ensure that you can complete the task by
using a heuristic to control the agent. To do so, check the `Use Heuristic`
checkbox on the Agent and implement the `Heuristic()` method on the Agent.
using a heuristic to control the agent. To do so, set the `Behavior Type`
to `Heuristic Only` on the Agent's Behavior Parameters, and implement the
`Heuristic()` method on the Agent.
* It is often helpful to make many copies of the agent, and give them the same
`Behavior Name`. In this way the learning process can get more feedback
information from all of these agents, which helps it train faster.

docs/Learning-Environment-Create-New.md (4)


to the values of the "Horizontal" and "Vertical" input axis (which correspond to
the keyboard arrow keys).
In order for the Agent to use the Heuristic, you will need to check the `Use Heuristic`
checkbox in the `Behavior Parameters` of the RollerAgent.
In order for the Agent to use the Heuristic, you will need to set the `Behavior Type`
to `Heuristic Only` in the `Behavior Parameters` of the RollerAgent.
Press **Play** to run the scene and use the arrow keys to move the Agent around

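A sketch of the `Heuristic()` implementation this section describes for the RollerAgent, assuming two continuous actions mapped to the input axes (the class body is illustrative, not the tutorial's full agent):

```csharp
using UnityEngine;
using MLAgents;

public class RollerAgentHeuristicSketch : Agent
{
    // With Behavior Type set to Heuristic Only, this array is used as the
    // action in place of a neural network's output.
    public override float[] Heuristic()
    {
        return new[]
        {
            Input.GetAxis("Horizontal"),
            Input.GetAxis("Vertical")
        };
    }
}
```
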
docs/Learning-Environment-Design-Agents.md (9)


The Policy class abstracts out the decision making logic from the Agent itself so
that you can use the same Policy in multiple Agents. How a Policy makes its
decisions depends on the kind of Policy it is. You can change the Policy of an
Agent by changing its `Behavior Parameters`. If you check `Use Heuristic`, the
Agent will use its `Heuristic()` method to make decisions which can allow you to
control the Agent manually or write your own Policy. If the Agent has a `Model`
file, its Policy will use the neural network `Model` to make decisions.
Agent by changing its `Behavior Parameters`. If you set `Behavior Type` to
`Heuristic Only`, the Agent will use its `Heuristic()` method to make decisions
which can allow you to control the Agent manually or write your own Policy. If
the Agent has a `Model` file, its Policy will use the neural network `Model` to
make decisions.
## Decisions

docs/Learning-Environment-Examples.md (4)


* Goal: The agents must hit the ball so that the opponent cannot hit a valid
return.
* Agents: The environment contains two agents with the same Behavior Parameters.
After training you can check the `Use Heuristic` checkbox on one of the Agents
to play against your trained model.
After training you can set the `Behavior Type` to `Heuristic Only` on one of the Agent's
Behavior Parameters to play against your trained model.
* Agent Reward Function (independent):
* +1.0 To the agent that wins the point. An agent wins a point by preventing
the opponent from hitting a valid return.

docs/Limitations.md (35)


# Limitations
## Unity SDK
### Headless Mode
If you enable Headless mode, you will not be able to collect visual observations
from your agents.
### Rendering Speed and Synchronization
Currently the speed of the game physics can only be increased to 100x real-time.
The Academy also moves in time with FixedUpdate() rather than Update(), so game
behavior implemented in Update() may be out of sync with the agent decision
making. See
[Execution Order of Event Functions](https://docs.unity3d.com/Manual/ExecutionOrder.html)
for more information.
You can control the frequency of Academy stepping by calling
`Academy.Instance.DisableAutomaticStepping()`, and then calling
`Academy.Instance.EnvironmentStep()`.
### Unity Inference Engine Models
Currently, only models created with our trainers are supported for running
ML-Agents with a neural network behavior.
## Python API
### Python version
As of version 0.3, we no longer support Python 2.
See the package-specific Limitations pages:
* [Unity `com.unity.mlagents` package](../com.unity.ml-agents/Documentation~/com.unity.ml-agents.md)
* [`mlagents` Python package](../ml-agents/README.md)
* [`mlagents_envs` Python package](../ml-agents-envs/README.md)
* [`gym_unity` Python package](../gym-unity/README.md)

docs/Migrating.md (4)


* `BrainParameters` and `SpaceType` have been removed from the public API
* `BehaviorParameters` have been removed from the public API.
* `DecisionRequester` has been made internal (you can still use the DecisionRequesterComponent from the inspector). `RepeatAction` was renamed `TakeActionsBetweenDecisions` for clarity.
* The `IFloatProperties` interface has been removed.
### Steps to Migrate
* Add `using MLAgents.Sensors;` in addition to `using MLAgents;` at the top of your Agent's script.

iterate through `RayPerceptionOutput.rayOutputs` and call `RayPerceptionOutput.RayOutput.ToFloatArray()`.
* Re-import all of your `*.NN` files to work with the updated Barracuda package.
* Replace all calls to `Agent.GetStepCount()` with `Agent.StepCount`
* Replace all calls to `Agent.GetStepCount()` with `Agent.StepCount`.
* Replace `IFloatProperties` variables with `FloatPropertiesChannel` variables (see the sketch below).
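As a sketch of the replacement (the `"gravity"` and `"scale"` keys and the consuming component are hypothetical, and it is assumed your version still exposes the channel via `Academy.Instance.FloatProperties`; the accessors mirror those the old `IFloatProperties` interface exposed):

```csharp
using UnityEngine;
using MLAgents;

public class PropertyConsumer : MonoBehaviour
{
    // Previously typed as IFloatProperties; now the concrete channel class.
    FloatPropertiesChannel m_Props;

    void Start()
    {
        m_Props = Academy.Instance.FloatProperties;

        // React whenever the trainer updates "gravity" from the Python side.
        m_Props.RegisterCallback("gravity", value =>
        {
            Physics.gravity = new Vector3(0f, -value, 0f);
        });
    }

    void Update()
    {
        // Read a property, falling back to 1.0 if it was never set.
        transform.localScale = Vector3.one * m_Props.GetPropertyWithDefault("scale", 1.0f);
    }
}
```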
## Migrating from 0.13 to 0.14

2
docs/Readme.md


* [Training on the Cloud with Amazon Web Services](Training-on-Amazon-Web-Service.md)
* [Training on the Cloud with Microsoft Azure](Training-on-Microsoft-Azure.md)
* [Using Docker](Using-Docker.md)
* [Installation-Windows](Installation-Windows.md)
* [Windows Anaconda Installation](Installation-Anaconda-Windows.md)

9
docs/Using-Docker.md


## Requirements
- Unity _Linux Build Support_ Component
- Unity _Linux Build Support_ Component. Make sure to select the _Linux
Build Support_ component when installing Unity.
<p align="center">
<img src="images/unity_linux_build_support.png"
alt="Linux Build Support"
width="500" border="10" />
</p>
## Setup

12
docs/Using-Virtual-Environment.md


spinning up a new environment and verifying the compatibility of the code with the
different version.
Requirement - Python 3.6 must be installed on the machine you would like
to run ML-Agents on (either local laptop/desktop or remote server). Python 3.6 can be
installed from [here](https://www.python.org/downloads/).
## Python Version Requirement (Required)
This guide has been tested with Python 3.6 and 3.7. Python 3.8 is not supported at this time.

1. To activate the environment execute `$ source ~/python-envs/sample-env/bin/activate`
1. Verify that the pip version is the same as in the __Installing Pip__ section. If it is not the latest, upgrade to
the latest pip version using `$ pip3 install --upgrade pip`
1. Install the ML-Agents package using `$ pip3 install mlagents`
1. To deactivate the environment execute `$ deactivate`
1. To deactivate the environment execute `$ deactivate` (you can reactivate the environment
using the same `activate` command listed above)
## Ubuntu Setup

1. To activate the environment execute `python-envs\sample-env\Scripts\activate`
1. Verify that the pip version is the same as in the __Installing Pip__ section. If it is not the
latest, upgrade to the latest pip version using `pip install --upgrade pip`
1. Install the ML-Agents package using `pip install mlagents`
1. To deactivate the environment execute `deactivate`
1. To deactivate the environment execute `deactivate` (you can reactivate the environment
using the same `activate` command listed above)
Note:
- Verify that you are using Python 3.6 or Python 3.7. Launch a command prompt using `cmd` and

951
docs/images/unity_package_manager_window.png

Width: 1002  |  Height: 1150  |  Size: 266 KiB

2
docs/localized/KR/docs/Installation.md


</p>
## Windows Users
To set up the environment on Windows, we have documented the setup steps in the [detailed guide](Installation-Windows.md).
To set up the environment on Windows, we have documented the setup steps in the [detailed guide](Installation-Anaconda-Windows.md).
For Mac and Linux, please refer to the following guide.
## Mac or Unix Users

2
docs/localized/zh-CN/docs/Installation.md


### Windows Users
If you are a Windows user new to Python and TensorFlow, please follow [this guide](/docs/Installation-Windows.md) to set up your Python environment.
If you are a Windows user new to Python and TensorFlow, please follow [this guide](/docs/Installation-Anaconda-Windows.md) to set up your Python environment.
### Mac and Unix Users

4
ml-agents-envs/mlagents_envs/communicator.py


import logging
logger = logging.getLogger("mlagents_envs")
class Communicator(object):

2
ml-agents-envs/mlagents_envs/environment.py


import signal
import struct
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("mlagents_envs")

5
ml-agents-envs/mlagents_envs/exception.py


import logging
logger = logging.getLogger("mlagents_envs")
class UnityException(Exception):
"""
Any error related to ml-agents environment.

3
ml-agents-envs/mlagents_envs/rpc_communicator.py


import logging
import grpc
from typing import Optional

from mlagents_envs.communicator_objects.unity_input_pb2 import UnityInputProto
from mlagents_envs.communicator_objects.unity_output_pb2 import UnityOutputProto
from .exception import UnityTimeOutException, UnityWorkerInUseException
logger = logging.getLogger("mlagents_envs")
class UnityToExternalServicerImplementation(UnityToExternalProtoServicer):

3
ml-agents-envs/mlagents_envs/rpc_utils.py


NONE as COMPRESSION_NONE,
)
from mlagents_envs.communicator_objects.brain_parameters_pb2 import BrainParametersProto
import logging
logger = logging.getLogger("mlagents_envs")
def agent_group_spec_from_proto(

4
ml-agents/mlagents/trainers/brain.py


import logging
logger = logging.getLogger("mlagents.trainers")
class CameraResolution(NamedTuple):

3
ml-agents/mlagents/trainers/components/reward_signals/gail/signal.py


from typing import Any, Dict, List
import logging
import numpy as np
from mlagents.tf_utils import tf

from mlagents.trainers.demo_loader import demo_to_buffer
LOGGER = logging.getLogger("mlagents.trainers")
class GAILRewardSignal(RewardSignal):

4
ml-agents/mlagents/trainers/components/reward_signals/reward_signal_factory.py


import logging
from mlagents.trainers.exception import UnityTrainerException
from mlagents.trainers.components.reward_signals import RewardSignal
from mlagents.trainers.components.reward_signals.extrinsic.signal import (

CuriosityRewardSignal,
)
from mlagents.trainers.policy.tf_policy import TFPolicy
logger = logging.getLogger("mlagents.trainers")
NAME_TO_CLASS: Dict[str, Type[RewardSignal]] = {

2
ml-agents/mlagents/trainers/demo_loader.py


import logging
import os
import gzip
import struct

def __next__(self):
return next(self._gen)
@timed
def make_demo_buffer(

7
ml-agents/mlagents/trainers/ghost/trainer.py


# # Unity ML-Agents Toolkit
# ## ML-Agent Learning (Ghost Trainer)
# import logging
from typing import Deque, Dict, List, Any, cast
import numpy as np

from mlagents.trainers.trajectory import Trajectory
from mlagents.trainers.agent_processor import AgentManagerQueue
LOGGER = logging.getLogger("mlagents.trainers")
logger = logging.getLogger("mlagents.trainers")
class GhostTrainer(Trainer):

Saves training statistics to Tensorboard.
"""
opponents = np.array(self.policy_elos, dtype=np.float32)
LOGGER.info(
logger.info(
" Learning brain {} ELO: {:0.3f}\n"
"Mean Opponent ELO: {:0.3f}"
" Std Opponent ELO: {:0.3f}".format(

x = "current"
self.policy_elos[-1] = self.current_elo
self.current_opponent = -1 if x == "current" else x
LOGGER.debug(
logger.debug(
"Step {}: Swapping snapshot {} to id {} with {} learning".format(
self.get_step, x, name_behavior_id, self.learning_behavior_name
)

13
ml-agents/mlagents/trainers/learn.py


from mlagents_envs.side_channel.side_channel import SideChannel
from mlagents_envs.side_channel.engine_configuration_channel import EngineConfig
from mlagents_envs.exception import UnityEnvironmentException
from mlagents.logging_util import create_logger
def _create_parser():

except Exception:
print("\n\n\tUnity Technologies\n")
print(get_version_string())
trainer_logger = logging.getLogger("mlagents.trainers")
env_logger = logging.getLogger("mlagents_envs")
trainer_logger.setLevel("DEBUG")
env_logger.setLevel("DEBUG")
log_level = logging.DEBUG
trainer_logger.setLevel("INFO")
env_logger.setLevel("INFO")
# disable noisy warnings from tensorflow.
log_level = logging.INFO
# disable noisy warnings from tensorflow
trainer_logger = create_logger("mlagents.trainers", log_level)
trainer_logger.debug("Configuration for this run:")
trainer_logger.debug(json.dumps(options._asdict(), indent=4))

3
ml-agents/mlagents/trainers/models.py


import logging
from enum import Enum
from typing import Callable, Dict, List, Tuple, NamedTuple

from mlagents.trainers.exception import UnityTrainerException
from mlagents.trainers.brain import CameraResolution
logger = logging.getLogger("mlagents.trainers")
ActivationFunction = Callable[[tf.Tensor], tf.Tensor]
EncoderFunction = Callable[

5
ml-agents/mlagents/trainers/policy/nn_policy.py


import logging
from mlagents_envs.timers import timed
from mlagents_envs.base_env import BatchedStepResult
from mlagents.trainers.brain import BrainParameters

GaussianDistribution,
MultiCategoricalDistribution,
)
logger = logging.getLogger("mlagents.trainers")
EPSILON = 1e-6 # Small value to avoid divide by zero

3
ml-agents/mlagents/trainers/policy/tf_policy.py


import logging
from typing import Any, Dict, List, Optional
from mlagents_envs.exception import UnityException
from mlagents.trainers.policy import Policy
from mlagents.trainers.action_info import ActionInfo

5
ml-agents/mlagents/trainers/ppo/optimizer.py


import logging
import numpy as np
from mlagents.tf_utils import tf
from mlagents_envs.timers import timed

from mlagents.trainers.buffer import AgentBuffer
logger = logging.getLogger("mlagents.trainers")
class PPOOptimizer(TFOptimizer):

3
ml-agents/mlagents/trainers/ppo/trainer.py


"""
if self.policy:
logger.warning(
"add_policy has been called twice. {} is not a multi-agent trainer".format(
"Your environment contains multiple teams, but {} doesn't support adversarial games. Enable self-play to \
train adversarial games.".format(
self.__class__.__name__
)
)

6
ml-agents/mlagents/trainers/sac/network.py


import logging
from mlagents.trainers.models import ModelUtils, EncoderType
LOG_STD_MAX = 2

CONTINUOUS_TARGET_ENTROPY_SCALE = 1.0 # TODO: Make these an optional hyperparam.
LOGGER = logging.getLogger("mlagents.trainers")
POLICY_SCOPE = ""
TARGET_SCOPE = "target_network"

14
ml-agents/mlagents/trainers/sac/optimizer.py


EPSILON = 1e-6 # Small value to avoid divide by zero
LOGGER = logging.getLogger("mlagents.trainers")
logger = logging.getLogger("mlagents.trainers")
POLICY_SCOPE = ""
TARGET_SCOPE = "target_network"

self.target_network.value_vars, self.policy_network.value_vars
)
]
LOGGER.debug("value_vars")
logger.debug("value_vars")
LOGGER.debug("targvalue_vars")
logger.debug("targvalue_vars")
LOGGER.debug("critic_vars")
logger.debug("critic_vars")
LOGGER.debug("q_vars")
logger.debug("q_vars")
LOGGER.debug("policy_vars")
logger.debug("policy_vars")
policy_vars = self.policy.get_trainable_variables()
self.print_all_vars(policy_vars)

def print_all_vars(self, variables):
for _var in variables:
LOGGER.debug(_var)
logger.debug(_var)
@timed
def update(self, batch: AgentBuffer, num_sequences: int) -> Dict[str, float]:

3
ml-agents/mlagents/trainers/sac/trainer.py


"""
if self.policy:
logger.warning(
"add_policy has been called twice. {} is not a multi-agent trainer".format(
"Your environment contains multiple teams, but {} doesn't support adversarial games. Enable self-play to \
train adversarial games.".format(
self.__class__.__name__
)
)

3
ml-agents/mlagents/trainers/trainer/rl_trainer.py


# # Unity ML-Agents Toolkit
import logging
from typing import Dict
from collections import defaultdict

from mlagents.trainers.exception import UnityTrainerException
from mlagents.trainers.components.reward_signals import RewardSignalResult
LOGGER = logging.getLogger("mlagents.trainers")
RewardSignalResults = Dict[str, RewardSignalResult]

10
ml-agents/mlagents/trainers/trainer/trainer.py


from mlagents.trainers.exception import UnityTrainerException
from mlagents_envs.timers import hierarchical_timer
LOGGER = logging.getLogger("mlagents.trainers")
logger = logging.getLogger("mlagents.trainers")
class Trainer(abc.ABC):

s = sess.run(s_op)
self.stats_reporter.write_text(s, self.get_step)
except Exception:
LOGGER.info("Could not write text summary for Tensorboard.")
logger.info("Could not write text summary for Tensorboard.")
pass
def _dict_to_str(self, param_dict: Dict[str, Any], num_tabs: int) -> str:

"Environment/Cumulative Reward"
)
if stats_summary.num > 0:
LOGGER.info(
" {}: {}: Step: {}. "
logger.info(
"{}: {}: Step: {}. "
"Time Elapsed: {:0.3f} s "
"Mean "
"Reward: {:0.3f}"

)
set_gauge(f"{self.brain_name}.mean_reward", stats_summary.mean)
else:
LOGGER.info(
logger.info(
" {}: {}: Step: {}. No episode was completed since last summary. {}".format(
self.run_id, self.brain_name, step, is_training
)

48
com.unity.ml-agents/Editor/CameraSensorComponentEditor.cs


using UnityEngine;
using UnityEditor;
using MLAgents.Sensors;
namespace MLAgents.Editor
{
[CustomEditor(typeof(CameraSensorComponent))]
[CanEditMultipleObjects]
internal class CameraSensorComponentEditor : UnityEditor.Editor
{
public override void OnInspectorGUI()
{
var so = serializedObject;
so.Update();
// Drawing the CameraSensorComponent
EditorGUI.BeginChangeCheck();
EditorGUILayout.PropertyField(so.FindProperty("m_Camera"), true);
EditorGUI.BeginDisabledGroup(Application.isPlaying);
{
// These fields affect the sensor order or observation size,
// so they can't be changed at runtime.
EditorGUILayout.PropertyField(so.FindProperty("m_SensorName"), true);
EditorGUILayout.PropertyField(so.FindProperty("m_Width"), true);
EditorGUILayout.PropertyField(so.FindProperty("m_Height"), true);
EditorGUILayout.PropertyField(so.FindProperty("m_Grayscale"), true);
}
EditorGUI.EndDisabledGroup();
EditorGUILayout.PropertyField(so.FindProperty("m_Compression"), true);
var requireSensorUpdate = EditorGUI.EndChangeCheck();
so.ApplyModifiedProperties();
if (requireSensorUpdate)
{
UpdateSensor();
}
}
void UpdateSensor()
{
var sensorComponent = serializedObject.targetObject as CameraSensorComponent;
sensorComponent?.UpdateSensor();
}
}
}

11
com.unity.ml-agents/Editor/CameraSensorComponentEditor.cs.meta


fileFormatVersion: 2
guid: fdda773c024894cf0ae47d1b1396c38d
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

43
com.unity.ml-agents/Editor/RenderTextureSensorComponentEditor.cs


using UnityEngine;
using UnityEditor;
using MLAgents.Sensors;
namespace MLAgents.Editor
{
[CustomEditor(typeof(RenderTextureSensorComponent))]
[CanEditMultipleObjects]
internal class RenderTextureSensorComponentEditor : UnityEditor.Editor
{
public override void OnInspectorGUI()
{
var so = serializedObject;
so.Update();
// Drawing the RenderTextureSensorComponent
EditorGUI.BeginChangeCheck();
EditorGUI.BeginDisabledGroup(Application.isPlaying);
{
EditorGUILayout.PropertyField(so.FindProperty("m_RenderTexture"), true);
EditorGUILayout.PropertyField(so.FindProperty("m_SensorName"), true);
EditorGUILayout.PropertyField(so.FindProperty("m_Grayscale"), true);
}
EditorGUI.EndDisabledGroup();
EditorGUILayout.PropertyField(so.FindProperty("m_Compression"), true);
var requireSensorUpdate = EditorGUI.EndChangeCheck();
so.ApplyModifiedProperties();
if (requireSensorUpdate)
{
UpdateSensor();
}
}
void UpdateSensor()
{
var sensorComponent = serializedObject.targetObject as RenderTextureSensorComponent;
sensorComponent?.UpdateSensor();
}
}
}

11
com.unity.ml-agents/Editor/RenderTextureSensorComponentEditor.cs.meta


fileFormatVersion: 2
guid: dab309e01d2964f0792de3ef914ca6b9
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

34
com.unity.ml-agents/Tests/Editor/Sensor/RenderTextureSenorTests.cs


using System;
using NUnit.Framework;
using UnityEngine;
using MLAgents.Sensors;
namespace MLAgents.Tests
{
[TestFixture]
public class RenderTextureSensorTest
{
[Test]
public void TestRenderTextureSensor()
{
foreach (var grayscale in new[] { true, false })
{
foreach (SensorCompressionType compression in Enum.GetValues(typeof(SensorCompressionType)))
{
var width = 24;
var height = 16;
var texture = new RenderTexture(width, height, 0);
var sensor = new RenderTextureSensor(texture, grayscale, "TestCameraSensor", compression);
var writeAdapter = new WriteAdapter();
var obs = sensor.GetObservationProto(writeAdapter);
Assert.AreEqual((int) compression, (int) obs.CompressionType);
var expectedShape = new[] { height, width, grayscale ? 1 : 3 };
Assert.AreEqual(expectedShape, obs.Shape);
}
}
}
}
}

11
com.unity.ml-agents/Tests/Editor/Sensor/RenderTextureSenorTests.cs.meta


fileFormatVersion: 2
guid: be9f7d8ce17d8407e92d46fbee2ab809
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

42
com.unity.ml-agents/Tests/Editor/Sensor/RenderTextureSensorComponentTests.cs


using System;
using NUnit.Framework;
using UnityEngine;
using MLAgents.Sensors;
namespace MLAgents.Tests
{
[TestFixture]
public class RenderTextureSensorComponentTest
{
[Test]
public void TestRenderTextureSensorComponent()
{
foreach (var grayscale in new[] { true, false })
{
foreach (SensorCompressionType compression in Enum.GetValues(typeof(SensorCompressionType)))
{
var width = 24;
var height = 16;
var texture = new RenderTexture(width, height, 0);
var agentGameObj = new GameObject("agent");
var renderTexComponent = agentGameObj.AddComponent<RenderTextureSensorComponent>();
renderTexComponent.renderTexture = texture;
renderTexComponent.grayscale = grayscale;
renderTexComponent.compression = compression;
var expectedShape = new[] { height, width, grayscale ? 1 : 3 };
Assert.AreEqual(expectedShape, renderTexComponent.GetObservationShape());
Assert.IsTrue(renderTexComponent.IsVisual());
Assert.IsFalse(renderTexComponent.IsVector());
var sensor = renderTexComponent.CreateSensor();
Assert.AreEqual(expectedShape, sensor.GetObservationShape());
Assert.AreEqual(typeof(RenderTextureSensor), sensor.GetType());
}
}
}
}
}

11
com.unity.ml-agents/Tests/Editor/Sensor/RenderTextureSensorComponentTests.cs.meta


fileFormatVersion: 2
guid: 6be53c3cd01244f179a58c96560c54cf
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

10
ml-agents/mlagents/logging_util.py


import logging
def create_logger(name, log_level):
date_format = "%Y-%m-%d %H:%M:%S"
log_format = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
logging.basicConfig(level=log_level, format=log_format, datefmt=date_format)
logger = logging.getLogger(name=name)
return logger

3
com.unity.ml-agents/Documentation~/TableOfContents.md


* [ML-Agents README](https://github.com/Unity-Technologies/ml-agents/blob/master/README.md)
* [Contributing](../CONTRIBUTING.md)
* [Code of Conduct](https://github.com/Unity-Technologies/ml-agents/blob/master/CODE_OF_CONDUCT.md)

7
com.unity.ml-agents/README.md.meta


fileFormatVersion: 2
guid: 19a5b5ea41db446cb8e00644c44e308d
TextScriptImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

5
com.unity.ml-agents/README.md


# Unity ML-Agents SDK
Contains the ML-Agents Unity Project, including
both the core plugin (in `Scripts`) and a set
of example environments (in `Examples`).

/com.unity.ml-agents/Documentation~/index.md → /com.unity.ml-agents/Documentation~/com.unity.ml-agents.md

/docs/localized/KR/docs/Installation-Windows.md → /docs/localized/KR/docs/Installation-Anaconda-Windows.md

/docs/Installation-Windows.md → /docs/Installation-Anaconda-Windows.md
