Browse code

Revert "[add-fire] Merge post-0.19.0 master into add-fire (#4328)" (#4330)

This reverts commit 9913e71b6f35f1e11027a4a571a65533caf285ac.
Branch: develop/add-fire
GitHub · 4 years ago
Current commit: 9d2e4268
65 changed files, with 1,338 additions and 1,967 deletions (per-file changed-line counts in parentheses below)
  1. .pre-commit-config.yaml (7)
  2. .yamato/com.unity.ml-agents-performance.yml (2)
  3. .yamato/com.unity.ml-agents-test.yml (2)
  4. .yamato/gym-interface-test.yml (14)
  5. .yamato/protobuf-generation-test.yml (5)
  6. .yamato/python-ll-api-test.yml (14)
  7. .yamato/standalone-build-test.yml (2)
  8. .yamato/training-int-tests.yml (2)
  9. Project/Assets/ML-Agents/Examples/Crawler/Prefabs/Crawler.prefab (35)
  10. Project/Assets/ML-Agents/Examples/Crawler/Prefabs/FixedPlatform.prefab (5)
  11. Project/Assets/ML-Agents/Examples/Crawler/Scripts/CrawlerAgent.cs (12)
  12. Project/Assets/ML-Agents/Examples/Crawler/TFModels/CrawlerDynamic.nn (1001)
  13. Project/Assets/ML-Agents/Examples/Crawler/TFModels/CrawlerStatic.nn (1001)
  14. Project/Assets/ML-Agents/Examples/PushBlock/Prefabs/PushBlockVisualArea.prefab (37)
  15. Project/ProjectSettings/ProjectVersion.txt (2)
  16. README.md (10)
  17. com.unity.ml-agents.extensions/README.md (2)
  18. com.unity.ml-agents.extensions/Runtime/Sensors/ArticulationBodyPoseExtractor.cs (6)
  19. com.unity.ml-agents.extensions/Runtime/Sensors/PhysicsBodySensor.cs (14)
  20. com.unity.ml-agents.extensions/Runtime/Sensors/PhysicsSensorSettings.cs (31)
  21. com.unity.ml-agents.extensions/Runtime/Sensors/PoseExtractor.cs (211)
  22. com.unity.ml-agents.extensions/Runtime/Sensors/RigidBodyPoseExtractor.cs (77)
  23. com.unity.ml-agents.extensions/Runtime/Sensors/RigidBodySensorComponent.cs (9)
  24. com.unity.ml-agents.extensions/Tests/Editor/Sensors/ArticulationBodySensorTests.cs (11)
  25. com.unity.ml-agents.extensions/Tests/Editor/Sensors/PoseExtractorTests.cs (88)
  26. com.unity.ml-agents.extensions/Tests/Editor/Sensors/RigidBodyPoseExtractorTests.cs (58)
  27. com.unity.ml-agents.extensions/Tests/Editor/Sensors/RigidBodySensorTests.cs (25)
  28. com.unity.ml-agents.extensions/package.json (2)
  29. com.unity.ml-agents/CHANGELOG.md (25)
  30. com.unity.ml-agents/Documentation~/com.unity.ml-agents.md (2)
  31. com.unity.ml-agents/Runtime/Academy.cs (6)
  32. com.unity.ml-agents/Runtime/Agent.cs (26)
  33. com.unity.ml-agents/Runtime/AssemblyInfo.cs (1)
  34. com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs (2)
  35. com.unity.ml-agents/Runtime/DiscreteActionMasker.cs (2)
  36. com.unity.ml-agents/package.json (2)
  37. docs/Installation-Anaconda-Windows.md (4)
  38. docs/Installation.md (6)
  39. docs/Training-Configuration-File.md (2)
  40. docs/Training-ML-Agents.md (81)
  41. docs/Training-on-Amazon-Web-Service.md (2)
  42. docs/Unity-Inference-Engine.md (22)
  43. docs/Using-Tensorboard.md (8)
  44. gym-unity/gym_unity/__init__.py (2)
  45. ml-agents-envs/mlagents_envs/__init__.py (2)
  46. ml-agents-envs/setup.py (2)
  47. ml-agents/mlagents/model_serialization.py (18)
  48. ml-agents/mlagents/trainers/__init__.py (2)
  49. ml-agents/mlagents/trainers/environment_parameter_manager.py (2)
  50. ml-agents/mlagents/trainers/exception.py (8)
  51. ml-agents/mlagents/trainers/learn.py (14)
  52. ml-agents/mlagents/trainers/settings.py (12)
  53. ml-agents/mlagents/trainers/stats.py (59)
  54. ml-agents/mlagents/trainers/tests/test_env_param_manager.py (64)
  55. ml-agents/mlagents/trainers/tests/test_stats.py (42)
  56. ml-agents/mlagents/trainers/trainer/rl_trainer.py (8)
  57. ml-agents/tests/yamato/yamato_utils.py (3)
  58. utils/make_readme_table.py (1)
  59. com.unity.ml-agents.extensions/Runtime/AssemblyInfo.cs (3)
  60. com.unity.ml-agents.extensions/Runtime/AssemblyInfo.cs.meta (11)
  61. com.unity.ml-agents/Runtime/Actuators.meta (8)
  62. com.unity.ml-agents/Tests/Editor/Actuators.meta (8)
  63. docs/images/TensorBoard-download.png (160)

.pre-commit-config.yaml (7)


hooks:
- id: pyupgrade
args: [--py3-plus, --py36-plus]
exclude: >
(?x)^(
.*barracuda.py|
.*_pb2.py|
.*_pb2_grpc.py
)$
exclude: .*barracuda.py
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.5.0

.yamato/com.unity.ml-agents-performance.yml (2)


variables:
UNITY_VERSION: {{ editor.version }}
commands:
- python -m pip install unity-downloader-cli --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
- python -m pip install unity-downloader-cli --extra-index-url https://artifactory.eu-cph-1.unityops.net/api/pypi/common-python/simple
- unity-downloader-cli -u {{ editor.version }} -c editor --wait --fast
- curl -s https://artifactory.internal.unity3d.com/core-automation/tools/utr-standalone/utr --output utr
- chmod +x ./utr

.yamato/com.unity.ml-agents-test.yml (2)


image: {{ platform.image }}
flavor: {{ platform.flavor}}
commands:
- python -m pip install unity-downloader-cli --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple --upgrade
- python -m pip install unity-downloader-cli --extra-index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/unity-pypi-local/simple --upgrade
- unity-downloader-cli -u trunk -c editor --wait --fast
- npm install upm-ci-utils@stable -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
- upm-ci project test -u {{ editor.version }} --project-path Project --package-filter {{ package.name }} {{ coverageOptions }}

.yamato/gym-interface-test.yml (14)


variables:
UNITY_VERSION: {{ editor.version }}
commands:
- pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
- pip install pyyaml
- python -u -m ml-agents.tests.yamato.setup_venv
- ./venv/bin/python ml-agents/tests/yamato/scripts/run_gym.py --env=artifacts/testPlayer-Basic
dependencies:

expression: |
(pull_request.target eq "master" OR
pull_request.target match "release.+") AND
NOT pull_request.draft AND
(pull_request.changes.any match "com.unity.ml-agents/**" OR
pull_request.changes.any match "Project/**" OR
pull_request.changes.any match "ml-agents/**" OR
pull_request.changes.any match "ml-agents-envs/**" OR
pull_request.changes.any match "gym-unity/**" OR
NOT pull_request.draft AND
(pull_request.changes.any match "com.unity.ml-agents/**" OR
pull_request.changes.any match "Project/**" OR
pull_request.changes.any match "ml-agents/**" OR
pull_request.changes.any match "ml-agents-envs/**" OR
pull_request.changes.any match "gym-unity/**" OR
pull_request.changes.any match ".yamato/gym-interface-test.yml") AND
NOT pull_request.changes.all match "**/*.md"
{% endfor %}

.yamato/protobuf-generation-test.yml (5)


nuget install Grpc.Tools -Version $GRPC_VERSION -OutputDirectory protobuf-definitions/
python3 -m venv venv
. venv/bin/activate
pip install --upgrade pip --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
pip install grpcio==1.28.1 grpcio-tools==1.13.0 protobuf==3.11.3 six==1.14.0 mypy-protobuf==1.16.0 --progress-bar=off --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
pip install --upgrade pip
pip install grpcio-tools==1.13.0 --progress-bar=off
pip install mypy-protobuf==1.16.0 --progress-bar=off
cd protobuf-definitions
chmod +x Grpc.Tools.$GRPC_VERSION/tools/macosx_x64/protoc
chmod +x Grpc.Tools.$GRPC_VERSION/tools/macosx_x64/grpc_csharp_plugin

.yamato/python-ll-api-test.yml (14)


variables:
UNITY_VERSION: {{ editor.version }}
commands:
- pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
- pip install pyyaml
- ./venv/bin/python ml-agents/tests/yamato/scripts/run_llapi.py
- ./venv/bin/python ml-agents/tests/yamato/scripts/run_llapi.py
- ./venv/bin/python ml-agents/tests/yamato/scripts/run_llapi.py --env=artifacts/testPlayer-Basic
- ./venv/bin/python ml-agents/tests/yamato/scripts/run_llapi.py --env=artifacts/testPlayer-WallJump
- ./venv/bin/python ml-agents/tests/yamato/scripts/run_llapi.py --env=artifacts/testPlayer-Bouncer

expression: |
(pull_request.target eq "master" OR
pull_request.target match "release.+") AND
NOT pull_request.draft AND
(pull_request.changes.any match "com.unity.ml-agents/**" OR
pull_request.changes.any match "Project/**" OR
pull_request.changes.any match "ml-agents/**" OR
pull_request.changes.any match "ml-agents-envs/**" OR
NOT pull_request.draft AND
(pull_request.changes.any match "com.unity.ml-agents/**" OR
pull_request.changes.any match "Project/**" OR
pull_request.changes.any match "ml-agents/**" OR
pull_request.changes.any match "ml-agents-envs/**" OR
pull_request.changes.any match ".yamato/python-ll-api-test.yml") AND
NOT pull_request.changes.all match "**/*.md"
{% endfor %}

.yamato/standalone-build-test.yml (2)


variables:
UNITY_VERSION: {{ editor.version }}
commands:
- pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
- pip install pyyaml
- python -u -m ml-agents.tests.yamato.standalone_build_tests
- python -u -m ml-agents.tests.yamato.standalone_build_tests --scene=Assets/ML-Agents/Examples/Basic/Scenes/Basic.unity
- python -u -m ml-agents.tests.yamato.standalone_build_tests --scene=Assets/ML-Agents/Examples/Bouncer/Scenes/Bouncer.unity

.yamato/training-int-tests.yml (2)


variables:
UNITY_VERSION: {{ editor.version }}
commands:
- pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
- pip install pyyaml
- python -u -m ml-agents.tests.yamato.training_int_tests
# Backwards-compatibility tests.
# If we make a breaking change to the communication protocol, these will need

Project/Assets/ML-Agents/Examples/Crawler/Prefabs/Crawler.prefab (35)


- component: {fileID: 4845971001715176662}
- component: {fileID: 4845971001715176663}
- component: {fileID: 4845971001715176660}
- component: {fileID: 4622120667686875944}
m_Layer: 0
m_Name: Crawler
m_TagString: Untagged

m_Name:
m_EditorClassIdentifier:
m_BrainParameters:
VectorObservationSize: 21
VectorObservationSize: 138
NumStackedVectorObservations: 1
VectorActionSize: 14000000
VectorActionDescriptions: []

m_Name:
m_EditorClassIdentifier:
debugCommandLineOverride:
--- !u!114 &4622120667686875944
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 4845971001715176661}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: df0f8be9a37d6486498061e2cbc4cd94, type: 3}
m_Name:
m_EditorClassIdentifier:
RootBody: {fileID: 4845971001588102145}
VirtualRoot: {fileID: 2270141184585723037}
Settings:
UseModelSpaceTranslations: 1
UseModelSpaceRotations: 1
UseLocalSpaceTranslations: 0
UseLocalSpaceRotations: 1
UseModelSpaceLinearVelocity: 1
UseLocalSpaceLinearVelocity: 0
UseJointPositionsAndAngles: 0
UseJointForces: 0
sensorName:
--- !u!1 &4845971001730692034
GameObject:
m_ObjectHideFlags: 0

objectReference: {fileID: 0}
m_RemovedComponents: []
m_SourcePrefab: {fileID: 100100000, guid: 72f745913c5a34df5aaadd5c1f0024cb, type: 3}
--- !u!1 &2270141184585723037 stripped
GameObject:
m_CorrespondingSourceObject: {fileID: 2591864627249999519, guid: 72f745913c5a34df5aaadd5c1f0024cb,
type: 3}
m_PrefabInstance: {fileID: 4357529801223143938}
m_PrefabAsset: {fileID: 0}
--- !u!4 &2270141184585723026 stripped
Transform:
m_CorrespondingSourceObject: {fileID: 2591864627249999504, guid: 72f745913c5a34df5aaadd5c1f0024cb,

type: 3}
m_PrefabInstance: {fileID: 4357529801223143938}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 2270141184585723037}
m_GameObject: {fileID: 0}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 771e78c5e980e440e8cd19716b55075f, type: 3}

Project/Assets/ML-Agents/Examples/Crawler/Prefabs/FixedPlatform.prefab (5)


propertyPath: targetToLookAt
value:
objectReference: {fileID: 2673081981996998229}
- target: {fileID: 4622120667686875944, guid: 0456c89e8c9c243d595b039fe7aa0bf9,
type: 3}
propertyPath: Settings.UseLocalSpaceLinearVelocity
value: 1
objectReference: {fileID: 0}
- target: {fileID: 4845971000000621469, guid: 0456c89e8c9c243d595b039fe7aa0bf9,
type: 3}
propertyPath: m_ConnectedAnchor.x

Project/Assets/ML-Agents/Examples/Crawler/Scripts/CrawlerAgent.cs (12)


//GROUND CHECK
sensor.AddObservation(bp.groundContact.touchingGround); // Is this bp touching the ground
//Get velocities in the context of our orientation cube's space
//Note: You can get these velocities in world space as well but it may not train as well.
sensor.AddObservation(orientationCube.transform.InverseTransformDirection(bp.rb.velocity));
sensor.AddObservation(orientationCube.transform.InverseTransformDirection(bp.rb.angularVelocity));
//Get position relative to hips in the context of our orientation cube's space
sensor.AddObservation(orientationCube.transform.InverseTransformDirection(bp.rb.position - body.position));
sensor.AddObservation(bp.rb.transform.localRotation);
sensor.AddObservation(bp.currentStrength / m_JdController.maxJointForceLimit);
}
}

/// </summary>
public override void CollectObservations(VectorSensor sensor)
{
//Add body rotation delta relative to orientation cube
sensor.AddObservation(Quaternion.FromToRotation(body.forward, orientationCube.transform.forward));
//Add pos of target relative to orientation cube
sensor.AddObservation(orientationCube.transform.InverseTransformPoint(target.transform.position));
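For readers skimming the hunk: the pattern exercised here is converting world-space physics quantities into the orientation cube's local frame before observing them. A minimal illustrative fragment, assuming the surrounding CrawlerAgent fields (`orientationCube` as in the hunk); it is not itself part of the diff:

```csharp
// Illustrative only: the same velocity observed in world space vs. in the
// orientation cube's local space. The local form is invariant to the agent's
// heading, which generally trains better.
void ObserveBodyPartVelocity(VectorSensor sensor, Rigidbody rb)
{
    Vector3 worldVel = rb.velocity;                                     // heading-dependent
    Vector3 localVel =
        orientationCube.transform.InverseTransformDirection(worldVel); // heading-invariant
    sensor.AddObservation(localVel);                                    // 3 floats
}
```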

Project/Assets/ML-Agents/Examples/Crawler/TFModels/CrawlerDynamic.nn (1001)
Diff not shown: file too large to display.

Project/Assets/ML-Agents/Examples/Crawler/TFModels/CrawlerStatic.nn (1001)
Diff not shown: file too large to display.

Project/Assets/ML-Agents/Examples/PushBlock/Prefabs/PushBlockVisualArea.prefab (37)


m_Name:
m_EditorClassIdentifier:
m_BrainParameters:
VectorObservationSize: 0
NumStackedVectorObservations: 1
VectorActionSize: 07000000
VectorActionDescriptions: []
VectorActionSpaceType: 0
vectorObservationSize: 0
numStackedVectorObservations: 1
vectorActionSize: 07000000
vectorActionDescriptions: []
vectorActionSpaceType: 0
m_BehaviorName: VisualPushBlock
TeamId: 0
m_UseChildSensors: 1
m_ObservableAttributeHandling: 0
m_BehaviorName: VisualHallway
m_TeamID: 0
m_useChildSensors: 1
--- !u!114 &114812843792483960
MonoBehaviour:
m_ObjectHideFlags: 0

m_Script: {fileID: 11500000, guid: dea8c4f2604b947e6b7b97750dde87ca, type: 3}
m_Name:
m_EditorClassIdentifier:
agentParameters:
maxStep: 0
hasUpgradedFromAgentParameters: 1
MaxStep: 5000
maxStep: 5000
ground: {fileID: 1913379827958244}
area: {fileID: 1632733799967290}
areaBounds:

m_Script: {fileID: 11500000, guid: 282f342c2ab144bf38be65d4d0c4e07d, type: 3}
m_Name:
m_EditorClassIdentifier:
m_Camera: {fileID: 20961401228419460}
m_SensorName: CameraSensor
m_Width: 84
m_Height: 84
m_Grayscale: 0
m_Compression: 1
camera: {fileID: 20961401228419460}
sensorName: CameraSensor
width: 84
height: 84
grayscale: 0
compression: 1
--- !u!114 &9049837659352187721
MonoBehaviour:
m_ObjectHideFlags: 0

m_Name:
m_EditorClassIdentifier:
DecisionPeriod: 5
TakeActionsBetweenDecisions: 1
RepeatAction: 1
offsetStep: 0
--- !u!1 &1626651094211584
GameObject:
m_ObjectHideFlags: 0

Project/ProjectSettings/ProjectVersion.txt (2)


m_EditorVersion: 2018.4.24f1
m_EditorVersion: 2018.4.20f1

README.md (10)


# Unity ML-Agents Toolkit
[![docs badge](https://img.shields.io/badge/docs-reference-blue.svg)](https://github.com/Unity-Technologies/ml-agents/tree/release_5_docs/docs/)
[![docs badge](https://img.shields.io/badge/docs-reference-blue.svg)](https://github.com/Unity-Technologies/ml-agents/tree/release_4_docs/docs/)
[![license badge](https://img.shields.io/badge/license-Apache--2.0-green.svg)](LICENSE)

## Releases & Documentation
**Our latest, stable release is `Release 5`. Click
[here](https://github.com/Unity-Technologies/ml-agents/tree/release_5_docs/docs/Readme.md)
**Our latest, stable release is `Release 4`. Click
[here](https://github.com/Unity-Technologies/ml-agents/tree/release_4_docs/docs/Readme.md)
to get started with the latest release of ML-Agents.**
The table below lists all our releases, including our `master` branch which is

| **Version** | **Release Date** | **Source** | **Documentation** | **Download** |
|:-------:|:------:|:-------------:|:-------:|:------------:|
| **master (unstable)** | -- | [source](https://github.com/Unity-Technologies/ml-agents/tree/master) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/master/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/master.zip) |
| **Release 5** | **July 31, 2020** | **[source](https://github.com/Unity-Technologies/ml-agents/tree/release_5)** | **[docs](https://github.com/Unity-Technologies/ml-agents/tree/release_5_docs/docs/Readme.md)** | **[download](https://github.com/Unity-Technologies/ml-agents/archive/release_5.zip)** |
| **Release 4** | July 15, 2020 | [source](https://github.com/Unity-Technologies/ml-agents/tree/release_4) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/release_4_docs/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/release_4.zip) |
| **Release 4** | **July 15, 2020** | **[source](https://github.com/Unity-Technologies/ml-agents/tree/release_4)** | **[docs](https://github.com/Unity-Technologies/ml-agents/tree/release_4_docs/docs/Readme.md)** | **[download](https://github.com/Unity-Technologies/ml-agents/archive/release_4.zip)** |
| **0.14.1** | February 26, 2020 | [source](https://github.com/Unity-Technologies/ml-agents/tree/0.14.1) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/0.14.1/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/0.14.1.zip) |
## Citation

com.unity.ml-agents.extensions/README.md (2)


# ML-Agents Extensions
This is a source-only package for new features based on ML-Agents.
More details coming soon.

com.unity.ml-agents.extensions/Runtime/Sensors/ArticulationBodyPoseExtractor.cs (6)


parentIndices[i] = bodyToIndex[parentArticBody];
}
Setup(parentIndices);
SetParentIndices(parentIndices);
protected internal override Vector3 GetLinearVelocityAt(int index)
protected override Vector3 GetLinearVelocityAt(int index)
protected internal override Pose GetPoseAt(int index)
protected override Pose GetPoseAt(int index)
{
var body = m_Bodies[index];
var go = body.gameObject;

com.unity.ml-agents.extensions/Runtime/Sensors/PhysicsBodySensor.cs (14)


/// <summary>
/// Construct a new PhysicsBodySensor
/// </summary>
/// <param name="rootBody">The root Rigidbody. This has no Joints on it (but other Joints may connect to it).</param>
/// <param name="rootGameObject">Optional GameObject used to find Rigidbodies in the hierarchy.</param>
/// <param name="virtualRoot">Optional GameObject used to determine the root of the poses,
/// <param name="rootBody"></param>
public PhysicsBodySensor(
Rigidbody rootBody,
GameObject rootGameObject,
GameObject virtualRoot,
PhysicsSensorSettings settings,
string sensorName=null
)
public PhysicsBodySensor(Rigidbody rootBody, GameObject rootGameObject, PhysicsSensorSettings settings, string sensorName=null)
var poseExtractor = new RigidBodyPoseExtractor(rootBody, rootGameObject, virtualRoot);
var poseExtractor = new RigidBodyPoseExtractor(rootBody, rootGameObject);
m_PoseExtractor = poseExtractor;
m_SensorName = string.IsNullOrEmpty(sensorName) ? $"PhysicsBodySensor:{rootBody?.name}" : sensorName;
m_Settings = settings;
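A minimal sketch of how the virtual-root overload being removed here was called, reconstructed from the constructor signature above. The component below, its field values, and the sensor name are assumptions, not part of the diff:

```csharp
using UnityEngine;
using Unity.MLAgents.Sensors;
using Unity.MLAgents.Extensions.Sensors;

public class BodySensorSetup : MonoBehaviour
{
    public Rigidbody rootBody;     // hip/root Rigidbody of the ragdoll
    public GameObject virtualRoot; // stabilized reference frame for locomotion

    ISensor MakeSensor()
    {
        // Field names match the PhysicsSensorSettings flags shown in this diff.
        var settings = new PhysicsSensorSettings
        {
            UseModelSpaceTranslations = true,
            UseModelSpaceRotations = true,
            UseModelSpaceLinearVelocity = true
        };
        return new PhysicsBodySensor(rootBody, gameObject, virtualRoot, settings, "BodySensor");
    }
}
```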

com.unity.ml-agents.extensions/Runtime/Sensors/PhysicsSensorSettings.cs (31)


using System;
using Unity.MLAgents.Sensors;
namespace Unity.MLAgents.Extensions.Sensors

var offset = baseOffset;
if (settings.UseModelSpace)
{
foreach (var pose in poseExtractor.GetEnabledModelSpacePoses())
var poses = poseExtractor.ModelSpacePoses;
var vels = poseExtractor.ModelSpaceVelocities;
for(var i=0; i<poseExtractor.NumPoses; i++)
if (settings.UseModelSpaceTranslations)
var pose = poses[i];
if(settings.UseModelSpaceTranslations)
}
foreach(var vel in poseExtractor.GetEnabledModelSpaceVelocities())
{
writer.Add(vel, offset);
writer.Add(vels[i], offset);
offset += 3;
}
}

{
foreach (var pose in poseExtractor.GetEnabledLocalSpacePoses())
var poses = poseExtractor.LocalSpacePoses;
var vels = poseExtractor.LocalSpaceVelocities;
for(var i=0; i<poseExtractor.NumPoses; i++)
if (settings.UseLocalSpaceTranslations)
var pose = poses[i];
if(settings.UseLocalSpaceTranslations)
}
foreach(var vel in poseExtractor.GetEnabledLocalSpaceVelocities())
{
writer.Add(vel, offset);
writer.Add(vels[i], offset);
offset += 3;
}
}
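Because the hunk interleaves both versions of the method, here is a condensed reconstruction of the index-based model-space write loop that the revert restores. The translation branch's body is inferred from the surrounding code; `writer` and `offset` are as in the method above:

```csharp
// Reconstructed sketch, not a verbatim quote of either side of the diff.
var poses = poseExtractor.ModelSpacePoses;
var vels  = poseExtractor.ModelSpaceVelocities;
for (var i = 0; i < poseExtractor.NumPoses; i++)
{
    if (settings.UseModelSpaceTranslations)
    {
        writer.Add(poses[i].position, offset); // 3 floats per pose
        offset += 3;
    }
    if (settings.UseModelSpaceLinearVelocity)
    {
        writer.Add(vels[i], offset);           // 3 floats per pose
        offset += 3;
    }
}
```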

com.unity.ml-agents.extensions/Runtime/Sensors/PoseExtractor.cs (211)


Vector3[] m_ModelSpaceLinearVelocities;
Vector3[] m_LocalSpaceLinearVelocities;
bool[] m_PoseEnabled;
/// Read iterator for the enabled model space transforms.
/// Read access to the model space transforms.
public IEnumerable<Pose> GetEnabledModelSpacePoses()
public IList<Pose> ModelSpacePoses
if (m_ModelSpacePoses == null)
{
yield break;
}
for (var i = 0; i < m_ModelSpacePoses.Length; i++)
{
if (m_PoseEnabled[i])
{
yield return m_ModelSpacePoses[i];
}
}
get { return m_ModelSpacePoses; }
/// Read iterator for the enabled local space transforms.
/// Read access to the local space transforms.
public IEnumerable<Pose> GetEnabledLocalSpacePoses()
public IList<Pose> LocalSpacePoses
if (m_LocalSpacePoses == null)
{
yield break;
}
for (var i = 0; i < m_LocalSpacePoses.Length; i++)
{
if (m_PoseEnabled[i])
{
yield return m_LocalSpacePoses[i];
}
}
}
/// <summary>
/// Read iterator for the enabled model space linear velocities.
/// </summary>
public IEnumerable<Vector3> GetEnabledModelSpaceVelocities()
{
if (m_ModelSpaceLinearVelocities == null)
{
yield break;
}
for (var i = 0; i < m_ModelSpaceLinearVelocities.Length; i++)
{
if (m_PoseEnabled[i])
{
yield return m_ModelSpaceLinearVelocities[i];
}
}
get { return m_LocalSpacePoses; }
/// Read iterator for the enabled local space linear velocities.
/// Read access to the model space linear velocities.
public IEnumerable<Vector3> GetEnabledLocalSpaceVelocities()
public IList<Vector3> ModelSpaceVelocities
if (m_LocalSpaceLinearVelocities == null)
{
yield break;
}
for (var i = 0; i < m_LocalSpaceLinearVelocities.Length; i++)
{
if (m_PoseEnabled[i])
{
yield return m_LocalSpaceLinearVelocities[i];
}
}
get { return m_ModelSpaceLinearVelocities; }
/// Number of enabled poses in the hierarchy (read-only).
/// Read access to the local space linear velocities.
public int NumEnabledPoses
public IList<Vector3> LocalSpaceVelocities
get
{
if (m_PoseEnabled == null)
{
return 0;
}
var numEnabled = 0;
for (var i = 0; i < m_PoseEnabled.Length; i++)
{
numEnabled += m_PoseEnabled[i] ? 1 : 0;
}
return numEnabled;
}
get { return m_LocalSpaceLinearVelocities; }
/// Number of total poses in the hierarchy (read-only).
/// Number of poses in the hierarchy (read-only).
get { return m_ModelSpacePoses?.Length ?? 0; }
get { return m_ModelSpacePoses?.Length ?? 0; }
}
/// <summary>

}
return m_ParentIndices[index];
}
/// <summary>
/// Set whether the pose at the given index is enabled or disabled for observations.
/// </summary>
/// <param name="index"></param>
/// <param name="val"></param>
public void SetPoseEnabled(int index, bool val)
{
m_PoseEnabled[index] = val;
}
/// <summary>

/// <param name="parentIndices"></param>
protected void Setup(int[] parentIndices)
protected void SetParentIndices(int[] parentIndices)
#if DEBUG
if (parentIndices[0] != -1)
{
throw new UnityAgentsException($"Expected parentIndices[0] to be -1, got {parentIndices[0]}");
}
#endif
var numPoses = parentIndices.Length;
m_ModelSpacePoses = new Pose[numPoses];
m_LocalSpacePoses = new Pose[numPoses];
var numTransforms = parentIndices.Length;
m_ModelSpacePoses = new Pose[numTransforms];
m_LocalSpacePoses = new Pose[numTransforms];
m_ModelSpaceLinearVelocities = new Vector3[numPoses];
m_LocalSpaceLinearVelocities = new Vector3[numPoses];
m_PoseEnabled = new bool[numPoses];
// All poses are enabled by default. Generally we'll want to disable the root though.
for (var i = 0; i < numPoses; i++)
{
m_PoseEnabled[i] = true;
}
m_ModelSpaceLinearVelocities = new Vector3[numTransforms];
m_LocalSpaceLinearVelocities = new Vector3[numTransforms];
}
/// <summary>

/// <returns></returns>
protected internal abstract Pose GetPoseAt(int index);
protected abstract Pose GetPoseAt(int index);
/// <summary>
/// Return the world space linear velocity of the i'th object.

protected internal abstract Vector3 GetLinearVelocityAt(int index);
protected abstract Vector3 GetLinearVelocityAt(int index);
/// <summary>

{
using (TimerStack.Instance.Scoped("UpdateModelSpacePoses"))
if (m_ModelSpacePoses == null)
if (m_ModelSpacePoses == null)
{
return;
}
return;
}
var rootWorldTransform = GetPoseAt(0);
var worldToModel = rootWorldTransform.Inverse();
var rootLinearVel = GetLinearVelocityAt(0);
var rootWorldTransform = GetPoseAt(0);
var worldToModel = rootWorldTransform.Inverse();
var rootLinearVel = GetLinearVelocityAt(0);
for (var i = 0; i < m_ModelSpacePoses.Length; i++)
{
var currentWorldSpacePose = GetPoseAt(i);
var currentModelSpacePose = worldToModel.Multiply(currentWorldSpacePose);
m_ModelSpacePoses[i] = currentModelSpacePose;
for (var i = 0; i < m_ModelSpacePoses.Length; i++)
{
var currentWorldSpacePose = GetPoseAt(i);
var currentModelSpacePose = worldToModel.Multiply(currentWorldSpacePose);
m_ModelSpacePoses[i] = currentModelSpacePose;
var currentBodyLinearVel = GetLinearVelocityAt(i);
var relativeVelocity = currentBodyLinearVel - rootLinearVel;
m_ModelSpaceLinearVelocities[i] = worldToModel.rotation * relativeVelocity;
}
var currentBodyLinearVel = GetLinearVelocityAt(i);
var relativeVelocity = currentBodyLinearVel - rootLinearVel;
m_ModelSpaceLinearVelocities[i] = worldToModel.rotation * relativeVelocity;
}
}

public void UpdateLocalSpacePoses()
{
using (TimerStack.Instance.Scoped("UpdateLocalSpacePoses"))
if (m_LocalSpacePoses == null)
if (m_LocalSpacePoses == null)
{
return;
}
return;
}
for (var i = 0; i < m_LocalSpacePoses.Length; i++)
for (var i = 0; i < m_LocalSpacePoses.Length; i++)
{
if (m_ParentIndices[i] != -1)
if (m_ParentIndices[i] != -1)
{
var parentTransform = GetPoseAt(m_ParentIndices[i]);
// This is slightly inefficient, since for a body with multiple children, we'll end up inverting
// the transform multiple times. Might be able to trade space for perf here.
var invParent = parentTransform.Inverse();
var currentTransform = GetPoseAt(i);
m_LocalSpacePoses[i] = invParent.Multiply(currentTransform);
var parentTransform = GetPoseAt(m_ParentIndices[i]);
// This is slightly inefficient, since for a body with multiple children, we'll end up inverting
// the transform multiple times. Might be able to trade space for perf here.
var invParent = parentTransform.Inverse();
var currentTransform = GetPoseAt(i);
m_LocalSpacePoses[i] = invParent.Multiply(currentTransform);
var parentLinearVel = GetLinearVelocityAt(m_ParentIndices[i]);
var currentLinearVel = GetLinearVelocityAt(i);
m_LocalSpaceLinearVelocities[i] = invParent.rotation * (currentLinearVel - parentLinearVel);
}
else
{
m_LocalSpacePoses[i] = Pose.identity;
m_LocalSpaceLinearVelocities[i] = Vector3.zero;
}
var parentLinearVel = GetLinearVelocityAt(m_ParentIndices[i]);
var currentLinearVel = GetLinearVelocityAt(i);
m_LocalSpaceLinearVelocities[i] = invParent.rotation * (currentLinearVel - parentLinearVel);
}
else
{
m_LocalSpacePoses[i] = Pose.identity;
m_LocalSpaceLinearVelocities[i] = Vector3.zero;
}
}
}

obsPerPose += settings.UseModelSpaceLinearVelocity ? 3 : 0;
obsPerPose += settings.UseLocalSpaceLinearVelocity ? 3 : 0;
return NumEnabledPoses * obsPerPose;
return NumPoses * obsPerPose;
}
internal void DrawModelSpace(Vector3 offset)
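The `m_PoseEnabled` bookkeeping being deleted above changes how callers size their observations. A hypothetical caller of the removed API (names taken from the hunk; the usage itself is assumed):

```csharp
// All poses start enabled; the root is usually switched off because its
// model-space pose is always the identity.
poseExtractor.SetPoseEnabled(0, false);
// With the enabled-pose API this counts NumEnabledPoses * obsPerPose;
// after the revert it counts NumPoses * obsPerPose.
var obsSize = poseExtractor.GetNumPoseObservations(settings);
```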

com.unity.ml-agents.extensions/Runtime/Sensors/RigidBodyPoseExtractor.cs (77)


Rigidbody[] m_Bodies;
/// <summary>
/// Optional game object used to determine the root of the poses, separate from the actual Rigidbodies
/// in the hierarchy. For locomotion
/// </summary>
GameObject m_VirtualRoot;
/// <summary>
/// <param name="rootBody">The root Rigidbody. This has no Joints on it (but other Joints may connect to it).</param>
/// <param name="rootGameObject">Optional GameObject used to find Rigidbodies in the hierarchy.</param>
/// <param name="virtualRoot">Optional GameObject used to determine the root of the poses,
/// separate from the actual Rigidbodies in the hierarchy. For locomotion tasks, with ragdolls, this provides
/// a stabilized reference frame, which can improve learning.</param>
public RigidBodyPoseExtractor(Rigidbody rootBody, GameObject rootGameObject = null, GameObject virtualRoot = null)
/// <param name="rootBody"></param>
public RigidBodyPoseExtractor(Rigidbody rootBody, GameObject rootGameObject = null)
{
if (rootBody == null)
{

{
rbs = rootGameObject.GetComponentsInChildren<Rigidbody>();
}
var bodyToIndex = new Dictionary<Rigidbody, int>(rbs.Length);
var parentIndices = new int[rbs.Length];
if (rbs == null || rbs.Length == 0)
{
Debug.Log("No rigid bodies found!");
return;
}
if (rbs[0] != rootBody)
if (rbs[0] != rootBody)
// Adjust the array if we have a virtual root.
// This will be at index 0, and the "real" root will be parented to it.
if (virtualRoot != null)
{
var extendedRbs = new Rigidbody[rbs.Length + 1];
for (var i = 0; i < rbs.Length; i++)
{
extendedRbs[i + 1] = rbs[i];
}
rbs = extendedRbs;
}
var bodyToIndex = new Dictionary<Rigidbody, int>(rbs.Length);
var parentIndices = new int[rbs.Length];
parentIndices[0] = -1;
if(rbs[i] != null)
{
bodyToIndex[rbs[i]] = i;
}
bodyToIndex[rbs[i]] = i;
}
var joints = rootBody.GetComponentsInChildren <Joint>();

parentIndices[childIndex] = parentIndex;
}
if (virtualRoot != null)
{
// Make sure the original root treats the virtual root as its parent.
parentIndices[1] = 0;
m_VirtualRoot = virtualRoot;
}
Setup(parentIndices);
// By default, ignore the root
SetPoseEnabled(0, false);
SetParentIndices(parentIndices);
protected internal override Vector3 GetLinearVelocityAt(int index)
protected override Vector3 GetLinearVelocityAt(int index)
if (index == 0 && m_VirtualRoot != null)
{
// No velocity on the virtual root
return Vector3.zero;
}
protected internal override Pose GetPoseAt(int index)
protected override Pose GetPoseAt(int index)
if (index == 0 && m_VirtualRoot != null)
{
// Use the GameObject's world transform
return new Pose
{
rotation = m_VirtualRoot.transform.rotation,
position = m_VirtualRoot.transform.position
};
}
var body = m_Bodies[index];
return new Pose { rotation = body.rotation, position = body.position };
}
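To make the virtual-root bookkeeping above concrete, a worked sketch of the resulting index layout; it mirrors the `TestTwoBodiesVirtualRoot` test further down, and `rb1` and `virtualRoot` are placeholders (the accessors are internal, visible to the test assembly):

```csharp
// With bodies [rb1, rb2] and a virtual root, the extractor's arrays become:
//   index 0 -> virtualRoot: pose read from its Transform, linear velocity always zero
//   index 1 -> rb1, re-parented to index 0
//   index 2 -> rb2
var extractor = new RigidBodyPoseExtractor(rb1, null, virtualRoot);
Debug.Assert(extractor.GetParentIndex(0) == -1);               // virtual root is the new root
Debug.Assert(extractor.GetParentIndex(1) == 0);                // real root now has a parent
Debug.Assert(extractor.GetLinearVelocityAt(0) == Vector3.zero); // no velocity on the virtual root
```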

com.unity.ml-agents.extensions/Runtime/Sensors/RigidBodySensorComponent.cs (9)


public Rigidbody RootBody;
/// <summary>
/// Optional GameObject used to determine the root of the poses.
/// </summary>
public GameObject VirtualRoot;
/// <summary>
/// Settings defining what types of observations will be generated.
/// </summary>
[SerializeField]

/// <returns></returns>
public override ISensor CreateSensor()
{
return new PhysicsBodySensor(RootBody, gameObject, VirtualRoot, Settings, sensorName);
return new PhysicsBodySensor(RootBody, gameObject, Settings, sensorName);
}
/// <inheritdoc/>

// TODO static method in PhysicsBodySensor?
// TODO only update PoseExtractor when body changes?
var poseExtractor = new RigidBodyPoseExtractor(RootBody, gameObject, VirtualRoot);
var poseExtractor = new RigidBodyPoseExtractor(RootBody, gameObject);
var numPoseObservations = poseExtractor.GetNumPoseObservations(Settings);
var numJointObservations = 0;

com.unity.ml-agents.extensions/Tests/Editor/Sensors/ArticulationBodySensorTests.cs (11)


// Local space
0f, 0f, 0f, // Root pos
#if UNITY_2020_2_OR_NEWER
0f, 0f, 0f, // Root vel
#endif
#if UNITY_2020_2_OR_NEWER
-1f, 1f, 0f, // Attached vel
#endif
0f, 0f, 0f, // Root vel
-1f, 1f, 0f, // Attached vel
0f, -1f, 1f // Leaf vel
#endif
};

com.unity.ml-agents.extensions/Tests/Editor/Sensors/PoseExtractorTests.cs (88)


{
class UselessPoseExtractor : PoseExtractor
{
protected internal override Pose GetPoseAt(int index)
protected override Pose GetPoseAt(int index)
protected internal override Vector3 GetLinearVelocityAt(int index)
protected override Vector3 GetLinearVelocityAt(int index)
{
return Vector3.zero;
}

Setup(parentIndices);
SetParentIndices(parentIndices);
}
}

{
parents[i] = i - 1;
}
Setup(parents);
SetParentIndices(parents);
protected internal override Pose GetPoseAt(int index)
protected override Pose GetPoseAt(int index)
{
var rotation = Quaternion.identity;
var translation = offset + new Vector3(index, index, index);

};
}
protected internal override Vector3 GetLinearVelocityAt(int index)
protected override Vector3 GetLinearVelocityAt(int index)
{
return Vector3.zero;
}

chain.UpdateModelSpacePoses();
chain.UpdateLocalSpacePoses();
var modelPoseIndex = 0;
foreach (var modelSpace in chain.GetEnabledModelSpacePoses())
{
if (modelPoseIndex == 0)
{
// Root transforms are currently always the identity.
Assert.IsTrue(modelSpace == Pose.identity);
}
else
{
var expectedModelTranslation = new Vector3(modelPoseIndex, modelPoseIndex, modelPoseIndex);
Assert.IsTrue(expectedModelTranslation == modelSpace.position);
}
modelPoseIndex++;
}
Assert.AreEqual(size, modelPoseIndex);
var localPoseIndex = 0;
foreach (var localSpace in chain.GetEnabledLocalSpacePoses())
{
if (localPoseIndex == 0)
{
// Root transforms are currently always the identity.
Assert.IsTrue(localSpace == Pose.identity);
}
else
{
var expectedLocalTranslation = new Vector3(1, 1, 1);
Assert.IsTrue(expectedLocalTranslation == localSpace.position, $"{expectedLocalTranslation} != {localSpace.position}");
}
localPoseIndex++;
}
Assert.AreEqual(size, localPoseIndex);
}
class BadPoseExtractor : PoseExtractor
{
public BadPoseExtractor()
{
var size = 2;
var parents = new int[size];
// Parents are intentionally invalid - expect -1 at root
for (var i = 0; i < size; i++)
{
parents[i] = i;
}
Setup(parents);
}
// Root transforms are currently always the identity.
Assert.IsTrue(chain.ModelSpacePoses[0] == Pose.identity);
Assert.IsTrue(chain.LocalSpacePoses[0] == Pose.identity);
protected internal override Pose GetPoseAt(int index)
// Check the non-root transforms
for (var i = 1; i < size; i++)
return Pose.identity;
}
var modelSpace = chain.ModelSpacePoses[i];
var expectedModelTranslation = new Vector3(i, i, i);
Assert.IsTrue(expectedModelTranslation == modelSpace.position);
protected internal override Vector3 GetLinearVelocityAt(int index)
{
return Vector3.zero;
var localSpace = chain.LocalSpacePoses[i];
var expectedLocalTranslation = new Vector3(1, 1, 1);
Assert.IsTrue(expectedLocalTranslation == localSpace.position);
[Test]
public void TestExpectedRoot()
{
Assert.Throws<UnityAgentsException>(() =>
{
var bad = new BadPoseExtractor();
});
}
}
public class PoseExtensionTests

com.unity.ml-agents.extensions/Tests/Editor/Sensors/RigidBodyPoseExtractorTests.cs (58)


using UnityEngine;
using NUnit.Framework;
using Unity.MLAgents.Extensions.Sensors;
using UnityEditor;
namespace Unity.MLAgents.Extensions.Tests.Sensors
{

var poseExtractor = new RigidBodyPoseExtractor(rb1);
Assert.AreEqual(2, poseExtractor.NumPoses);
rb1.position = new Vector3(1, 0, 0);
rb1.rotation = Quaternion.Euler(0, 13.37f, 0);
rb1.velocity = new Vector3(2, 0, 0);
Assert.AreEqual(rb1.position, poseExtractor.GetPoseAt(0).position);
Assert.IsTrue(rb1.rotation == poseExtractor.GetPoseAt(0).rotation);
Assert.AreEqual(rb1.velocity, poseExtractor.GetLinearVelocityAt(0));
}
[Test]
public void TestTwoBodiesVirtualRoot()
{
// * virtualRoot
// * rootObj
// - rb1
// * go2
// - rb2
// - joint
var virtualRoot = new GameObject("I am vroot");
var rootObj = new GameObject();
var rb1 = rootObj.AddComponent<Rigidbody>();
var go2 = new GameObject();
var rb2 = go2.AddComponent<Rigidbody>();
go2.transform.SetParent(rootObj.transform);
var joint = go2.AddComponent<ConfigurableJoint>();
joint.connectedBody = rb1;
var poseExtractor = new RigidBodyPoseExtractor(rb1, null, virtualRoot);
Assert.AreEqual(3, poseExtractor.NumPoses);
// "body" 0 has no parent
Assert.AreEqual(-1, poseExtractor.GetParentIndex(0));
// body 1 has parent 0
Assert.AreEqual(0, poseExtractor.GetParentIndex(1));
var virtualRootPos = new Vector3(0,2,0);
var virtualRootRot = Quaternion.Euler(0, 42, 0);
virtualRoot.transform.position = virtualRootPos;
virtualRoot.transform.rotation = virtualRootRot;
Assert.AreEqual(virtualRootPos, poseExtractor.GetPoseAt(0).position);
Assert.IsTrue(virtualRootRot == poseExtractor.GetPoseAt(0).rotation);
Assert.AreEqual(Vector3.zero, poseExtractor.GetLinearVelocityAt(0));
// Same as above test, but using index 1
rb1.position = new Vector3(1, 0, 0);
rb1.rotation = Quaternion.Euler(0, 13.37f, 0);
rb1.velocity = new Vector3(2, 0, 0);
Assert.AreEqual(rb1.position, poseExtractor.GetPoseAt(1).position);
Assert.IsTrue(rb1.rotation == poseExtractor.GetPoseAt(1).rotation);
Assert.AreEqual(rb1.velocity, poseExtractor.GetLinearVelocityAt(1));
}
}
}

com.unity.ml-agents.extensions/Tests/Editor/Sensors/RigidBodySensorTests.cs (25)


var sensor = sensorComponent.CreateSensor();
sensor.Update();
// The root body is ignored since it always generates identity values
// and there are no other bodies to generate observations.
var expected = new float[0];
Assert.AreEqual(expected.Length, sensorComponent.GetObservationShape()[0]);
var expected = new[]
{
0f, 0f, 0f, // ModelSpaceLinearVelocity
0f, 0f, 0f, // LocalSpaceTranslations
0f, 0f, 0f, 1f // LocalSpaceRotations
};
Assert.AreEqual(expected.Length, sensorComponent.GetObservationShape()[0]);
}
[Test]

var joint2 = leafGameObj.AddComponent<ConfigurableJoint>();
joint2.connectedBody = middleRb;
var virtualRoot = new GameObject();
var sensorComponent = rootObj.AddComponent<RigidBodySensorComponent>();
sensorComponent.RootBody = rootRb;

UseLocalSpaceTranslations = true,
UseLocalSpaceLinearVelocity = true
};
sensorComponent.VirtualRoot = virtualRoot;
// Note that the VirtualRoot is ignored from the observations
var expected = new[]
{
// Model space

// Local space
0f, 0f, 0f, // Root pos
13.37f, 0f, 0f, // Attached pos
4.2f, 0f, 0f, // Leaf pos
0f, 0f, 0f, // Root vel
1f, 0f, 0f, // Root vel (relative to virtual root)
13.37f, 0f, 0f, // Attached pos
4.2f, 0f, 0f, // Leaf pos
SensorTestHelper.CompareObservation(sensor, expected);
SensorTestHelper.CompareObservation(sensor, expected);
// Update the settings to only process joint observations
sensorComponent.Settings = new PhysicsSensorSettings

com.unity.ml-agents.extensions/package.json (2)


"unity": "2018.4",
"description": "A source-only package for new features based on ML-Agents",
"dependencies": {
"com.unity.ml-agents": "1.3.0-preview"
"com.unity.ml-agents": "1.2.0-preview"
}
}

com.unity.ml-agents/CHANGELOG.md (25)


### Major Changes
#### com.unity.ml-agents (C#)
#### ml-agents / ml-agents-envs / gym-unity (Python)
### Minor Changes
#### com.unity.ml-agents (C#)
#### ml-agents / ml-agents-envs / gym-unity (Python)
### Bug Fixes
#### com.unity.ml-agents (C#)
#### ml-agents / ml-agents-envs / gym-unity (Python)
## [1.3.0-preview] 2020-08-12
### Major Changes
#### com.unity.ml-agents (C#)
#### ml-agents / ml-agents-envs / gym-unity (Python)
- The minimum supported Python version for ml-agents-envs was changed to 3.6.1. (#4244)
- The minimum supported python version for ml-agents-envs was changed to 3.6.1. (#4244)
- The interaction between EnvManager and TrainerController was changed; EnvManager.advance() was split into two stages,
and TrainerController now uses the results from the first stage to handle new behavior names. This change speeds up
Python training by approximately 5-10%. (#4259)

#### ml-agents / ml-agents-envs / gym-unity (Python)
#### ml-agents / ml-agents-envs / gym-unity (Python)
- The versions of `numpy` supported by ml-agents-envs were changed to disallow 1.19.0 or later. This was done to reflect
a similar change in TensorFlow's requirements. (#4274)
- CSV statistics writer was removed (#4300).
### Bug Fixes
#### com.unity.ml-agents (C#)

- `max_step` in the `TerminalStep` and `TerminalSteps` objects was renamed `interrupted`.
- `beta` and `epsilon` in `PPO` are no longer decayed by default but follow the same schedule as learning rate. (#3940)
- `get_behavior_names()` and `get_behavior_spec()` on UnityEnvironment were replaced by the `behavior_specs` property. (#3946)
- The first version of the Unity Environment Registry (Experimental) has been released. More information [here](https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Unity-Environment-Registry.md)(#3967)
- The first version of the Unity Environment Registry (Experimental) has been released. More information [here](https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Unity-Environment-Registry.md)(#3967)
- `use_visual` and `allow_multiple_visual_obs` in the `UnityToGymWrapper` constructor
were replaced by `allow_multiple_obs` which allows one or more visual observations and
vector observations to be used simultaneously. (#3981) Thank you @shakenes !

- The format for trainer configuration has changed, and the "default" behavior has been deprecated.
See the [Migration Guide](https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Migrating.md) for more details. (#3936)
See the [Migration Guide](https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Migrating.md) for more details. (#3936)
- Training artifacts (trained models, summaries) are now found in the `results/`
directory. (#3829)
- When using Curriculum, the current lesson will resume if training is quit and resumed. As such,

com.unity.ml-agents/Documentation~/com.unity.ml-agents.md (2)


[unity ML-Agents Toolkit]: https://github.com/Unity-Technologies/ml-agents
[unity inference engine]: https://docs.unity3d.com/Packages/com.unity.barracuda@latest/index.html
[package manager documentation]: https://docs.unity3d.com/Manual/upm-ui-install.html
[installation instructions]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Installation.md
[installation instructions]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Installation.md
[github repository]: https://github.com/Unity-Technologies/ml-agents
[python package]: https://github.com/Unity-Technologies/ml-agents
[execution order of event functions]: https://docs.unity3d.com/Manual/ExecutionOrder.html

com.unity.ml-agents/Runtime/Academy.cs (6)


* API. For more information on each of these entities, in addition to how to
* set-up a learning environment and train the behavior of characters in a
* Unity scene, please browse our documentation pages on GitHub:
* https://github.com/Unity-Technologies/ml-agents/tree/release_5_docs/docs/
* https://github.com/Unity-Technologies/ml-agents/tree/release_4_docs/docs/
*/
namespace Unity.MLAgents

/// fall back to inference or heuristic decisions. (You can also set agents to always use
/// inference or heuristics.)
/// </remarks>
[HelpURL("https://github.com/Unity-Technologies/ml-agents/tree/release_5_docs/" +
[HelpURL("https://github.com/Unity-Technologies/ml-agents/tree/release_4_docs/" +
"docs/Learning-Environment-Design.md")]
public class Academy : IDisposable
{

/// Unity package version of com.unity.ml-agents.
/// This must match the version string in package.json and is checked in a unit test.
/// </summary>
internal const string k_PackageVersion = "1.3.0-preview";
internal const string k_PackageVersion = "1.2.0-preview";
const int k_EditorTrainingPort = 5004;
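The comment above says the version constant is checked against package.json by a unit test. A hypothetical sketch of such a check; the file path, JSON shape, and test name are assumptions, not taken from the diff:

```csharp
using System.IO;
using NUnit.Framework;
using UnityEngine;

public class PackageVersionConsistencyTest
{
    [System.Serializable]
    class PackageInfo { public string version; }

    [Test]
    public void AcademyVersionMatchesPackageJson()
    {
        var json = File.ReadAllText("com.unity.ml-agents/package.json");
        var info = JsonUtility.FromJson<PackageInfo>(json);
        // "1.2.0-preview" is k_PackageVersion after this revert.
        Assert.AreEqual("1.2.0-preview", info.version);
    }
}
```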

com.unity.ml-agents/Runtime/Agent.cs (26)


/// [OnDisable()]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.OnDisable.html]
/// [OnBeforeSerialize()]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.OnBeforeSerialize.html
/// [OnAfterSerialize()]: https://docs.unity3d.com/ScriptReference/MonoBehaviour.OnAfterSerialize.html
/// [Agents]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Learning-Environment-Design-Agents.md
/// [Reinforcement Learning in Unity]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Learning-Environment-Design.md
/// [Agents]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Learning-Environment-Design-Agents.md
/// [Reinforcement Learning in Unity]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Learning-Environment-Design.md
/// [Unity ML-Agents Toolkit manual]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Readme.md
/// [Unity ML-Agents Toolkit manual]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Readme.md
[HelpURL("https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/" +
[HelpURL("https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/" +
"docs/Learning-Environment-Design-Agents.md")]
[Serializable]
[RequireComponent(typeof(BehaviorParameters))]

/// for information about mixing reward signals from curiosity and Generative Adversarial
/// Imitation Learning (GAIL) with rewards supplied through this method.
///
/// [Agents - Rewards]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Learning-Environment-Design-Agents.md#rewards
/// [Reward Signals]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/ML-Agents-Overview.md#a-quick-note-on-reward-signals
/// [Agents - Rewards]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Learning-Environment-Design-Agents.md#rewards
/// [Reward Signals]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/ML-Agents-Overview.md#a-quick-note-on-reward-signals
/// </remarks>
/// <param name="reward">The new value of the reward.</param>
public void SetReward(float reward)

/// for information about mixing reward signals from curiosity and Generative Adversarial
/// Imitation Learning (GAIL) with rewards supplied through this method.
///
/// [Agents - Rewards]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Learning-Environment-Design-Agents.md#rewards
/// [Reward Signals]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/ML-Agents-Overview.md#a-quick-note-on-reward-signals
/// [Agents - Rewards]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Learning-Environment-Design-Agents.md#rewards
/// [Reward Signals]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/ML-Agents-Overview.md#a-quick-note-on-reward-signals
///</remarks>
/// <param name="increment">Incremental reward value.</param>
public void AddReward(float increment)
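The two reward APIs documented above differ only in accumulation. A typical, hypothetical usage inside an Agent subclass (`reachedGoal` is a placeholder condition):

```csharp
// Illustrative only: SetReward overwrites the reward assigned this step,
// AddReward accumulates onto it.
if (reachedGoal)
{
    SetReward(1.0f);    // terminal reward replaces anything accumulated this step
    EndEpisode();
}
else
{
    AddReward(-0.001f); // small per-step time penalty to encourage haste
}
```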

/// implementing a simple heuristic function can aid in debugging agent actions and interactions
/// with its environment.
///
/// [Demonstration Recorder]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Learning-Environment-Design-Agents.md#recording-demonstrations
/// [Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Learning-Environment-Design-Agents.md#actions
/// [Demonstration Recorder]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Learning-Environment-Design-Agents.md#recording-demonstrations
/// [Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Learning-Environment-Design-Agents.md#actions
/// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
/// </remarks>
/// <example>

/// For more information about observations, see [Observations and Sensors].
///
/// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
/// [Observations and Sensors]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Learning-Environment-Design-Agents.md#observations-and-sensors
/// [Observations and Sensors]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Learning-Environment-Design-Agents.md#observations-and-sensors
/// </remarks>
public virtual void CollectObservations(VectorSensor sensor)
{
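For context, a hypothetical override of the method documented above; the `m_Ball` Rigidbody field is an assumption, and each `AddObservation` call appends to the vector observation:

```csharp
public override void CollectObservations(VectorSensor sensor)
{
    sensor.AddObservation(transform.localPosition); // 3 floats
    sensor.AddObservation(m_Ball.velocity.x);       // 1 float
    sensor.AddObservation(m_Ball.velocity.z);       // 1 float
    // VectorObservationSize in BehaviorParameters must match the total: 5.
}
```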

///
/// See [Agents - Actions] for more information on masking actions.
///
/// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Learning-Environment-Design-Agents.md#actions
/// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Learning-Environment-Design-Agents.md#actions
/// </remarks>
/// <seealso cref="OnActionReceived(float[])"/>
public virtual void CollectDiscreteActionMasks(DiscreteActionMasker actionMasker)

///
/// For more information about implementing agent actions see [Agents - Actions].
///
/// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Learning-Environment-Design-Agents.md#actions
/// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Learning-Environment-Design-Agents.md#actions
/// </remarks>
/// <param name="vectorAction">
/// An array containing the action vector. The length of the array is specified

com.unity.ml-agents/Runtime/AssemblyInfo.cs (1)


[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor.Tests")]
[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor")]
[assembly: InternalsVisibleTo("Unity.ML-Agents.Extensions")]

com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs (2)


/// See [Imitation Learning - Recording Demonstrations] for more information.
///
/// [GameObject]: https://docs.unity3d.com/Manual/GameObjects.html
/// [Imitation Learning - Recording Demonstrations]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs//Learning-Environment-Design-Agents.md#recording-demonstrations
/// [Imitation Learning - Recording Demonstrations]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs//Learning-Environment-Design-Agents.md#recording-demonstrations
/// </remarks>
[RequireComponent(typeof(Agent))]
[AddComponentMenu("ML Agents/Demonstration Recorder", (int)MenuGroup.Default)]

com.unity.ml-agents/Runtime/DiscreteActionMasker.cs (2)


///
/// See [Agents - Actions] for more information on masking actions.
///
/// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_5_docs/docs/Learning-Environment-Design-Agents.md#actions
/// [Agents - Actions]: https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/docs/Learning-Environment-Design-Agents.md#actions
/// </remarks>
/// <param name="branch">The branch for which the actions will be masked.</param>
/// <param name="actionIndices">The indices of the masked actions.</param>
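A hypothetical use of the masking API documented above, from an Agent override (the branch and action indices are placeholders; `SetMask` is the masker method this doc comment belongs to, per this release line):

```csharp
public override void CollectDiscreteActionMasks(DiscreteActionMasker actionMasker)
{
    // Forbid actions 1 and 2 on branch 0 for the next decision;
    // the policy can only pick from the remaining indices.
    actionMasker.SetMask(0, new[] { 1, 2 });
}
```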

com.unity.ml-agents/package.json (2)


{
"name": "com.unity.ml-agents",
"displayName": "ML Agents",
"version": "1.3.0-preview",
"version": "1.2.0-preview",
"unity": "2018.4",
"description": "Use state-of-the-art machine learning to create intelligent character behaviors in any Unity environment (games, robotics, film, etc.).",
"dependencies": {

docs/Installation-Anaconda-Windows.md (4)


the ml-agents Conda environment by typing `activate ml-agents`)_:
```sh
git clone --branch release_5 https://github.com/Unity-Technologies/ml-agents.git
git clone --branch release_4 https://github.com/Unity-Technologies/ml-agents.git
The `--branch release_5` option will switch to the tag of the latest stable
The `--branch release_4` option will switch to the tag of the latest stable
release. Omitting that will get the `master` branch which is potentially
unstable.

docs/Installation.md (6)


of our tutorials / guides assume you have access to our example environments).
```sh
git clone --branch release_5 https://github.com/Unity-Technologies/ml-agents.git
git clone --branch release_4 https://github.com/Unity-Technologies/ml-agents.git
The `--branch release_5` option will switch to the tag of the latest stable
The `--branch release_4` option will switch to the tag of the latest stable
release. Omitting that will get the `master` branch which is potentially
unstable.

ML-Agents Toolkit for your purposes. If you plan to contribute those changes
back, make sure to clone the `master` branch (by omitting `--branch release_5`
back, make sure to clone the `master` branch (by omitting `--branch release_4`
from the command above). See our
[Contributions Guidelines](../com.unity.ml-agents/CONTRIBUTING.md) for more
information on contributing to the ML-Agents Toolkit.

docs/Training-Configuration-File.md (2)


| `hyperparameters -> beta` | (default = `5.0e-3`) Strength of the entropy regularization, which makes the policy "more random." This ensures that agents properly explore the action space during training. Increasing this will ensure more random actions are taken. This should be adjusted such that the entropy (measurable from TensorBoard) slowly decreases alongside increases in reward. If entropy drops too quickly, increase `beta`. If entropy drops too slowly, decrease `beta`. <br><br>Typical range: `1e-4` - `1e-2` |
| `hyperparameters -> epsilon` | (default = `0.2`) Influences how rapidly the policy can evolve during training. Corresponds to the acceptable threshold of divergence between the old and new policies during gradient descent updating. Setting this value small will result in more stable updates, but will also slow the training process. <br><br>Typical range: `0.1` - `0.3` |
| `hyperparameters -> lambd` | (default = `0.95`) Regularization parameter (lambda) used when calculating the Generalized Advantage Estimate ([GAE](https://arxiv.org/abs/1506.02438)). This can be thought of as how much the agent relies on its current value estimate when calculating an updated value estimate. Low values correspond to relying more on the current value estimate (which can be high bias), and high values correspond to relying more on the actual rewards received in the environment (which can be high variance). The parameter provides a trade-off between the two, and the right value can lead to a more stable training process. <br><br>Typical range: `0.9` - `0.95` |
| `hyperparameters -> num_epoch` | (default = `3`) Number of passes to make through the experience buffer when performing gradient descent optimization. The larger the batch_size, the larger it is acceptable to make this. Decreasing this will ensure more stable updates, at the cost of slower learning. <br><br>Typical range: `3` - `10` |
| `hyperparameters -> num_epoch` | Number of passes to make through the experience buffer when performing gradient descent optimization. The larger the batch_size, the larger it is acceptable to make this. Decreasing this will ensure more stable updates, at the cost of slower learning. <br><br>Typical range: `3` - `10` |
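For reference, `lambd` is the λ in the Generalized Advantage Estimator from the linked paper (standard formulation, stated here for convenience, not taken from this diff):

$$\hat{A}_t = \sum_{l=0}^{\infty}(\gamma\lambda)^l\,\delta_{t+l},\qquad \delta_t = r_t + \gamma V(s_{t+1}) - V(s_t)$$

With λ = 0 the estimate collapses to the one-step TD error (leaning on the value estimate, higher bias); with λ = 1 it approaches the Monte Carlo advantage (leaning on actual rewards, higher variance).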
### SAC-specific Configurations

docs/Training-ML-Agents.md (81)


mlagents-learn config/ppo/3DBall_randomize.yaml --run-id=3D-Ball-randomize
```
We can observe progress and metrics via TensorBoard.
#### Curriculum
To enable curriculum learning, you need to add a `curriculum` sub-section to your environment
parameter. Here is one example with the environment parameter `my_environment_parameter`:
```yml
behaviors:
BehaviorY:
# < Same as above >
# Add this section
environment_parameters:
my_environment_parameter:
curriculum:
- name: MyFirstLesson # The '-' is important as this is a list
completion_criteria:
measure: progress
behavior: my_behavior
signal_smoothing: true
min_lesson_length: 100
threshold: 0.2
value: 0.0
- name: MySecondLesson # This is the start of the second lesson
completion_criteria:
measure: progress
behavior: my_behavior
signal_smoothing: true
min_lesson_length: 100
threshold: 0.6
require_reset: true
value:
sampler_type: uniform
sampler_parameters:
min_value: 4.0
max_value: 7.0
- name: MyLastLesson
value: 8.0
```
Note that this curriculum __only__ applies to `my_environment_parameter`. The `curriculum` section
contains a list of `Lessons`. In the example, the lessons are named `MyFirstLesson`, `MySecondLesson`
and `MyLastLesson`.
Each `Lesson` has 3 fields:
- `name` which is a user-defined name for the lesson (the name of the lesson will be displayed in
the console when the lesson changes)
- `completion_criteria` which determines what needs to happen in the simulation before the lesson
can be considered complete. When that condition is met, the curriculum moves on to the next
`Lesson`. Note that you do not need to specify a `completion_criteria` for the last `Lesson`.
- `value` which is the value the environment parameter will take during the lesson. Note that this
can be a float or a sampler.
These are the different settings of `completion_criteria`:
| **Setting** | **Description** |
| :------------------ | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `measure` | What to measure learning progress, and advancement in lessons, by.<br><br> `reward` uses a measure of received reward, while `progress` uses the ratio of steps/max_steps. |
| `behavior` | Specifies which behavior is being tracked. There can be multiple behaviors with different names, each at different points of training. This setting allows the curriculum to track only one of them. |
| `threshold` | Determines the value of `measure` at which the lesson should advance. |
| `min_lesson_length` | The minimum number of episodes that should be completed before the lesson can change. If `measure` is set to `reward`, the average cumulative reward of the last `min_lesson_length` episodes will be used to determine if the lesson should change. Must be nonnegative. <br><br> **Important**: the average reward that is compared to the thresholds is different than the mean reward that is logged to the console. For example, if `min_lesson_length` is `100`, the lesson will increment after the average cumulative reward of the last `100` episodes exceeds the current threshold. The mean reward logged to the console is dictated by the `summary_freq` parameter defined above. |
| `signal_smoothing` | Whether to weight the current progress measure by previous values. |
| `require_reset` | Whether changing the lesson requires the environment to reset (default: `false`). |
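Putting these settings together, here is a minimal sketch of the advancement check (a hypothetical helper, not the trainer's implementation; `signal_smoothing` is omitted for brevity):
```python
def lesson_complete(measure, threshold, min_lesson_length,
                    reward_buffer, steps, max_steps):
    """Return True when the current lesson's completion_criteria are met."""
    if measure == "progress":
        # Ratio of steps taken so far to the behavior's max_steps.
        return steps / max_steps >= threshold
    # measure == "reward": wait for min_lesson_length completed episodes,
    # then compare the average cumulative reward of the most recent ones.
    if len(reward_buffer) < min_lesson_length:
        return False
    recent = reward_buffer[-min_lesson_length:]
    return sum(recent) / len(recent) >= threshold
```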
##### Training with a Curriculum
Once we have specified our curricula, we can launch `mlagents-learn`, pointing it at the
config file that contains them, and PPO will train using Curriculum Learning. For example, to
train agents in the Wall Jump environment with curriculum learning, we can run:
```sh
mlagents-learn config/ppo/WallJump_curriculum.yaml --run-id=wall-jump-curriculum
```
We can then keep track of the current lessons and progress via TensorBoard. If you've terminated
the run, you can resume it using `--resume` and lesson progress will start off where it
ended.
We can observe progress and metrics via Tensorboard.
#### Curriculum

2
docs/Training-on-Amazon-Web-Service.md


2. Clone the ML-Agents repo and install the required Python packages
```sh
git clone --branch release_5 https://github.com/Unity-Technologies/ml-agents.git
git clone --branch release_4 https://github.com/Unity-Technologies/ml-agents.git
cd ml-agents/ml-agents/
pip3 install -e .
```

22
docs/Unity-Inference-Engine.md


[compute shaders](https://docs.unity3d.com/Manual/class-ComputeShader.html) to
run the neural network within Unity.
**Note**: The ML-Agents Toolkit only supports the models created with our
trainers.
## Supported devices
See the Unity Inference Engine documentation for a list of the supported devices.

**Note:** For most of the models generated with the ML-Agents Toolkit, CPU will
be faster than GPU. You should use the GPU only if you use the ResNet visual
encoder or have a large number of agents with visual observations.
## Unsupported use cases
## Externally trained models
The ML-Agents Toolkit only supports the models created with our trainers. Model
loading expects certain conventions for constants and tensor names. While it is
possible to construct a model that follows these conventions, we don't provide
any additional help for this. More details can be found in
[TensorNames.cs](https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/com.unity.ml-agents/Runtime/Inference/TensorNames.cs)
and
[BarracudaModelParamLoader.cs](https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/com.unity.ml-agents/Runtime/Inference/BarracudaModelParamLoader.cs).
If you wish to run inference on an externally trained model, you should use
Barracuda directly, instead of trying to run it through ML-Agents.
## Model inference outside of Unity
We do not provide support for inference anywhere outside of Unity. The
`frozen_graph_def.pb` and `.onnx` files produced by training are open formats
for TensorFlow and ONNX respectively; if you wish to convert these to another
format or run inference with them, refer to their documentation.
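As an illustration, the exported `.onnx` file can be loaded with a standard ONNX runtime outside of Unity; this is a sketch, and the model path and observation shape below are hypothetical:
```python
import numpy as np
import onnxruntime as ort

# Hypothetical output path from a training run.
session = ort.InferenceSession("results/run-1/MyBehavior.onnx")
# Inspect the input names and shapes the exported graph expects.
for inp in session.get_inputs():
    print(inp.name, inp.shape)
# Feed a dummy observation (size is hypothetical) to the first input.
obs = np.zeros((1, 8), dtype=np.float32)
outputs = session.run(None, {session.get_inputs()[0].name: obs})
```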

8
docs/Using-Tensorboard.md


skill level between two players. In a proper training run, the ELO of the
agent should steadily increase.
## Exporting Data from TensorBoard
To export timeseries data in CSV or JSON format, check the "Show data download
links" box in the upper left. This will enable download links below each chart.
![Example TensorBoard Run](images/TensorBoard-download.png)
To get custom metrics from a C# environment into TensorBoard, you can use the
To get custom metrics from a C# environment into Tensorboard, you can use the
`StatsRecorder`:
```csharp
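// A minimal sketch using the ML-Agents StatsRecorder API (the metric name
// and value are hypothetical; Academy lives in the Unity.MLAgents namespace).
// The stat will appear in TensorBoard under the given key.
var statsRecorder = Academy.Instance.StatsRecorder;
statsRecorder.Add("MyMetric", 1.0f);
```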

2
gym-unity/gym_unity/__init__.py


# Version of the library that will be used to upload to pypi
__version__ = "0.20.0.dev0"
__version__ = "0.19.0.dev0"
# Git tag that will be checked to determine whether to trigger upload to pypi
__release_tag__ = None

2
ml-agents-envs/mlagents_envs/__init__.py


# Version of the library that will be used to upload to pypi
__version__ = "0.20.0.dev0"
__version__ = "0.19.0.dev0"
# Git tag that will be checked to determine whether to trigger upload to pypi
__release_tag__ = None

2
ml-agents-envs/setup.py


install_requires=[
"cloudpickle",
"grpcio>=1.11.0",
"numpy>=1.14.1,<1.19.0",
"numpy>=1.14.1,<2.0",
"Pillow>=4.2.1",
"protobuf>=3.6",
"pyyaml>=3.1.0",

18
ml-agents/mlagents/model_serialization.py


from distutils.util import strtobool
import os
import shutil
from typing import Any, List, Set, NamedTuple
from distutils.version import LooseVersion

return strtobool(val)
except Exception:
return False
def copy_model_files(source_nn_path: str, destination_nn_path: str) -> None:
"""
Copy the .nn file at the given source to the destination.
Also copies the corresponding .onnx file if it exists.
"""
shutil.copyfile(source_nn_path, destination_nn_path)
logger.info(f"Copied {source_nn_path} to {destination_nn_path}.")
# Copy the onnx file if it exists
source_onnx_path = os.path.splitext(source_nn_path)[0] + ".onnx"
destination_onnx_path = os.path.splitext(destination_nn_path)[0] + ".onnx"
try:
shutil.copyfile(source_onnx_path, destination_onnx_path)
logger.info(f"Copied {source_onnx_path} to {destination_onnx_path}.")
except OSError:
pass
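A usage sketch for the helper above (paths hypothetical), mirroring how the trainer copies a checkpoint to its final location:
```python
from mlagents.model_serialization import copy_model_files

# Copies checkpoint.nn to MyBehavior.nn, plus checkpoint.onnx if present.
copy_model_files("results/run-1/checkpoint.nn", "results/run-1/MyBehavior.nn")
```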

2
ml-agents/mlagents/trainers/__init__.py


# Version of the library that will be used to upload to pypi
__version__ = "0.20.0.dev0"
__version__ = "0.19.0.dev0"
# Git tag that will be checked to determine whether to trigger upload to pypi
__release_tag__ = None

2
ml-agents/mlagents/trainers/environment_parameter_manager.py


lesson = settings.curriculum[lesson_num]
if (
lesson.completion_criteria is not None
and len(settings.curriculum) > lesson_num + 1
and len(settings.curriculum) > lesson_num
):
behavior_to_consider = lesson.completion_criteria.behavior
if behavior_to_consider in trainer_steps:

8
ml-agents/mlagents/trainers/exception.py


pass
class TrainerConfigWarning(Warning):
"""
Any warning related to the configuration of trainers in the ML-Agents Toolkit.
"""
pass
class CurriculumError(TrainerError):
"""
Any error related to training with a curriculum.

14
ml-agents/mlagents/trainers/learn.py


from mlagents.trainers.trainer_util import TrainerFactory, handle_existing_directories
from mlagents.trainers.stats import (
TensorboardWriter,
CSVWriter,
StatsReporter,
GaugeWriter,
ConsoleWriter,

os.path.join(run_logs_dir, "training_status.json")
)
# Configure Tensorboard Writers and StatsReporter
# Configure CSV, Tensorboard Writers and StatsReporter
# We assume reward and episode length are needed in the CSV.
csv_writer = CSVWriter(
write_path,
required_fields=[
"Environment/Cumulative Reward",
"Environment/Episode Length",
],
)
tb_writer = TensorboardWriter(
write_path, clear_past_data=not checkpoint_settings.resume
)

StatsReporter.add_writer(csv_writer)
StatsReporter.add_writer(gauge_write)
StatsReporter.add_writer(console_writer)

add_timer_metadata("mlagents_envs_version", mlagents_envs.__version__)
add_timer_metadata("communication_protocol_version", UnityEnvironment.API_VERSION)
add_timer_metadata("tensorflow_version", tf_utils.tf.__version__)
add_timer_metadata("numpy_version", np.__version__)
logger.info(f"run_seed set to {run_seed}")
run_training(run_seed, options)

12
ml-agents/mlagents/trainers/settings.py


import warnings
import attr
import cattr
from typing import Dict, Optional, List, Any, DefaultDict, Mapping, Tuple, Union

from mlagents.trainers.cli_utils import StoreConfigFile, DetectDefault, parser
from mlagents.trainers.cli_utils import load_config
from mlagents.trainers.exception import TrainerConfigError, TrainerConfigWarning
from mlagents.trainers.exception import TrainerConfigError
from mlagents_envs import logging_util
from mlagents_envs.side_channel.environment_parameters_channel import (

def _check_lesson_chain(lessons, parameter_name):
"""
Ensures that when using curriculum, all non-terminal lessons have a valid
CompletionCriteria, and that the terminal lesson does not contain a CompletionCriteria.
CompletionCriteria
"""
num_lessons = len(lessons)
for index, lesson in enumerate(lessons):

)
if index == num_lessons - 1 and lesson.completion_criteria is not None:
warnings.warn(
f"Your final lesson definition contains completion_criteria for {parameter_name}."
f"It will be ignored.",
TrainerConfigWarning,
)
@staticmethod

59
ml-agents/mlagents/trainers/stats.py


from typing import List, Dict, NamedTuple, Any, Optional
import numpy as np
import abc
import csv
import os
import time
from threading import RLock

"""
Add a generic property to the StatsWriter. This could be e.g. a Dict of hyperparameters,
a max step count, a trainer type, etc. Note that not all StatsWriters need to be compatible
with all types of properties. For instance, a TB writer doesn't need a max step.
with all types of properties. For instance, a TB writer doesn't need a max step, nor should
we write hyperparameters to the CSV.
:param category: The category that the property belongs to.
:param type: The type of property.
:param value: The property itself.

return None
class CSVWriter(StatsWriter):
def __init__(self, base_dir: str, required_fields: List[str] = None):
"""
A StatsWriter that writes to a CSV file.
:param base_dir: The directory within which to place the CSV file, which will be {base_dir}/{category}.csv.
:param required_fields: If provided, the CSV writer won't write until these fields have statistics to write for
them.
"""
# We need to keep track of the fields in the CSV, as all rows need the same fields.
self.csv_fields: Dict[str, List[str]] = {}
self.required_fields = required_fields if required_fields else []
self.base_dir: str = base_dir
def write_stats(
self, category: str, values: Dict[str, StatsSummary], step: int
) -> None:
if self._maybe_create_csv_file(category, list(values.keys())):
row = [str(step)]
# Only record the stats that showed up in the first valid row
for key in self.csv_fields[category]:
_val = values.get(key, None)
row.append(str(_val.mean) if _val else "None")
with open(self._get_filepath(category), "a") as file:
writer = csv.writer(file)
writer.writerow(row)
def _maybe_create_csv_file(self, category: str, keys: List[str]) -> bool:
"""
If no CSV file exists and the keys have the required values,
make the CSV file and write the title row.
Returns True if there is now (or already is) a valid CSV file.
"""
if category not in self.csv_fields:
summary_dir = self.base_dir
os.makedirs(summary_dir, exist_ok=True)
# Only store if the row contains the required fields
if all(item in keys for item in self.required_fields):
self.csv_fields[category] = keys
with open(self._get_filepath(category), "w") as file:
title_row = ["Steps"]
title_row.extend(keys)
writer = csv.writer(file)
writer.writerow(title_row)
return True
return False
return True
def _get_filepath(self, category: str) -> str:
file_dir = os.path.join(self.base_dir, category + ".csv")
return file_dir
class StatsReporter:
writers: List[StatsWriter] = []
stats_dict: Dict[str, Dict[str, List]] = defaultdict(lambda: defaultdict(list))

"""
Add a generic property to the StatsReporter. This could be e.g. a Dict of hyperparameters,
a max step count, a trainer type, etc. Note that not all StatsWriters need to be compatible
with all types of properties. For instance, a TB writer doesn't need a max step.
with all types of properties. For instance, a TB writer doesn't need a max step, nor should
we write hyperparameters to the CSV.
:param key: The type of property.
:param value: The property itself.
"""

64
ml-agents/mlagents/trainers/tests/test_env_param_manager.py


import yaml
from mlagents.trainers.exception import TrainerConfigError, TrainerConfigWarning
from mlagents.trainers.exception import TrainerConfigError
from mlagents.trainers.environment_parameter_manager import EnvironmentParameterManager
from mlagents.trainers.settings import (
RunOptions,

"""
test_bad_curriculum_all_competion_criteria_config_yaml = """
environment_parameters:
param_1:
curriculum:
- name: Lesson1
completion_criteria:
measure: reward
behavior: fake_behavior
threshold: 30
min_lesson_length: 100
require_reset: true
value: 1
- name: Lesson2
completion_criteria:
measure: reward
behavior: fake_behavior
threshold: 30
min_lesson_length: 100
require_reset: true
value: 2
- name: Lesson3
completion_criteria:
measure: reward
behavior: fake_behavior
threshold: 30
min_lesson_length: 100
require_reset: true
value:
sampler_type: uniform
sampler_parameters:
min_value: 1
max_value: 3
"""
def test_curriculum_raises_all_completion_criteria_conversion():
with pytest.warns(TrainerConfigWarning):
run_options = RunOptions.from_dict(
yaml.safe_load(test_bad_curriculum_all_competion_criteria_config_yaml)
)
param_manager = EnvironmentParameterManager(
run_options.environment_parameters, 1337, False
)
assert param_manager.update_lessons(
trainer_steps={"fake_behavior": 500},
trainer_max_steps={"fake_behavior": 1000},
trainer_reward_buffer={"fake_behavior": [1000] * 101},
) == (True, True)
assert param_manager.update_lessons(
trainer_steps={"fake_behavior": 500},
trainer_max_steps={"fake_behavior": 1000},
trainer_reward_buffer={"fake_behavior": [1000] * 101},
) == (True, True)
assert param_manager.update_lessons(
trainer_steps={"fake_behavior": 500},
trainer_max_steps={"fake_behavior": 1000},
trainer_reward_buffer={"fake_behavior": [1000] * 101},
) == (False, False)
assert param_manager.get_current_lesson_number() == {"param_1": 2}
test_everything_config_yaml = """

42
ml-agents/mlagents/trainers/tests/test_stats.py


import pytest
import tempfile
import unittest
import csv
CSVWriter,
StatsSummary,
GaugeWriter,
ConsoleWriter,

tb_writer = TensorboardWriter(tmp_path, clear_past_data=True)
tb_writer.write_stats("category1", {"key1": statssummary1}, 10)
assert len(os.listdir(os.path.join(tmp_path, "category1"))) == 1
def test_csv_writer():
# Test write_stats
category = "category1"
with tempfile.TemporaryDirectory(prefix="unittest-") as base_dir:
csv_writer = CSVWriter(base_dir, required_fields=["key1", "key2"])
statssummary1 = StatsSummary(mean=1.0, std=1.0, num=1)
csv_writer.write_stats("category1", {"key1": statssummary1}, 10)
# Test that the filewriter has been created and the directory has been created.
filewriter_dir = "{basedir}/{category}.csv".format(
basedir=base_dir, category=category
)
# The required keys weren't in the stats
assert not os.path.exists(filewriter_dir)
csv_writer.write_stats(
"category1", {"key1": statssummary1, "key2": statssummary1}, 10
)
csv_writer.write_stats(
"category1", {"key1": statssummary1, "key2": statssummary1}, 20
)
# The required keys were in the stats
assert os.path.exists(filewriter_dir)
with open(filewriter_dir) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
line_count = 0
for row in csv_reader:
if line_count == 0:
assert "key1" in row
assert "key2" in row
assert "Steps" in row
line_count += 1
else:
assert len(row) == 3
line_count += 1
assert line_count == 3
def test_gauge_stat_writer_sanitize():

8
ml-agents/mlagents/trainers/trainer/rl_trainer.py


import abc
import time
import attr
from mlagents.model_serialization import SerializationSettings, copy_model_files
from mlagents.model_serialization import SerializationSettings
from mlagents.trainers.policy.checkpoint_manager import (
NNCheckpoint,
NNCheckpointManager,

logger.warning("Trainer has no policies, not saving anything.")
return
policy = list(self.policies.values())[0]
settings = SerializationSettings(policy.model_path, self.brain_name)
# Copy the checkpointed model files to the final output location
copy_model_files(model_checkpoint.file_path, f"{policy.model_path}.nn")
policy.save(policy.model_path, settings)
NNCheckpointManager.track_final_checkpoint(self.brain_name, final_checkpoint)
@abc.abstractmethod

3
ml-agents/tests/yamato/yamato_utils.py


if extra_packages:
pip_commands += extra_packages
for cmd in pip_commands:
pip_index_url = "--index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple"
f"source {venv_path}/bin/activate; python -m pip install -q {cmd} {pip_index_url}",
f"source {venv_path}/bin/activate; python -m pip install -q {cmd}",
shell=True,
)
return venv_path

1
utils/make_readme_table.py


ReleaseInfo("release_2", "1.0.2", "0.16.1", "May 20, 2020"),
ReleaseInfo("release_3", "1.1.0", "0.17.0", "June 10, 2020"),
ReleaseInfo("release_4", "1.2.0", "0.18.0", "July 15, 2020"),
ReleaseInfo("release_5", "1.2.1", "0.18.1", "July 31, 2020"),
]
MAX_DAYS = 150 # do not print releases older than this many days

3
com.unity.ml-agents.extensions/Runtime/AssemblyInfo.cs


using System.Runtime.CompilerServices;
[assembly: InternalsVisibleTo("Unity.ML-Agents.Extensions.EditorTests")]

11
com.unity.ml-agents.extensions/Runtime/AssemblyInfo.cs.meta


fileFormatVersion: 2
guid: 48c8790647c3345e19c57d6c21065112
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

8
com.unity.ml-agents/Runtime/Actuators.meta


fileFormatVersion: 2
guid: 26733e59183b6479e8f0e892a8bf09a4
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

8
com.unity.ml-agents/Tests/Editor/Actuators.meta


fileFormatVersion: 2
guid: c7e705f7d549e43c6be18ae809cd6f54
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

160
docs/images/TensorBoard-download.png
