
Merge remote-tracking branch 'origin/master' into MLA-1734-demo-provider

/MLA-1734-demo-provider
Chris Elion, 3 years ago
Current commit
49a27695
226 changed files, with 7,125 additions and 1,271 deletions
  1. .pre-commit-config.yaml (3)
  2. .yamato/com.unity.ml-agents-test.yml (2)
  3. DevProject/Packages/manifest.json (16)
  4. DevProject/ProjectSettings/ProjectSettings.asset (42)
  5. DevProject/ProjectSettings/ProjectVersion.txt (4)
  6. Project/Assets/ML-Agents/Examples/Soccer/Prefabs/SoccerFieldTwos.prefab (929)
  7. Project/Assets/ML-Agents/Examples/Soccer/Prefabs/StrikersVsGoalieField.prefab (935)
  8. Project/Assets/ML-Agents/Examples/Soccer/Scenes/SoccerTwos.unity (22)
  9. Project/Assets/ML-Agents/Examples/Soccer/Scenes/StrikersVsGoalie.unity (1)
  10. Project/Assets/ML-Agents/Examples/Sorter/Prefabs/Area.prefab (2)
  11. Project/Packages/manifest.json (4)
  12. Project/ProjectSettings/ProjectVersion.txt (2)
  13. com.unity.ml-agents.extensions/Documentation~/Grid-Sensor.md (126)
  14. com.unity.ml-agents.extensions/Documentation~/com.unity.ml-agents.extensions.md (13)
  15. com.unity.ml-agents.extensions/Runtime/Unity.ML-Agents.Extensions.asmdef (3)
  16. com.unity.ml-agents.extensions/package.json (2)
  17. com.unity.ml-agents/CHANGELOG.md (4)
  18. com.unity.ml-agents/Documentation~/com.unity.ml-agents.md (8)
  19. com.unity.ml-agents/Runtime/Academy.cs (10)
  20. com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs (45)
  21. com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs (12)
  22. com.unity.ml-agents/Runtime/Actuators/VectorActuator.cs (10)
  23. com.unity.ml-agents/Runtime/Agent.deprecated.cs (8)
  24. com.unity.ml-agents/Runtime/Analytics/Events.cs (3)
  25. com.unity.ml-agents/Runtime/AssemblyInfo.cs (1)
  26. com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs (49)
  27. com.unity.ml-agents/Runtime/Policies/BehaviorParameters.cs (28)
  28. com.unity.ml-agents/Tests/Editor/Analytics/InferenceAnalyticsTests.cs (2)
  29. com.unity.ml-agents/Tests/Editor/BehaviorParameterTests.cs (46)
  30. com.unity.ml-agents/Tests/Editor/ParameterLoaderTest.cs (12)
  31. com.unity.ml-agents/package.json (4)
  32. docs/Installation.md (24)
  33. docs/Learning-Environment-Design-Agents.md (4)
  34. docs/Learning-Environment-Examples.md (2)
  35. gym-unity/gym_unity/__init__.py (2)
  36. ml-agents-envs/mlagents_envs/__init__.py (2)
  37. ml-agents/mlagents/plugins/stats_writer.py (8)
  38. ml-agents/mlagents/trainers/__init__.py (2)
  39. ml-agents/tests/yamato/standalone_build_tests.py (4)
  40. ml-agents/tests/yamato/yamato_utils.py (11)
  41. .yamato/standalone-build-webgl-test.yml (30)
  42. DevProject/DevProject.sln.DotSettings (2)
  43. DevProject/Packages/packages-lock.json (519)
  44. com.unity.ml-agents.extensions/Documentation~/InputActuatorComponent.md (57)
  45. com.unity.ml-agents.extensions/Editor/Input.meta (3)
  46. com.unity.ml-agents.extensions/Runtime/Input.meta (3)
  47. com.unity.ml-agents.extensions/Tests/Runtime/Input.meta (3)
  48. com.unity.ml-agents/Runtime/Communicator/CommunicatorFactory.cs (35)
  49. com.unity.ml-agents/Runtime/Communicator/CommunicatorFactory.cs.meta (3)
  50. com.unity.ml-agents/Tests/Editor/Actuators/ActionSpecTests.cs (40)
  51. com.unity.ml-agents/Tests/Editor/Actuators/ActionSpecTests.cs.meta (3)
  52. DevProject/ProjectSettings/Packages/com.unity.testtools.codecoverage/Settings.json (7)
  53. ML-Agents-Input-Example/.gitignore (70)
  54. ML-Agents-Input-Example/Assets/InputSystem.inputsettings.asset (33)
  55. ML-Agents-Input-Example/Assets/InputSystem.inputsettings.asset.meta (8)
  56. ML-Agents-Input-Example/Assets/ML-Agents.meta (8)
  57. ML-Agents-Input-Example/Assets/ML-Agents/Examples.meta (8)
  58. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock.meta (10)
  59. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Meshes.meta (8)
  60. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Meshes/PushBlockCourt.fbx (1001)
  61. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Meshes/PushBlockCourt.fbx.meta (120)
  62. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Prefabs.meta (10)
  63. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Prefabs/PushBlockArea.prefab (1001)
  64. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Prefabs/PushBlockArea.prefab.meta (10)
  65. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/PushBlockActions.inputactions (179)
  66. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/PushBlockActions.inputactions.meta (14)
  67. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scenes.meta (10)
  68. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scenes/PushBlock.unity (1001)
  69. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scenes/PushBlock.unity.meta (9)
  70. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts.meta (10)
  71. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/GoalDetect.cs (26)
  72. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/GoalDetect.cs.meta (13)
  73. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushAgentBasic.cs (192)
  74. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushAgentBasic.cs.meta (12)
  75. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockActions.cs (316)
  76. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockActions.cs.meta (11)
  77. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockPlayerController.cs (112)
  78. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockPlayerController.cs.meta (11)
  79. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockSettings.cs (39)
  80. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockSettings.cs.meta (11)
  81. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/TFModels.meta (8)
  82. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/TFModels/PushBlock.onnx (1001)
  83. ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/TFModels/PushBlock.onnx.meta (14)
  84. ML-Agents-Input-Example/Assets/ML-Agents/Examples/SharedAssets.meta (8)

3
.pre-commit-config.yaml


args: [--py3-plus, --py36-plus]
exclude: >
(?x)^(
.*barracuda.py|
.*_pb2.py|
.*_pb2_grpc.py
)$

args: [--assume-in-merge]
- id: check-yaml
# Won't handle the templating in yamato
exclude: \.yamato/*
exclude: \.yamato/.*
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.4.2

2
.yamato/com.unity.ml-agents-test.yml


assembly: Unity.ML-Agents
minCoveragePct: 72
- name: com.unity.ml-agents.extensions
assembly: Unity.ML-Agents.Extensions
assembly: Unity.ML-Agents.Extensions*
minCoveragePct: 75
---

16
DevProject/Packages/manifest.json


"dependencies": {
"com.unity.2d.sprite": "1.0.0",
"com.unity.2d.tilemap": "1.0.0",
"com.unity.ads": "3.4.9",
"com.unity.ads": "3.6.1",
"com.unity.ide.vscode": "1.2.1",
"com.unity.ide.vscode": "1.2.3",
"com.unity.inputsystem": "1.1.0-preview.3",
"com.unity.multiplayer-hlapi": "1.0.6",
"com.unity.multiplayer-hlapi": "1.0.8",
"com.unity.purchasing": "2.1.0",
"com.unity.test-framework": "1.1.16",
"com.unity.purchasing": "2.2.1",
"com.unity.test-framework": "1.1.20",
"com.unity.xr.legacyinputhelpers": "2.1.4",
"com.unity.xr.legacyinputhelpers": "2.1.7",
"com.unity.modules.ai": "1.0.0",
"com.unity.modules.androidjni": "1.0.0",
"com.unity.modules.animation": "1.0.0",

"registry": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates",
"testables": [
"com.unity.ml-agents",
"com.unity.ml-agents.extensions"
"com.unity.ml-agents.extensions",
"com.unity.inputsystem"
]
}

42
DevProject/ProjectSettings/ProjectSettings.asset


androidBlitType: 0
defaultIsNativeResolution: 1
macRetinaSupport: 1
runInBackground: 0
runInBackground: 1
captureSingleScreen: 0
muteOtherAudioSources: 0
Prepare IOS For Recording: 0

xboxOneMonoLoggingLevel: 0
xboxOneLoggingLevel: 1
xboxOneDisableEsram: 0
xboxOneEnableTypeOptimization: 0
xboxOnePresentImmediateThreshold: 0
switchQueueCommandMemory: 1048576
switchQueueControlMemory: 16384

switchNVNOtherPoolsGranularity: 16777216
switchNVNMaxPublicTextureIDCount: 0
switchNVNMaxPublicSamplerIDCount: 0
stadiaPresentMode: 0
stadiaTargetFramerate: 0
vulkanEnableLateAcquireNextImage: 0
m_SupportedAspectRatios:
4:3: 1
5:4: 1

useHDRDisplay: 0
D3DHDRBitDepth: 0
m_ColorGamuts: 00000000
targetPixelDensity: 0
targetPixelDensity: 30
resolutionScalingMode: 0
androidSupportedAspectRatio: 1
androidMaxAspectRatio: 2.1

StripUnusedMeshComponents: 0
VertexChannelCompressionMask: 4054
iPhoneSdkVersion: 988
iOSTargetOSVersionString:
iOSTargetOSVersionString: 10.0
tvOSTargetOSVersionString:
tvOSTargetOSVersionString: 10.0
uIPrerenderedIcon: 0
uIRequiresPersistentWiFi: 0
uIRequiresFullScreen: 1

iPhoneSplashScreen: {fileID: 0}
iPhoneHighResSplashScreen: {fileID: 0}
iPhoneTallHighResSplashScreen: {fileID: 0}
iPhone47inSplashScreen: {fileID: 0}
iPhone55inPortraitSplashScreen: {fileID: 0}
iPhone55inLandscapeSplashScreen: {fileID: 0}
iPhone58inPortraitSplashScreen: {fileID: 0}
iPhone58inLandscapeSplashScreen: {fileID: 0}
iPadPortraitSplashScreen: {fileID: 0}
iPadHighResPortraitSplashScreen: {fileID: 0}
iPadLandscapeSplashScreen: {fileID: 0}
iPadHighResLandscapeSplashScreen: {fileID: 0}
iPhone65inPortraitSplashScreen: {fileID: 0}
iPhone65inLandscapeSplashScreen: {fileID: 0}
iPhone61inPortraitSplashScreen: {fileID: 0}
iPhone61inLandscapeSplashScreen: {fileID: 0}
appleTVSplashScreen: {fileID: 0}
appleTVSplashScreen2x: {fileID: 0}
tvOSSmallIconLayers: []

metalEditorSupport: 1
metalAPIValidation: 1
iOSRenderExtraFrameOnPause: 0
iosCopyPluginsCodeInsteadOfSymlink: 0
appleDeveloperTeamID:
iOSManualSigningProvisioningProfileID:
tvOSManualSigningProvisioningProfileID:

ps4ShareFilePath:
ps4ShareOverlayImagePath:
ps4PrivacyGuardImagePath:
ps4ExtraSceSysFile:
ps4NPtitleDatPath:
ps4RemotePlayKeyAssignment: -1
ps4RemotePlayKeyMappingDir:

ps4UseResolutionFallback: 0
ps4ReprojectionSupport: 0
ps4UseAudio3dBackend: 0
ps4UseLowGarlicFragmentationMode: 1
ps4SocialScreenEnabled: 0
ps4ScriptOptimizationLevel: 2
ps4Audio3dVirtualSpeakerCount: 14

ps4disableAutoHideSplash: 0
ps4videoRecordingFeaturesUsed: 0
ps4contentSearchFeaturesUsed: 0
ps4CompatibilityPS5: 0
ps4GPU800MHz: 1
ps4attribEyeToEyeDistanceSettingVR: 0
ps4IncludedModules: []
ps4attribVROutputEnabled: 0

additionalIl2CppArgs:
scriptingRuntimeVersion: 1
gcIncremental: 0
assemblyVersionValidation: 1
gcWBarrierValidation: 0
apiCompatibilityLevelPerPlatform: {}
m_RenderingPath: 1

XboxOneCapability: []
XboxOneGameRating: {}
XboxOneIsContentPackage: 0
XboxOneEnhancedXboxCompatibilityMode: 0
XboxOneEnableGPUVariability: 1
XboxOneSockets: {}
XboxOneSplashScreen: {fileID: 0}

XboxOneOverrideIdentityName:
XboxOneOverrideIdentityPublisher:
vrEditorSettings:
daydream:
daydreamIconForeground: {fileID: 0}

projectName:
organizationId:
cloudEnabled: 0
enableNativePlatformBackendsForNewInputSystem: 0
disableOldInputManagerSupport: 0
enableNativePlatformBackendsForNewInputSystem: 1
disableOldInputManagerSupport: 1
legacyClampBlendShapeWeights: 0

4
DevProject/ProjectSettings/ProjectVersion.txt


m_EditorVersion: 2019.4.7f1
m_EditorVersionWithRevision: 2019.4.7f1 (e992b1a16e65)
m_EditorVersion: 2019.4.19f1
m_EditorVersionWithRevision: 2019.4.19f1 (ca5b14067cec)

929
Project/Assets/ML-Agents/Examples/Soccer/Prefabs/SoccerFieldTwos.prefab
File diff too large to display

935
Project/Assets/ML-Agents/Examples/Soccer/Prefabs/StrikersVsGoalieField.prefab
File diff too large to display

22
Project/Assets/ML-Agents/Examples/Soccer/Scenes/SoccerTwos.unity


m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 1141134673700168, guid: 54f3340298537426e96a6cc530e2d5d8, type: 3}
propertyPath: m_Name
value: SoccerFieldTwos
objectReference: {fileID: 0}
- target: {fileID: 4558743310993102, guid: 54f3340298537426e96a6cc530e2d5d8, type: 3}
propertyPath: m_LocalPosition.x
value: 0

- target: {fileID: 4558743310993102, guid: 54f3340298537426e96a6cc530e2d5d8, type: 3}
propertyPath: m_RootOrder
value: 4
objectReference: {fileID: 0}
- target: {fileID: 4558743310993102, guid: 54f3340298537426e96a6cc530e2d5d8, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4558743310993102, guid: 54f3340298537426e96a6cc530e2d5d8, type: 3}
propertyPath: m_LocalEulerAnglesHint.y
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4558743310993102, guid: 54f3340298537426e96a6cc530e2d5d8, type: 3}
propertyPath: m_LocalEulerAnglesHint.z
value: 0
objectReference: {fileID: 0}
m_RemovedComponents: []
m_SourcePrefab: {fileID: 100100000, guid: 54f3340298537426e96a6cc530e2d5d8, type: 3}

m_Name:
m_EditorClassIdentifier:
gravityMultiplier: 1
monitorVerticalOffset: 0
reuseCollisionCallbacks: 1
--- !u!114 &1574236051
MonoBehaviour:
m_ObjectHideFlags: 0

blueMaterial: {fileID: 2100000, guid: c9fa44c2c3f8ce74ca39a3355ea42631, type: 2}
randomizePlayersTeamForTraining: 0
agentRunSpeed: 2
strikerPunish: -0.1
strikerReward: 1
goaliePunish: -1
goalieReward: 0.1
--- !u!1001 &1606160104
PrefabInstance:
m_ObjectHideFlags: 0

1
Project/Assets/ML-Agents/Examples/Soccer/Scenes/StrikersVsGoalie.unity


maximumDeltaTime: 0.33333334
solverIterations: 6
solverVelocityIterations: 1
reuseCollisionCallbacks: 1
--- !u!114 &1574236051
MonoBehaviour:
m_ObjectHideFlags: 0

2
Project/Assets/ML-Agents/Examples/Sorter/Prefabs/Area.prefab


VectorActionSpaceType: 0
hasUpgradedBrainParametersWithActionSpec: 1
m_Model: {fileID: 11400000, guid: a2b17bcb0df4d448893e800c34d87c4c, type: 3}
m_InferenceDevice: 0
m_InferenceDevice: 2
m_BehaviorType: 0
m_BehaviorName: Sorter
TeamId: 0

4
Project/Packages/manifest.json


"com.unity.collab-proxy": "1.2.15",
"com.unity.ml-agents": "file:../../com.unity.ml-agents",
"com.unity.ml-agents.extensions": "file:../../com.unity.ml-agents.extensions",
"com.unity.package-manager-ui": "2.0.8",
"com.unity.purchasing": "2.0.3",
"com.unity.package-manager-ui": "2.0.13",
"com.unity.purchasing": "2.2.1",
"com.unity.textmeshpro": "1.4.1",
"com.unity.modules.ai": "1.0.0",
"com.unity.modules.animation": "1.0.0",

2
Project/ProjectSettings/ProjectVersion.txt


m_EditorVersion: 2018.4.24f1
m_EditorVersion: 2018.4.32f1

126
com.unity.ml-agents.extensions/Documentation~/Grid-Sensor.md


# Summary
The Grid Sensor combines the generality of data extraction from Raycasts with the image processing power of Convolutional Neural Networks. The Grid Sensor can be used to collect data in the general form of a "Width x Height x Channel" matrix which can be used for training Reinforcement Learning agents or for data analysis.
The Grid Sensor is an alternative method for collecting observations which combines the generality of data extraction from Raycasts with the image processing power of Convolutional Neural Networks. The Grid Sensor can be used to collect data in the general form of a "Width x Height x Channel" matrix which can be used for training agent policies or for data analysis.
In MLAgents there are 2 main sensors for observing information that is "physically" around the agent.
In ML-Agents there are two main sensors for observing information that is "physically" around the agent.
This is simple to implement and provides enough information for most simple games. When few are used, they are computationally fast. However, there are multiple limiting factors:
* The rays need to be at the same height as the things the agent should observe
* Objects can remain hidden by line of sight and if the knowledge of those objects is crucial to the success of the agent, then this limitation must be compensated for by the agents networks capacity (i.e., need a bigger brain with memory)
Raycasts are simple to implement and provide enough information for most simple games. When few are used, they are also computationally lightweight. However, there are multiple limiting factors:
* The rays need to be at the same height as the things the agent should observe.
* Objects can remain hidden by line of sight, and if knowledge of those objects is crucial to the success of the agent, this limitation must be compensated for by the agent's network capacity (i.e., a bigger brain with memory is needed).
* Typically the length of the raycasts is limited because the agent need not know about objects that are at the other side of the level. Combined with few raycasts for computational efficiency, this means that an agent may not observe objects that fall between these rays and the issue becomes worse as the objects reduce in size.
* Typically, the length of the raycasts is limited because the agent need not know about objects that are at the other side of the level. Combined with few raycasts for computational efficiency, this means that an agent may not observe objects that fall between these rays and the issue becomes worse as the objects reduce in size.
The Camera provides the agent with either a grayscale or an RGB image of the game environment. It goes without saying that there non-linear relationships between nearby pixels in an image. It is this intuition that helps form the basis of Convolutional Neural Networks (CNNs) and established the literature of designing networks that take advantage of these relationships between pixels. Following this established literature of CNNs on image based data, the MLAgent's Camera Sensor provides a means by which the agent can include high dimensional inputs (images) into its observation stream.
The Camera provides the agent with either a grayscale or an RGB image of the game environment. In many cases, what we want to extract from a set of pixels is invariant to the location of those pixels in the image. It is this intuition that forms the basis of Convolutional Neural Networks (CNNs) and established the literature of designing networks that take advantage of these relationships between pixels. Following this established literature of CNNs on image-based data, the ML-Agents Camera Sensor provides a means by which the agent can include high-dimensional inputs (images) in its observation stream.
* It requires render the scene and thus is computationally slower than alternatives that do not use rendering
* It has yet been shown that the Camera Sensor can be used on a headless machine which means it is not yet possible (if at all) to train an agent on a headless infrastructure.
* It requires rendering the scene and thus is computationally slower than alternatives that do not use rendering.
* The RGB of the camera only provides a maximum of 3 channels to the agent.
* The RGB of the camera only provides a maximum of three channels to the agent.
An image can be thought of as a matrix of a predefined width (W) and a height (H) and each pixel can be thought of as simply an array of length 3 (in the case of RGB), `[Red, Green, Blue]` holding the different channel information of the color (channel) intensities at that pixel location. Thus an image is just a 3 dimensional matrix of size WxHx3. A Grid Observation can be thought of as a generalization of this setup where in place of a pixel there is a "cell" which is an array of length N representing different channel intensities at that cell position. From a Convolutional Neural Network point of view, the introduction of multiple channels in an "image" isn't a new concept. One such example is using an RGB-Depth image which is used in several robotics applications. The distinction of Grid Observations is what the data within the channels represents. Instead of limiting the channels to color intensities, the channels within a cell of a Grid Observation generalize to any data that can be represented by a single number (float or int).
Before jumping into the details of the Grid Sensor, an important thing to note is the agent's performance and qualitatively different behavior compared to raycasts. Unity ML-Agents comes with a suite of example environments. One in particular, the [Food Collector](https://github.com/Unity-Technologies/ml-agents/tree/release_12_docs/docs/Learning-Environment-Examples.md#food-collector), has been the focus of the Grid Sensor development.
## Overview
The Food Collector environment can be described as:
* Set-up: A multi-agent environment where agents compete to collect food.
* Goal: The agents must learn to collect as many green food spheres as possible while avoiding red spheres.
* Agents: The environment contains 5 agents with the same Behavior Parameters.
There are three main phases to the observation process of the Grid Sensor:
When applying the Grid Sensor to this environment, in place of the Raycast Vector Sensor or the Camera Sensor, a Mean Reward of 40-50 is observed. This performance is on par with that of agents trained with raycasts, but a side-by-side comparison of trained agents shows a qualitative difference in behavior. A deeper study and interpretation of the qualitative differences between agents trained with Raycasts and Vector Sensors versus Grid Sensors is left to future studies.
<img src="images/gridobs-vs-vectorobs.gif" align="middle" width="3000"/>
## Overview
There are three main phases to the Grid Sensor:
1. **Collection** - data is extracted from observed objects
2. **Encoding** - the extracted data is encoded into a grid observation
3. **Communication** - the grid observation is sent to python or used by a trained model

## Collection
A Grid Sensor is the Grid Observation analog of a Unity Camera but with some notable differences. The sensor is made up of a grid of identical Box Colliders which designate the "cells" of the grid. The Grid Sensor also has a list of "detectable objects" in the form of Unity gameobject tags. When an object that is tagged as a detectable object is present within a cell's Box Collider, that cell is "activated" and a method on the Grid Sensor extracts data from said object and associates that data with the position of the activated cell. Thus the Grid Sensor is always orthographic:
A Grid Sensor is the Grid Observation analog of a Unity Camera but with some notable differences. The sensor is made up of a grid of identical Box Colliders which designate the "cells" of the grid. The Grid Sensor also has a list of "detectable objects" in the form of Unity GameObject tags. When an object that is tagged as a detectable object is present within a cell's Box Collider, that cell is "activated" and a method on the Grid Sensor extracts data from said object and associates that data with the position of the activated cell. Thus the Grid Sensor is always orthographic:
<img src="images/persp_ortho_proj.png" width="500">
<cite><a href="https://www.geofx.com/graphics/nehe-three-js/lessons17-24/lesson21/lesson21.html">geofx.com</a></cite>

Just like the Raycasts mentioned earlier, the Grid Sensor can extract any kind of data from a detected object and just like the Camera, the Grid Sensor maintains the spacial relationship between nearby cells that allows one to take advantage of the CNN literature. Thus the Grid Sensor tries to take the best of both sensors and combines them to something that is more expressive.
Just like the Raycasts mentioned earlier, the Grid Sensor can extract any kind of data from a detected object, and just like the Camera, the Grid Sensor maintains the spatial relationship between nearby cells that allows one to take advantage of the computational properties of CNNs. Thus the Grid Sensor tries to take the best of both sensors and combine them into something more expressive.
Lets imagine a scenario where an agent is faced with 2 enemies and there are 2 "equipable" weapons somewhat behind the agent. Lets also keep in mind some important properties of the enemies and weapons that would be useful for the agent to know. For simplicity, lets assume enemies represent their health as a percentage (0-100%). Lets also assume that enemies and weapons are the only 2 kind of objects that the agent would see in the entire game.
Let's imagine a scenario where an agent is faced with two enemies, and there are two "equippable" weapons somewhat behind the agent. It would be helpful for the agent to know the location and properties of both the enemies and the equippable items. For simplicity, let's assume enemies represent their health as a percentage (0-100%). Also assume that enemies and weapons are the only two kinds of objects the agent would see in the entire game.
<img src="images/gridsensor-example.png" align="middle" width="3000"/>
<img src="images/gridsensor-example.png" align="middle" width="512"/>
If a raycast hits an object, not only could we get the distance (normalized by the maximum raycast distance) we would be able to extract its type (enemy vs weapon) and if its an enemy then we could get its health (e.g., .6).
If a raycast hits an object, not only could we get the distance (normalized by the maximum raycast distance), but we could also extract its type (enemy vs. weapon) and any attribute associated with it (e.g., an enemy's health).
There are many ways in which one could encode this information but one reasonable encoding is this:
```
[isWeapon, isEnemy, enemyHealth, normalizedDistance]
```
For example, if the raycast hit nothing then this would be represented by `[0, 0, 0, 1]`.
If instead the raycast hit an enemy with 60% health that is 50% of the maximum raycast distance, the data would be represented by `[0, 1, .6, .5]`.
The limitations of raycasts which were presented above are easy to visualize in the below image. The agent is unable to see where the weapons are and only sees one of the enemies. Typically in the MLAgents examples, this situation is mitigated by including previous frames of data so that the agent observes changes through time. However, in more complex games, it is not difficult to imagine scenarios where an agent would not be able to observe important information using only Raycasts.
The limitations of raycasts which were presented above are easy to visualize in the below image. The agent is unable to see where the weapons are and only sees one of the enemies. Typically in the ML-Agents examples, this situation is mitigated by including previous frames of data so that the agent observes changes through time. However, in more complex games, it is not difficult to imagine scenarios where an agent might miss important information using only Raycasts.
<img src="images/gridsensor-example-raycast.png" align="middle" width="3000"/>
<img src="images/gridsensor-example-raycast.png" align="middle" width="512"/>
<img src="images/gridsensor-example-camera.png" align="middle" width="3000"/>
<img src="images/gridsensor-example-camera.png" align="middle" width="512"/>
#### Grid Sensor

Following the same data extraction method presented in the section on raycasts, if a Grid Sensor were used instead of Raycasts or a Camera, then not only would the agent be able to extract the health value of the enemies, but it would also be able to encode the relative positions of those objects, as is done with the Camera. Additionally, as the texture of the objects is not used, this data can be collected without rendering the scene.
<img src="images/gridsensor-example-gridsensor.png" align="middle" width="3000"/>
In our example, we can collect data in the form of [objectType, health] by overriding `GetObjectData` as follows:
```csharp
protected override float[] GetObjectData(GameObject currentColliderGo, float type_index, float normalized_distance)
{
    float[] channelValues = new float[ChannelDepth.Length]; // ChannelDepth.Length = 2 in this example
    channelValues[0] = type_index; // this is the observation collected in the default implementation
    if (currentColliderGo.tag == "enemy")
    {
        var enemy = currentColliderGo.GetComponent<EnemyClass>();
        channelValues[1] = enemy.health; // this value may need to be normalized depending on the GridSensor encoding you use (see sections below)
    }
    return channelValues;
}
```
At the end of the Collection phase, each cell with an object inside of it has `GetObjectData` called and the returned values (named `channelValues`) is then processed in the Encoding phase which is described in the next section.
<img src="images/gridsensor-example-gridsensor.png" align="middle" width="512"/>
At the end of the Collection phase, each cell with an object inside it has `GetObjectData` called, and the returned values are then processed in the Encoding phase, described in the next section.
The CountingGridSensor builds on the GridSesnor to perform the specific job of counting the number of object types that are based on the different detectable object tags. The encoding and is meant to exploit a key feature of the Grid Sensor. In both the Channel and the Channel Hot DepthTypes, the closest detectable object, in relation to the agent, that lays within a cell is used for encoding the value for that cell. In the CountingGridSensor, the number of each type of object is recorded and then normalized according to a max count, stored in the ChannelDepth.
The CountingGridSensor builds on the GridSensor to perform the specific job of counting the number of objects of each type, based on the different detectable object tags. The encoding is meant to exploit a key feature of the GridSensor: in the original GridSensor, only the closest detectable object (relative to the agent) that lies within a cell is used for encoding the value for that cell. In the CountingGridSensor, the number of each type of object is recorded and then normalized according to a max count.
An example of the CountingGridSensor can be found below.

In order to support different ways of representing the data extracted from an object, multiple "depth types" were implemented. Each has pros and cons and, depending on the use-case of the Grid Sensor, one may be more beneficial than the others.
The data stored that is extracted during the *Collection* phase, and stored in `channelValues`, may come from different sources. For instance, going back the Enemy/Weapon example in the previous section, an enemy's health is continuous whereas the object type (enemy or weapon) is categorical data. This distinction is important as categorical data requires a different encoding mechanism than continuous data.
The stored data that is extracted during the *Collection* phase may come from different sources, and thus be of a different nature. For instance, going back to the Enemy/Weapon example in the previous section, an enemy's health is continuous whereas the object type (enemy or weapon) is categorical data. This distinction is important as categorical data requires a different encoding mechanism than continuous data.
The Grid Sensor handles this distinction with 4 properties that define how this data is to be encoded:
* DepthType - Enum signifying the encoding mode: Channel, ChannelHot
* ObservationPerCell - the total number of values that are in each cell of the grid observation
* ChannelDepth - int[] describing the range of each data within the `channelValues`
* ChannelOffset - int[] describing the number of encoded values that come before each data within `channelValues`
The GridSensor handles this distinction with two user-defined properties that define how this data is to be encoded:
The ChannelDepth and the DepthType are user defined and gives control to the developer to how they can encode their data. The ChannelDepth and ChannelOffset are both initialized and used in different ways depending on the ChannelDepth and the DepthType.
* DepthType - Enum signifying the encoding mode: Channel, ChannelHot
* ChannelDepth - `int[]` describing the range of each value; it is used differently depending on the DepthType
How categorical and continuous data is treated is different between the different DepthTypes as will be explored in the sections below. The sections will use an on-going example similar to example mentioned earlier where, within a cell, the sensor observes: `an enemy with 60% health`. Thus the cell contains 2 kinds of data: categorical data (object type) and the continuous data (health). Additionally, the order of the observed tags is important as it allows one to encode the tag of the observed object by its index within list of observed tags. Note that in the example, the observed tags is defined as ["weapon", "enemy"].
How categorical and continuous data are treated differs between the DepthTypes, as explored in the sections below. The sections use an ongoing example similar to the one mentioned earlier where, within a cell, the sensor observes: `an enemy with 60% health`. Thus the cell contains two kinds of data: categorical data (object type) and continuous data (health). Additionally, the order of the observed tags is important, as it allows one to encode the tag of the observed object by its index within the list of observed tags. Note that in the example, the observed tags are defined as ["weapon", "enemy"].
The Channel Based Grid Observations is perhaps the simplest in terms of usability and similarity with other machine learning applications. Each grid is of size WxHxC where C is the number of channels. To distinguish between categorical and continuous data, one would use the ChannelDepth array to signify the ranges that the values in the `channelValues` array could take. If one sets ChannelDepth[i] to be 1, it is assumed that the value of `channelValues[i]` is already normalized. Else ChannelDepth[i] represents the total number of possible values that `channelValues[i]` can take.
Channel Based Grid Observations represent observations in normalized form between 0 and 1. To distinguish between categorical and continuous data, one uses the ChannelDepth array to signify the ranges that the values in the `channelValues` array can take. If one sets ChannelDepth[i] to 1, it is assumed that the value of `channelValues[i]` is already normalized. Otherwise ChannelDepth[i] represents the total number of possible values that `channelValues[i]` can take, and it will be used for normalization.
Using the example described earlier, if one was using Channel Based Grid Observations, they would have a ChannelDepth = {2, 1} to describe that there are two possible values for the first channel and the 1 represents that the second channel is already normalized.
As the "enemy" is in the second position of the observed tags, its value can be normalized by:
For continuous data, set `ChannelDepth[i]` to 1; the collected data should already be normalized by its min/max range. For discrete data, set `ChannelDepth[i]` to the total number of possible values; the collected data should be an integer value within the range of `ChannelDepth[i]`.
Using the example described earlier, if one were using Channel Based Grid Observations, they would have ChannelDepth = {2, 1} to describe that there are two possible values for the first channel (ObjectType), and the 1 represents that the second channel (EnemyHealth) is continuous and should already be normalized.
For ObjectType, "weapon" and "enemy" will be represented respectively as:
num = detectableObjects.IndexOfTag("enemy")/ChannelDepth[0] = 2/2 = 1;
weapon = DetectableObjects.IndexOfTag("weapon")/ChannelDepth[0] = 1/2 = 0.5;
enemy = DetectableObjects.IndexOfTag("enemy")/ChannelDepth[0] = 2/2 = 1;
`[1, .6]`
`[1, .6]`. If the health in the game is not represented in normalized form, for example if health is an integer ranging from -100 to 100, you'll need to manually normalize it during collection. That is, if you get the value 50, you need to normalize it with `(50 - (-100)) / (100 - (-100)) = 0.75` and collect 0.75 instead of 50.
The Channel Hot DepthType generalizes the classic OneHot encoding to differentiate combinations of different data. Rather than normalizing the data like in the Channel Based section, each element of `channelValues` is represented by an encoding based on the ChannelDepth. If ChannelDepth[i] = 1, then this represents that `channelValues[i]` is already normalized (between 0-1) and will be used directly within the encoding. However if ChannelDepth[i] is an integer greater than 1, then the value in `channelValues[i]` will be converted into a OneHot encoding based on the following:
The Channel Hot DepthType generalizes the classic OneHot encoding to differentiate combinations of different data. Rather than normalizing the data as in the Channel Based section, each element of `channelValues` is represented by an encoding based on the ChannelDepth. If ChannelDepth[i] = 1, then `channelValues[i]` is already normalized (between 0-1) and will be used directly within the encoding, the same as with Channel Based. However, if ChannelDepth[i] is an integer greater than 1, then the value in `channelValues[i]` will be converted into a OneHot encoding based on the following:
```
float[] arr = new float[ChannelDepth[i] + 1];
```

The encoding of each channel is then concatenated together. This setup clearly allows the developer to encode values using the classic OneHot encoding. Below are some variations of the ChannelDepth which create different encodings of the example:
##### ChannelDepth = {3, 1}
The first element, 3, signifies that there are 3 possibilities for the first channel and as the "enemy" is 2nd in the detected objects list, the "enemy" in the example is encoded as `[0, 0, 1]` where the first index represents "no object". The second element, 1, signifies that the health is already normalized and, following the table, is used directly. The resulting encoding is thus:
The first element, 3, signifies that there are three possibilities for the first channel and as the "enemy" is 2nd in the detected objects list, the "enemy" in the example is encoded as `[0, 0, 1]` where the first index represents "no object". The second element, 1, signifies that the health is already normalized and, following the table, is used directly. The resulting encoding is thus:
```
[0, 0, 1, 0.6]
```

### CountingGridSensor
As introduced above, the CountingGridSensor inherits from the GridSensor for the sole purpose of counting the different objects that lay within a cell. In order to normalize the counts so that the grid can be properly encoded as PNG, the ChannelDepth is used to represent the "maximum count" of each type. For the working example, if the ChannelDepth is set as {50, 10}, which represents that the maximum count for objects with the "weapon" and "enemy" tag is 50 and 10, respectively, then the resulting data would be:
As mentioned above, the CountingGridSensor inherits from the GridSensor for the sole purpose of counting the different objects that lie within a cell. In order to normalize the counts so that the grid can be properly encoded as PNG, the ChannelDepth is used to represent the "maximum count" of each type. For the working example, if the ChannelDepth is set as {50, 10}, representing that the maximum count for objects with the "weapon" and "enemy" tags is 50 and 10 respectively, then the resulting data would be:
```
encoding = [0 weapons/ 50 weapons, 1 enemy / 10 enemies] = [0, .1]
```

At the end of the Encoding phase, all of the data for a Grid Observation is placed into a float[] referred to as the perception buffer. Now the data is ready to be sent to either the python side for training or to be used by a trained model within Unity. This is where the Grid Sensor takes advantage of 2D textures and the PNG encoding schema to reduce the number of bytes that are being sent.
At the end of the Encoding phase, all the Grid Observations are sent either to the Python side for training or to a trained model within Unity. Since the data format is similar to images collected by Camera Sensors, Grid Observations also have the CompressionType option to specify whether to send the data directly or in PNG-compressed form for better communication efficiency.
The 2D texture is a Unity class that encodes the colors of an image. It is used in many ways throughout Unity, but there are 2 specific methods that the Grid Sensor takes advantage of:
`SetPixels` takes a 2D array of Colors and assigns the color values to the texture.
`EncodeToPNG` returns a byte[] containing the PNG encoding of the colors of the texture.
Together these 2 functions allow one to "push" a WxHx3 normalized array to a PNG byte[]. And indeed, this is how the Camera Sensor in Unity ML-Agents sends its data to Python. However, the Grid Sensor can have N channels, so there needs to be a more generic way to send the data.
The core idea behind how a Grid Observation is encoded is the following (see the sketch after this list):
1. split the channels of a Grid Observation into groups of 3
2. encode each of these groups as a PNG byte[]
3. concatenate all byte[] and send the combined array to python
4. reconstruct the Grid Observation by splitting up the array and decoding the sections
Once the bytes are sent to python, they are then decoded and used as a tensor of the correct shape within the mlagents python codebase.
Once the bytes are sent to Python, they are then decoded and provided as a tensor of the correct shape.

13
com.unity.ml-agents.extensions/Documentation~/com.unity.ml-agents.extensions.md


* [Match-3 sensor and actuator](Match3.md)
* [Grid-based sensor](Grid-Sensor.md)
* Physics-based sensors
* [Input System Package Integration](InputActuatorComponent.md)
## Installation
The ML-Agents Extensions package is not currently available in the Package Manager. There are two ways to install it:

### Github via Package Manager
In Unity 2019.4 or later, open the Package Manager, hit the "+" button, and select "Add package from git URL".
![Package Manager git URL](https://github.com/Unity-Technologies/ml-agents/tree/release_12_docs/docs/images/unity_package_manager_git_url.png)
![Package Manager git URL](https://github.com/Unity-Technologies/ml-agents/blob/release_12_docs/docs/images/unity_package_manager_git_url.png)
In the dialog that appears, enter
```
git+https://github.com/Unity-Technologies/ml-agents.git?path=com.unity.ml-agents.extensions

This version of the Unity ML-Agents Extensions package is compatible with the
following versions of the Unity Editor:
- 2018.4 and later
- If using the `InputActuatorComponent`
- 2019.4 or later
- install the `com.unity.inputsystem` package version `1.1.0-preview.3` or later.
- Else 2018.4 and later
none
- For the `InputActuatorComponent`
- Limited implementation of `InputControls`
- No way to customize the action space of the `InputActuatorComponent`
## Need Help?
The main [README](https://github.com/Unity-Technologies/ml-agents/tree/release_12_docs/README.md) contains links for contacting the team or getting support.

3
com.unity.ml-agents.extensions/Runtime/Unity.ML-Agents.Extensions.asmdef


"name": "Unity.ML-Agents.Extensions",
"references": [
"Unity.Barracuda",
"Unity.ML-Agents"
"Unity.ML-Agents",
"Unity.ML-Agents.Extensions.Input"
],
"includePlatforms": [],
"excludePlatforms": []

2
com.unity.ml-agents.extensions/package.json


"unity": "2018.4",
"description": "A source-only package for new features based on ML-Agents",
"dependencies": {
"com.unity.ml-agents": "1.7.2-preview"
"com.unity.ml-agents": "1.8.0-preview"
}
}

4
com.unity.ml-agents/CHANGELOG.md


- `InferenceDevice.Burst` was added, indicating that Agent's model will be run using Barracuda's Burst backend.
This is the default for new Agents, but existing ones that use `InferenceDevice.CPU` should update to
`InferenceDevice.Burst`. (#4925)
- Add an InputActuatorComponent to allow the generation of Agent action spaces from an InputActionAsset.
Projects wanting to use this feature will need to add the
[Input System Package](https://docs.unity3d.com/Packages/com.unity.inputsystem@1.1/manual/index.html)
at version 1.1.0-preview.3 or later. (#4881)
#### ml-agents / ml-agents-envs / gym-unity (Python)
- Tensorboard now logs the Environment Reward as both a scalar and a histogram. (#4878)

8
com.unity.ml-agents/Documentation~/com.unity.ml-agents.md


# About ML-Agents package (`com.unity.ml-agents`)
The Unity ML-Agents package contains the C# SDK for the [Unity ML-Agents
The _ML-Agents_ package contains the primary C# SDK for the [Unity ML-Agents
Toolkit].
The package allows you to convert any Unity scene into a learning environment

instrumenting a Unity scene, setting it up for training, and then embedding the
trained model back into your Unity scene. The machine learning algorithms that
orchestrate training are part of the companion [Python package].
Note that we also provide an _ML-Agents Extensions_ package
(`com.unity.ml-agents.extensions`) that contains early/experimental features
that you may find useful. This package is only available from the [ML-Agents
GitHub repo].
## Package contents

[execution order of event functions]: https://docs.unity3d.com/Manual/ExecutionOrder.html
[connect with us]: https://github.com/Unity-Technologies/ml-agents#community-and-feedback
[ml-agents forum]: https://forum.unity.com/forums/ml-agents.453/
[ML-Agents GitHub repo]: https://github.com/Unity-Technologies/ml-agents/blob/release_12_docs/com.unity.ml-agents.extensions

10
com.unity.ml-agents/Runtime/Academy.cs


/// Unity package version of com.unity.ml-agents.
/// This must match the version string in package.json and is checked in a unit test.
/// </summary>
internal const string k_PackageVersion = "1.7.2-preview";
internal const string k_PackageVersion = "1.8.0-preview";
const int k_EditorTrainingPort = 5004;

var port = ReadPortFromArgs();
if (port > 0)
{
Communicator = new RpcCommunicator(
new CommunicatorInitParameters
{
port = port
}
);
Communicator = CommunicatorFactory.Create();
}
if (Communicator != null)

bool initSuccessful = false;
var communicatorInitParams = new CommunicatorInitParameters
{
port = port,
unityCommunicationVersion = k_ApiVersion,
unityPackageVersion = k_PackageVersion,
name = "AcademySingleton",

45
com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs


using System;
using System.Collections.Generic;
using Unity.MLAgents.Policies;
using UnityEngine;
namespace Unity.MLAgents.Actuators

public ActionSpec(int numContinuousActions = 0, int[] discreteBranchSizes = null)
{
m_NumContinuousActions = numContinuousActions;
BranchSizes = discreteBranchSizes;
BranchSizes = discreteBranchSizes ?? Array.Empty<int>();
}
/// <summary>

"ActionSpecs must be all continuous or all discrete."
);
}
}
/// <summary>
/// Combines a list of action specs and allocates a new array of branch sizes if needed.
/// </summary>
/// <param name="specs">The list of action specs to combine.</param>
/// <returns>An ActionSpec which represents the aggregate of the ActionSpecs passed in.</returns>
public static ActionSpec Combine(params ActionSpec[] specs)
{
    var numContinuous = 0;
    var numDiscrete = 0;
    for (var i = 0; i < specs.Length; i++)
    {
        var spec = specs[i];
        numContinuous += spec.NumContinuousActions;
        numDiscrete += spec.NumDiscreteActions;
    }

    if (numDiscrete <= 0)
    {
        return MakeContinuous(numContinuous);
    }

    var branchSizes = new int[numDiscrete];
    var offset = 0;
    for (var i = 0; i < specs.Length; i++)
    {
        var spec = specs[i];
        if (spec.BranchSizes.Length == 0)
        {
            continue;
        }
        var branchSizesLength = spec.BranchSizes.Length;
        Array.Copy(spec.BranchSizes, 0, branchSizes, offset, branchSizesLength);
        offset += branchSizesLength;
    }
    return new ActionSpec(numContinuous, branchSizes);
}
}
}

12
com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs


using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Profiling;
namespace Unity.MLAgents.Actuators
{

/// actions for the IActuators in this list.</param>
public void UpdateActions(ActionBuffers actions)
{
Profiler.BeginSample("ActuatorManager.UpdateActions");
Profiler.EndSample();
}
static void UpdateActionArray<T>(ActionSegment<T> sourceActionBuffer, ActionSegment<T> destination)

/// </summary>
public void ApplyHeuristic(in ActionBuffers actionBuffersOut)
{
Profiler.BeginSample("ActuatorManager.ApplyHeuristic");
var continuousStart = 0;
var discreteStart = 0;
for (var i = 0; i < m_Actuators.Count; i++)

continuousStart += numContinuousActions;
discreteStart += numDiscreteActions;
}
Profiler.EndSample();
}
/// <summary>

/// </summary>
public void ExecuteActions()
{
Profiler.BeginSample("ActuatorManager.ExecuteActions");
ReadyActuatorsForExecution();
var continuousStart = 0;
var discreteStart = 0;

var numContinuousActions = actuator.ActionSpec.NumContinuousActions;
var numDiscreteActions = actuator.ActionSpec.NumDiscreteActions;
if (numContinuousActions == 0 && numDiscreteActions == 0)
{
continue;
}
var continuousActions = ActionSegment<float>.Empty;
if (numContinuousActions > 0)
{

continuousStart += numContinuousActions;
discreteStart += numDiscreteActions;
}
Profiler.EndSample();
}
/// <summary>

10
com.unity.ml-agents/Runtime/Actuators/VectorActuator.cs


using UnityEngine.Profiling;
namespace Unity.MLAgents.Actuators
{
/// <summary>

/// <inheritdoc />
public void OnActionReceived(ActionBuffers actionBuffers)
{
ActionBuffers = actionBuffers;
m_ActionReceiver.OnActionReceived(ActionBuffers);
Profiler.BeginSample("VectorActuator.OnActionReceived");
m_ActionBuffers = actionBuffers;
m_ActionReceiver.OnActionReceived(m_ActionBuffers);
Profiler.EndSample();
Profiler.BeginSample("VectorActuator.Heuristic");
Profiler.EndSample();
}
/// <inheritdoc />

8
com.unity.ml-agents/Runtime/Agent.deprecated.cs


using System;
using UnityEngine;
using UnityEngine.Profiling;
namespace Unity.MLAgents
{

[Obsolete("GetAction has been deprecated, please use GetStoredActionBuffers instead.")]
public float[] GetAction()
{
Profiler.BeginSample("Agent.GetAction.Deprecated");
var actionSpec = m_PolicyFactory.BrainParameters.ActionSpec;
// For continuous and discrete actions together, this shouldn't be called because we can only return one.
if (actionSpec.NumContinuousActions > 0 && actionSpec.NumDiscreteActions > 0)

{
return storedAction.ContinuousActions.Array;
}
else
{
return Array.ConvertAll(storedAction.DiscreteActions.Array, x => (float)x);
}
Profiler.EndSample();
return Array.ConvertAll(storedAction.DiscreteActions.Array, x => (float)x);
}
}
}

3
com.unity.ml-agents/Runtime/Analytics/Events.cs


public static EventObservationSpec FromSensor(ISensor sensor)
{
var shape = sensor.GetObservationShape();
var dimProps = (sensor as IDimensionPropertiesSensor)?.GetDimensionProperties();
// TODO copy flags when we have them
dimInfos[i].Flags = dimProps != null ? (int)dimProps[i] : 0;
}
var builtInSensorType =

1
com.unity.ml-agents/Runtime/AssemblyInfo.cs


[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor.Tests")]
[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor")]
[assembly: InternalsVisibleTo("Unity.ML-Agents.Extensions")]
[assembly: InternalsVisibleTo("Unity.ML-Agents.Extensions.Input")]

49
com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs


# if UNITY_EDITOR || UNITY_STANDALONE_WIN || UNITY_STANDALONE_OSX || UNITY_STANDALONE_LINUX
#if UNITY_EDITOR || UNITY_STANDALONE_WIN || UNITY_STANDALONE_OSX || UNITY_STANDALONE_LINUX
#endif
#if UNITY_EDITOR
using UnityEditor;
#endif

Dictionary<string, ActionSpec> m_UnsentBrainKeys = new Dictionary<string, ActionSpec>();
#if UNITY_EDITOR || UNITY_STANDALONE_WIN || UNITY_STANDALONE_OSX || UNITY_STANDALONE_LINUX
#endif
/// The communicator parameters sent at construction
CommunicatorInitParameters m_CommunicatorInitParameters;
/// <param name="communicatorInitParameters">Communicator parameters.</param>
public RpcCommunicator(CommunicatorInitParameters communicatorInitParameters)
public RpcCommunicator()
m_CommunicatorInitParameters = communicatorInitParameters;
#region Initialization
internal static bool CheckCommunicationVersionsAreCompatible(
string unityCommunicationVersion,

try
{
initializationInput = Initialize(
initParameters.port,
new UnityOutputProto
{
RlInitializationOutput = academyParameters

SendCommandEvent(rlInput.Command);
}
UnityInputProto Initialize(UnityOutputProto unityOutput, out UnityInputProto unityInput)
UnityInputProto Initialize(int port, UnityOutputProto unityOutput, out UnityInputProto unityInput)
#if UNITY_EDITOR || UNITY_STANDALONE_WIN || UNITY_STANDALONE_OSX || UNITY_STANDALONE_LINUX
var channel = new Channel(
"localhost:" + m_CommunicatorInitParameters.port,
ChannelCredentials.Insecure);
var channel = new Channel($"localhost:{port}", ChannelCredentials.Insecure);
m_Client = new UnityToExternalProto.UnityToExternalProtoClient(channel);
var result = m_Client.Exchange(WrapMessage(unityOutput, 200));

QuitCommandReceived?.Invoke();
}
return result.UnityInput;
#else
throw new UnityAgentsException("You cannot perform training on this platform.");
#endif
#endregion
#region Destruction
/// <summary>
/// Close the communicator gracefully on both sides of the communication.

#if UNITY_EDITOR || UNITY_STANDALONE_WIN || UNITY_STANDALONE_OSX || UNITY_STANDALONE_LINUX
if (!m_IsOpen)
{
return;

{
// ignored
}
#else
throw new UnityAgentsException(
"You cannot perform training on this platform.");
#endif
#endregion
#region Sending Events
void SendCommandEvent(CommandProto command)
{

}
}
#endregion
#region Sending and retreiving data
public void DecideBatch()
{

/// <param name="unityOutput">The UnityOutput to be sent.</param>
UnityInputProto Exchange(UnityOutputProto unityOutput)
{
#if UNITY_EDITOR || UNITY_STANDALONE_WIN || UNITY_STANDALONE_OSX || UNITY_STANDALONE_LINUX
if (!m_IsOpen)
{
return null;

QuitCommandReceived?.Invoke();
return null;
}
#else
throw new UnityAgentsException(
"You cannot perform training on this platform.");
#endif
}
/// <summary>

}
}
#endregion
#if UNITY_EDITOR
/// <summary>

#endif
}
}
#endif // UNITY_EDITOR || UNITY_STANDALONE_WIN || UNITY_STANDALONE_OSX || UNITY_STANDALONE_LINUX

28
com.unity.ml-agents/Runtime/Policies/BehaviorParameters.cs


[HideInInspector, SerializeField]
BrainParameters m_BrainParameters = new BrainParameters();
public delegate void PolicyUpdated(bool isInHeuristicMode);
internal event PolicyUpdated OnPolicyUpdated;
/// <summary>
/// The associated <see cref="Policies.BrainParameters"/> for this behavior.
/// </summary>

get { return m_BehaviorName + "?team=" + TeamId; }
}
void Awake()
{
OnPolicyUpdated += mode => { };
}
internal IPolicy GeneratePolicy(ActionSpec actionSpec, ActuatorManager actuatorManager)
{
switch (m_BehaviorType)

}
}
/// <summary>
/// Query the behavior parameters in order to see if the Agent is running in Heuristic Mode.
/// </summary>
/// <returns>true if the Agent is running in Heuristic mode.</returns>
public bool IsInHeuristicMode()
{
    if (BehaviorType == BehaviorType.HeuristicOnly)
    {
        return true;
    }

    return BehaviorType == BehaviorType.Default &&
        ReferenceEquals(Model, null) &&
        (!Academy.IsInitialized ||
            Academy.IsInitialized &&
            !Academy.Instance.IsCommunicatorOn);
}
internal void UpdateAgentPolicy()
{
var agent = GetComponent<Agent>();

}
agent.ReloadPolicy();
OnPolicyUpdated?.Invoke(IsInHeuristicMode());
}
}
}

2
com.unity.ml-agents/Tests/Editor/Analytics/InferenceAnalyticsTests.cs


Assert.AreEqual(2, continuousEvent.ObservationSpecs.Count);
Assert.AreEqual(3, continuousEvent.ObservationSpecs[0].DimensionInfos.Length);
Assert.AreEqual(20, continuousEvent.ObservationSpecs[0].DimensionInfos[0].Size);
Assert.AreEqual((int)DimensionProperty.TranslationalEquivariance, continuousEvent.ObservationSpecs[0].DimensionInfos[0].Flags);
Assert.AreEqual((int)DimensionProperty.None, continuousEvent.ObservationSpecs[0].DimensionInfos[2].Flags);
Assert.AreEqual("None", continuousEvent.ObservationSpecs[0].CompressionType);
Assert.AreEqual(Test3DSensor.k_BuiltInSensorType, continuousEvent.ObservationSpecs[0].BuiltInSensorType);
Assert.AreNotEqual(null, continuousEvent.ModelHash);

46
com.unity.ml-agents/Tests/Editor/BehaviorParameterTests.cs


using NUnit.Framework;
using Unity.Barracuda;
using UnityEditor;
using UnityEngine.TestTools;
namespace Unity.MLAgents.Tests
{

const string k_continuousONNXPath = "Packages/com.unity.ml-agents/Tests/Editor/TestModels/continuous2vis8vec2action.onnx";
public void Heuristic(in ActionBuffers actionsOut)
{
// No-op

{
bp.GeneratePolicy(actionSpec, new ActuatorManager());
});
}
[Test]
public void TestIsInHeuristicMode()
{
var gameObj = new GameObject();
var bp = gameObj.AddComponent<BehaviorParameters>();
bp.Model = null;
gameObj.AddComponent<Agent>();
bp.BehaviorType = BehaviorType.HeuristicOnly;
Assert.IsTrue(bp.IsInHeuristicMode());
bp.BehaviorType = BehaviorType.Default;
Assert.IsTrue(bp.IsInHeuristicMode());
bp.Model = ScriptableObject.CreateInstance<NNModel>();
Assert.IsFalse(bp.IsInHeuristicMode());
}
[Test]
public void TestPolicyUpdateEventFired()
{
var gameObj = new GameObject();
var bp = gameObj.AddComponent<BehaviorParameters>();
gameObj.AddComponent<Agent>().LazyInitialize();
bp.OnPolicyUpdated += delegate (bool isInHeuristicMode) { Debug.Log($"OnPolicyChanged:{isInHeuristicMode}"); };
bp.BehaviorType = BehaviorType.HeuristicOnly;
LogAssert.Expect(LogType.Log, $"OnPolicyChanged:{true}");
bp.BehaviorType = BehaviorType.Default;
LogAssert.Expect(LogType.Log, $"OnPolicyChanged:{true}");
Assert.Throws<UnityAgentsException>(() =>
{
bp.BehaviorType = BehaviorType.InferenceOnly;
});
bp.Model = AssetDatabase.LoadAssetAtPath<NNModel>(k_continuousONNXPath);
LogAssert.Expect(LogType.Log, $"OnPolicyChanged:{false}");
bp.BehaviorType = BehaviorType.HeuristicOnly;
LogAssert.Expect(LogType.Log, $"OnPolicyChanged:{true}");
}
}
}

12
com.unity.ml-agents/Tests/Editor/ParameterLoaderTest.cs


return Sensor.GetObservationShape();
}
}
public class Test3DSensor : ISensor, IBuiltInSensor
public class Test3DSensor : ISensor, IBuiltInSensor, IDimensionPropertiesSensor
{
int m_Width;
int m_Height;

public BuiltInSensorType GetBuiltInSensorType()
{
return (BuiltInSensorType)k_BuiltInSensorType;
}
public DimensionProperty[] GetDimensionProperties()
{
return new[]
{
DimensionProperty.TranslationalEquivariance,
DimensionProperty.TranslationalEquivariance,
DimensionProperty.None
};
}
}

4
com.unity.ml-agents/package.json


{
"name": "com.unity.ml-agents",
"displayName": "ML Agents",
"version": "1.7.2-preview",
"version": "1.8.0-preview",
"com.unity.barracuda": "1.3.0-preview",
"com.unity.barracuda": "1.3.1-preview",
"com.unity.modules.imageconversion": "1.0.0",
"com.unity.modules.jsonserialize": "1.0.0",
"com.unity.modules.physics": "1.0.0",

24
docs/Installation.md


- Unity package ([`com.unity.ml-agents`](../com.unity.ml-agents/)) contains the
Unity C# SDK that will be integrated into your Unity scene.
- Unity package
([`com.unity.ml-agents.extensions`](../com.unity.ml-agents.extensions/))
contains experimental C#/Unity components that are not yet ready to be part
of the base `com.unity.ml-agents` package. `com.unity.ml-agents.extensions`
has a direct dependency on `com.unity.ml-agents`.
- Three Python packages:
- [`mlagents`](../ml-agents/) contains the machine learning algorithms that
enable you to train behaviors in your Unity scene. Most users of ML-Agents

- Install Python (3.6.1 or higher)
- Clone this repository (Optional)
- __Note:__ If you do not clone the repository, then you will not be
able to access the example environments and training configurations.
Additionally, the [Getting Started Guide](Getting-Started.md) assumes that
you have cloned the repository.
able to access the example environments and training configurations or the
`com.unity.ml-agents.extensions` package. Additionally, the
[Getting Started Guide](Getting-Started.md) assumes that you have cloned the
repository.
- Install the `com.unity.ml-agents.extensions` Unity package (Optional)
- Install the `mlagents` Python package
### Install **Unity 2018.4** or Later

`com.unity.ml-agents` package
[directly from the Package Manager registry](https://docs.unity3d.com/Manual/upm-ui-install.html).
Please make sure you enable 'Preview Packages' in the 'Advanced' dropdown in
order to find it.
order to find the latest Preview release of the package.
**NOTE:** If you do not see the ML-Agents package listed in the Package Manager
please follow the [advanced installation instructions](#advanced-local-installation-for-development) below.

If you are going to follow the examples from our documentation, you can open the
`Project` folder in Unity and start tinkering immediately.
### Install the `com.unity.ml-agents.extensions` Unity package (Optional)
To install the `com.unity.ml-agents.extensions` package, you need to first
clone the repo and then complete a local installation similar to what was
outlined in the previous
[Advanced: Local Installation for Development](#advanced-local-installation-for-development-1)
section. Complete installation steps can be found in the
[package documentation](../com.unity.ml-agents.extensions/Documentation~/com.unity.ml-agents.extensions.md#installation).
### Install the `mlagents` Python package

4
docs/Learning-Environment-Design-Agents.md


```csharp
public override void WriteDiscreteActionMask(IDiscreteActionMask actionMask)
{
actionMasker.WriteMask(branch, actionIndices)
actionMask.WriteMask(branch, actionIndices);
}
```

are masked)
```csharp
WriteMask(0, new int[2]{1,2})
WriteMask(0, new int[2]{1,2});
```
Notes:

2
docs/Learning-Environment-Examples.md


- Recommended minimum: 6
- Recommended maximum: 20
# Strikers Vs. Goalie
## Strikers Vs. Goalie
![StrikersVsGoalie](images/strikersvsgoalie.png)

2
gym-unity/gym_unity/__init__.py


# Version of the library that will be used to upload to pypi
__version__ = "0.24.0.dev0"
__version__ = "0.25.0.dev0"
# Git tag that will be checked to determine whether to trigger upload to pypi
__release_tag__ = None

2
ml-agents-envs/mlagents_envs/__init__.py


# Version of the library that will be used to upload to pypi
__version__ = "0.24.0.dev0"
__version__ = "0.25.0.dev0"
# Git tag that will be checked to determine whether to trigger upload to pypi
__release_tag__ = None

8
ml-agents/mlagents/plugins/stats_writer.py


and evaluates them, and returns the list of all the StatsWriter implementations.
"""
all_stats_writers: List[StatsWriter] = []
if ML_AGENTS_STATS_WRITER not in importlib_metadata.entry_points():
logger.warning(
f"Unable to find any entry points for {ML_AGENTS_STATS_WRITER}, even the default ones. "
"Uninstalling and reinstalling ml-agents via pip should resolve. "
"Using default plugins for now."
)
return get_default_stats_writers(run_options)
entry_points = importlib_metadata.entry_points()[ML_AGENTS_STATS_WRITER]
for entry_point in entry_points:

2
ml-agents/mlagents/trainers/__init__.py


# Version of the library that will be used to upload to pypi
__version__ = "0.24.0.dev0"
__version__ = "0.25.0.dev0"
# Git tag that will be checked to determine whether to trigger upload to pypi
__release_tag__ = None

4
ml-agents/tests/yamato/standalone_build_tests.py


if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--scene", default=None)
parser.add_argument("--build-target", default="mac", choices=["mac", "linux"])
parser.add_argument(
"--build-target", default="mac", choices=["mac", "linux", "ios", "webgl"]
)
args = parser.parse_args()
main(args.scene, args.build_target)

11
ml-agents/tests/yamato/yamato_utils.py


import subprocess
import yaml
from sys import platform
from typing import List, Optional
from typing import List, Optional, Mapping
def get_unity_executable_path():

print(f"Running BuildStandalonePlayer via {unity_exe}")
# enum values from https://docs.unity3d.com/2019.4/Documentation/ScriptReference/BuildTarget.html
build_target_to_enum = {
build_target_to_enum: Mapping[Optional[str], str] = {
# Convert the short name to the official enum
# Just pass through if it's not on the list.
build_target_enum = build_target_to_enum.get(build_target, build_target)
test_args = [
unity_exe,

os.makedirs(os.path.dirname(output_path), exist_ok=True)
if scene_path is not None:
test_args += ["--mlagents-build-scene-path", scene_path]
if build_target is not None:
test_args += ["--mlagents-build-target", build_target_to_enum[build_target]]
if build_target_enum is not None:
test_args += ["--mlagents-build-target", build_target_enum]
print(f"{' '.join(test_args)} ...")
timeout = 30 * 60 # 30 minutes, just in case

30
.yamato/standalone-build-webgl-test.yml


{% capture editor_version %}2020.2{% endcapture %}
test_webgl_standalone_{{ editor_version }}:
name: Test WebGL Standalone {{ editor_version }}
agent:
type: Unity::VM
image: package-ci/ubuntu:stable
flavor: b1.large
variables:
UNITY_VERSION: {{ editor_version }}
commands:
- |
python3 -m venv venv && source venv/bin/activate
python -m pip install pyyaml --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple
python -m pip install unity-downloader-cli --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple --upgrade
unity-downloader-cli -u {{ editor_version }} -c editor -c WebGL --wait --fast
python -u -m ml-agents.tests.yamato.standalone_build_tests --build-target=webgl
triggers:
cancel_old_ci: true
recurring:
- branch: master
frequency: weekly
artifacts:
logs:
paths:
- "artifacts/standalone_build.txt"
standalonebuild:
paths:
- "artifacts/testPlayer*/**"
- "artifacts/**/UnityPlayer.so"

2
DevProject/DevProject.sln.DotSettings


<wpf:ResourceDictionary xml:space="preserve" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:s="clr-namespace:System;assembly=mscorlib" xmlns:ss="urn:shemas-jetbrains-com:settings-storage-xaml" xmlns:wpf="http://schemas.microsoft.com/winfx/2006/xaml/presentation">
<s:String x:Key="/Default/CodeStyle/Naming/CSharpNaming/Abbreviations/=RL/@EntryIndexedValue">RL</s:String></wpf:ResourceDictionary>

519
DevProject/Packages/packages-lock.json


{
"dependencies": {
"com.unity.2d.sprite": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.2d.tilemap": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.ads": {
"version": "3.6.1",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.ugui": "1.0.0"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.analytics": {
"version": "3.3.5",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.ugui": "1.0.0"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.barracuda": {
"version": "1.3.0-preview",
"depth": 1,
"source": "registry",
"dependencies": {
"com.unity.burst": "1.3.4",
"com.unity.modules.jsonserialize": "1.0.0",
"com.unity.modules.imageconversion": "1.0.0"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.burst": {
"version": "1.3.4",
"depth": 2,
"source": "registry",
"dependencies": {
"com.unity.mathematics": "1.2.1"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.coding": {
"version": "0.1.0-preview.13",
"depth": 0,
"source": "registry",
"dependencies": {
"nuget.moq": "1.0.0"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.collab-proxy": {
"version": "1.2.16",
"depth": 0,
"source": "registry",
"dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.ext.nunit": {
"version": "1.0.6",
"depth": 1,
"source": "registry",
"dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.ide.rider": {
"version": "1.1.4",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.test-framework": "1.1.1"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.ide.vscode": {
"version": "1.2.3",
"depth": 0,
"source": "registry",
"dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.inputsystem": {
"version": "1.1.0-preview.3",
"depth": 0,
"source": "registry",
"dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.mathematics": {
"version": "1.2.1",
"depth": 3,
"source": "registry",
"dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.ml-agents": {
"version": "file:../../com.unity.ml-agents",
"depth": 0,
"source": "local",
"dependencies": {
"com.unity.barracuda": "1.3.0-preview",
"com.unity.modules.imageconversion": "1.0.0",
"com.unity.modules.jsonserialize": "1.0.0",
"com.unity.modules.physics": "1.0.0",
"com.unity.modules.physics2d": "1.0.0",
"com.unity.modules.unityanalytics": "1.0.0"
}
},
"com.unity.ml-agents.extensions": {
"version": "file:../../com.unity.ml-agents.extensions",
"depth": 0,
"source": "local",
"dependencies": {
"com.unity.ml-agents": "1.7.2-preview"
}
},
"com.unity.multiplayer-hlapi": {
"version": "1.0.8",
"depth": 0,
"source": "registry",
"dependencies": {
"nuget.mono-cecil": "0.1.6-preview"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.nuget.mono-cecil": {
"version": "0.1.6-preview.2",
"depth": 1,
"source": "registry",
"dependencies": {
"nuget.mono-cecil": "0.1.6-preview"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.nuget.newtonsoft-json": {
"version": "2.0.0-preview",
"depth": 1,
"source": "registry",
"dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.package-manager-doctools": {
"version": "1.7.0-preview",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.package-validation-suite": "0.10.0-preview",
"com.unity.nuget.newtonsoft-json": "2.0.0-preview"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.package-validation-suite": {
"version": "0.19.0-preview",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.nuget.mono-cecil": "0.1.6-preview.2"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.purchasing": {
"version": "2.2.1",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.ugui": "1.0.0"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.settings-manager": {
"version": "1.0.1",
"depth": 1,
"source": "registry",
"dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.test-framework": {
"version": "1.1.20",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.ext.nunit": "1.0.6",
"com.unity.modules.imgui": "1.0.0",
"com.unity.modules.jsonserialize": "1.0.0"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.test-framework.performance": {
"version": "2.2.0-preview",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.test-framework": "1.1.0",
"com.unity.nuget.newtonsoft-json": "2.0.0-preview"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.testtools.codecoverage": {
"version": "1.0.0-pre.3",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.test-framework": "1.0.16",
"com.unity.settings-manager": "1.0.1"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.textmeshpro": {
"version": "2.0.1",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.ugui": "1.0.0"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.timeline": {
"version": "1.2.12",
"depth": 0,
"source": "registry",
"dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.ugui": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.ui": "1.0.0",
"com.unity.modules.imgui": "1.0.0"
}
},
"com.unity.xr.legacyinputhelpers": {
"version": "2.1.7",
"depth": 0,
"source": "registry",
"dependencies": {
"com.unity.modules.vr": "1.0.0",
"com.unity.modules.xr": "1.0.0"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"nuget.castle-core": {
"version": "1.0.1",
"depth": 2,
"source": "registry",
"dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"nuget.mono-cecil": {
"version": "0.1.6-preview",
"depth": 1,
"source": "registry",
"dependencies": {},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"nuget.moq": {
"version": "1.0.0",
"depth": 1,
"source": "registry",
"dependencies": {
"nuget.castle-core": "1.0.1"
},
"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.modules.ai": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.androidjni": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.animation": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.assetbundle": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.audio": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.cloth": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.physics": "1.0.0"
}
},
"com.unity.modules.director": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.audio": "1.0.0",
"com.unity.modules.animation": "1.0.0"
}
},
"com.unity.modules.imageconversion": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.imgui": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.jsonserialize": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.particlesystem": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.physics": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.physics2d": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.screencapture": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.imageconversion": "1.0.0"
}
},
"com.unity.modules.subsystems": {
"version": "1.0.0",
"depth": 1,
"source": "builtin",
"dependencies": {
"com.unity.modules.jsonserialize": "1.0.0"
}
},
"com.unity.modules.terrain": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.terrainphysics": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.physics": "1.0.0",
"com.unity.modules.terrain": "1.0.0"
}
},
"com.unity.modules.tilemap": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.physics2d": "1.0.0"
}
},
"com.unity.modules.ui": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.uielements": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.imgui": "1.0.0",
"com.unity.modules.jsonserialize": "1.0.0"
}
},
"com.unity.modules.umbra": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.unityanalytics": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.unitywebrequest": "1.0.0",
"com.unity.modules.jsonserialize": "1.0.0"
}
},
"com.unity.modules.unitywebrequest": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.unitywebrequestassetbundle": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.assetbundle": "1.0.0",
"com.unity.modules.unitywebrequest": "1.0.0"
}
},
"com.unity.modules.unitywebrequestaudio": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.unitywebrequest": "1.0.0",
"com.unity.modules.audio": "1.0.0"
}
},
"com.unity.modules.unitywebrequesttexture": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.unitywebrequest": "1.0.0",
"com.unity.modules.imageconversion": "1.0.0"
}
},
"com.unity.modules.unitywebrequestwww": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.unitywebrequest": "1.0.0",
"com.unity.modules.unitywebrequestassetbundle": "1.0.0",
"com.unity.modules.unitywebrequestaudio": "1.0.0",
"com.unity.modules.audio": "1.0.0",
"com.unity.modules.assetbundle": "1.0.0",
"com.unity.modules.imageconversion": "1.0.0"
}
},
"com.unity.modules.vehicles": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.physics": "1.0.0"
}
},
"com.unity.modules.video": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.audio": "1.0.0",
"com.unity.modules.ui": "1.0.0",
"com.unity.modules.unitywebrequest": "1.0.0"
}
},
"com.unity.modules.vr": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.jsonserialize": "1.0.0",
"com.unity.modules.physics": "1.0.0",
"com.unity.modules.xr": "1.0.0"
}
},
"com.unity.modules.wind": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {}
},
"com.unity.modules.xr": {
"version": "1.0.0",
"depth": 0,
"source": "builtin",
"dependencies": {
"com.unity.modules.physics": "1.0.0",
"com.unity.modules.jsonserialize": "1.0.0",
"com.unity.modules.subsystems": "1.0.0"
}
}
}
}

57
com.unity.ml-agents.extensions/Documentation~/InputActuatorComponent.md


# Integration of the Input System Package with ML-Agents
## Overview
One area we are always trying to improve is getting developers up and running with ML-Agents. With this in mind,
we have implemented an `InputActuatorComponent`. This component integrates with the
[Input System Package](https://docs.unity3d.com/Packages/com.unity.inputsystem@1.1/manual/QuickStartGuide.html)
to set up an action space for your `Agent` based on an `InputActionAsset` that is referenced by the
`IInputActionAssetProvider` interface, or the `PlayerInput` component that may be living on your player-controlled
`Agent`. This means that if you have code outside of your Agent that handles input, you will not need to implement
the Heuristic function in the Agent as well. The `InputActuatorComponent` will handle this for you. You can now train and
run inference on `Agents` with an action space defined by an `InputActionAsset`.
This implementation includes:
* A C# `InputActuatorComponent` that you can attach to your Agent.
* An `IInputActionAssetProvider` interface to implement in the `Component` where you handle player input.
* An example environment where the input handling code is not in the Heuristic function of the Agent subclass.
### Feedback
We have only implemented a small subset of `InputControl` types that we thought would cover a large portion of what
most developers would use. Please let us know if you want more control types implemented by posting in the
[ML-Agents forum](https://forum.unity.com/forums/ml-agents.453/).
We would also like your feedback on the workflow of integrating this into your games. If you run
into workflow issues please let us know in the ML-Agents forums, or if you've discovered a bug,
please file a bug on our GitHub page.
## Getting started
The C# code for the `InputActuatorComponent` lives in the extensions package (com.unity.ml-agents.extensions). A good first step is to familiarize yourself with the extensions package by reading the document [here](com.unity.ml-agents.extensions.md). The second step is to look at how we have implemented the C# code in the example Input Integration scene (located under ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/). Once you have some familiarity, the next step is to add the InputActuatorComponent to your player Agent. The example we have implemented uses C# Events to send information from the Input System.
See below for further technical specifications on the C# code for the InputActuatorComponent.
## Technical specifications for the InputActuatorComponent
### `IInputActionAssetProvider` Interface
The `InputActuatorComponent` searches the `GameObject` it is attached to for a `Component` that implements
`IInputActionAssetProvider`. It is important to note
that if multiple `Components` on your `GameObject` need to access an `InputActionAsset` to handle events,
they will need to share the same instance of the `InputActionAsset` that is returned from the
`IInputActionAssetProvider`.
### `InputActuatorComponent` class
The `InputActuatorComponent` is the bridge between ML-Agents and the Input System. It allows ML-Agents to
* create an `ActionSpec` for your Agent based on an `InputActionAsset` that comes from an
`IInputActionAssetProvider`.
* send simulated input from a training process or a neural network
* let developers keep their input handling code in one place
This is accomplished by adding the `InputActuatorComponent` to an Agent that already has the `PlayerInput` component attached.
### Setting up a scene using the `InputActuatorComponent`
1. Add `com.unity.inputsystem` version 1.1.0-preview.3 or later to your project via the Package Manager window.
2. If you have already set up an InputActionAsset, skip to Step 3; otherwise follow these sub-steps:
1. Create an InputActionAsset to allow your Agent to be controlled by the Input System.
2. Handle the events from the Input System where you normally would (i.e. a script external to your Agent class).
3. Add the `InputActuatorComponent` to the GameObject that has the `PlayerInput` and `Agent` components attached.
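To make step 3 concrete, here is a minimal sketch of a component implementing `IInputActionAssetProvider`, based on the `GetInputActionAsset` signature used by the example `PushBlockPlayerController` later in this change. The `MyActions` wrapper name is a placeholder for the C# class generated from your own `.inputactions` asset:

```csharp
using Unity.MLAgents.Extensions.Input;
using UnityEngine;
using UnityEngine.InputSystem;

// Minimal sketch: expose a single shared InputActionAsset instance so the
// InputActuatorComponent and your gameplay code bind to the same actions.
// "MyActions" stands in for the wrapper generated from your .inputactions asset.
public class MyPlayerController : MonoBehaviour, IInputActionAssetProvider
{
    MyActions m_Actions;

    void Awake()
    {
        LazyInitializeActions();
    }

    void LazyInitializeActions()
    {
        if (m_Actions != null)
        {
            return;
        }
        m_Actions = new MyActions();
        m_Actions.Enable();
    }

    public (InputActionAsset, IInputActionCollection2) GetInputActionAsset()
    {
        // Lazy-initialize so the actuator receives a valid asset even if it
        // calls in before this component's Awake() has run.
        LazyInitializeActions();
        return (m_Actions.asset, m_Actions);
    }
}
```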

3
com.unity.ml-agents.extensions/Editor/Input.meta


fileFormatVersion: 2
guid: 1f773a20e85042999e87f5d3c7b55281
timeCreated: 1613637190

3
com.unity.ml-agents.extensions/Runtime/Input.meta


fileFormatVersion: 2
guid: 1694e881b9ec420ba1c201f0612392d6
timeCreated: 1610754907

3
com.unity.ml-agents.extensions/Tests/Runtime/Input.meta


fileFormatVersion: 2
guid: 27f8e1ce37d7485f814ce50a37101203
timeCreated: 1612908869

35
com.unity.ml-agents/Runtime/Communicator/CommunicatorFactory.cs


namespace Unity.MLAgents
{
/// <summary>
/// Factory class for an ICommunicator instance. This is used by the <see cref="Academy"/> at startup.
/// By default, on desktop platforms, an ICommunicator will be created and attempt to connect
/// to a trainer. This behavior can be prevented by setting <see cref="CommunicatorFactory.Enabled"/> to false
/// *before* the <see cref="Academy"/> is initialized.
/// </summary>
public static class CommunicatorFactory
{
static bool s_Enabled = true;
/// <summary>
/// Whether or not an ICommunicator instance will be created when the <see cref="Academy"/> is initialized.
/// Changing this has no effect after the <see cref="Academy"/> has already been initialized.
/// </summary>
public static bool Enabled
{
get => s_Enabled;
set => s_Enabled = value;
}
internal static ICommunicator Create()
{
#if UNITY_EDITOR || UNITY_STANDALONE_WIN || UNITY_STANDALONE_OSX || UNITY_STANDALONE_LINUX
if (s_Enabled)
{
return new RpcCommunicator();
}
#endif
// Non-desktop or disabled
return null;
}
}
}
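As the XML doc above notes, disabling the factory only has an effect before the Academy is initialized. A minimal usage sketch, assuming a bootstrap script that runs early in the scene (the `TrainingBootstrap` class name is hypothetical):

```csharp
using Unity.MLAgents;
using UnityEngine;

// Hypothetical bootstrap script: opt out of trainer connections for this build.
public class TrainingBootstrap : MonoBehaviour
{
    void Awake()
    {
        // Must run before anything touches Academy.Instance; changing this
        // after the Academy has been initialized has no effect.
        CommunicatorFactory.Enabled = false;
    }
}
```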

3
com.unity.ml-agents/Runtime/Communicator/CommunicatorFactory.cs.meta


fileFormatVersion: 2
guid: 0b604cddc07e4484a2cdaba630a971ea
timeCreated: 1613617949

40
com.unity.ml-agents/Tests/Editor/Actuators/ActionSpecTests.cs


using System;
using System.Collections.Generic;
using System.Linq;
using NUnit.Framework;
using Unity.MLAgents.Actuators;
using UnityEngine.TestTools.Constraints;
using Is = UnityEngine.TestTools.Constraints.Is;
namespace Unity.MLAgents.Tests.Actuators
{
[TestFixture]
public class ActionSpecTests
{
[Test]
public void ActionSpecCombineTest()
{
var as0 = new ActionSpec(3, new[] { 3, 2, 1 });
var as1 = new ActionSpec(1, new[] { 35, 122, 1, 3, 8, 3 });
var as0NumCon = 3;
var as0NumDis = as0.NumDiscreteActions;
var as1NumCon = 1;
var as1NumDis = as1.NumDiscreteActions;
var branchSizes = new List<int>();
branchSizes.AddRange(as0.BranchSizes);
branchSizes.AddRange(as1.BranchSizes);
var asc = ActionSpec.Combine(as0, as1);
Assert.AreEqual(as0NumCon + as1NumCon, asc.NumContinuousActions);
Assert.AreEqual(as0NumDis + as1NumDis, asc.NumDiscreteActions);
Assert.IsTrue(branchSizes.ToArray().SequenceEqual(asc.BranchSizes));
as0 = new ActionSpec(3);
as1 = new ActionSpec(1);
asc = ActionSpec.Combine(as0, as1);
Assert.IsEmpty(asc.BranchSizes);
}
}
}

3
com.unity.ml-agents/Tests/Editor/Actuators/ActionSpecTests.cs.meta


fileFormatVersion: 2
guid: 99d76ec04c944b75bc6b85abfff4ac4e
timeCreated: 1613680505

7
DevProject/ProjectSettings/Packages/com.unity.testtools.codecoverage/Settings.json


{
"m_Name": "Settings",
"m_Path": "ProjectSettings/Packages/com.unity.testtools.codecoverage/Settings.json",
"m_Dictionary": {
"m_DictionaryValues": []
}
}

70
ML-Agents-Input-Example/.gitignore


/[Ll]ibrary/
/Logs/
/[Tt]emp/
/[Oo]bj/
/[Bb]uild/
/[Bb]uilds/
/Assets/AssetStoreTools*
/Assets/Plugins*
/Assets/Demonstrations*
/Assets/ML-Agents/Timers*
/*_timers.json
# Environment logfile
*Project.log
# Visual Studio 2015 cache directory
/.vs/
# Autogenerated VS/MD/Consulo solution and project files
/ProjectExportedObj/
/Project.consulo/
*.csproj
*.unityproj
*.sln
*.suo
*.tmp
*.user
*.userprefs
*.pidb
*.booproj
*.svd
*.pdb
# Unity3D generated meta files
*.pidb.meta
# Unity3D Generated File On Crash Reports
/sysinfo.txt
# Builds
*.apk
*.unitypackage
*.app
*.exe
*.x86_64
*.x86
# Plugins
/Assets/VideoRecorder*
# Mac hidden files
*.DS_Store
*/.ipynb_checkpoints
*/.idea
*.pyc
*.idea/misc.xml
*.idea/modules.xml
*.idea/
*.iml
*.cache
*/build/
*/dist/
*.egg-info*
*.eggs*
*.gitignore.swp
# VSCode hidden files
*.vscode/
.DS_Store

33
ML-Agents-Input-Example/Assets/InputSystem.inputsettings.asset


%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!114 &11400000
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 0}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: c46f07b5ed07e4e92aa78254188d3d10, type: 3}
m_Name: InputSystem.inputsettings
m_EditorClassIdentifier:
m_SupportedDevices:
- Keyboard
- Gamepad
m_UpdateMode: 2
m_CompensateForScreenOrientation: 0
m_FilterNoiseOnCurrent: 0
m_DefaultDeadzoneMin: 0.125
m_DefaultDeadzoneMax: 0.925
m_DefaultButtonPressPoint: 0.5
m_ButtonReleaseThreshold: 0.75
m_DefaultTapTime: 0.2
m_DefaultSlowTapTime: 0.5
m_DefaultHoldTime: 0.4
m_TapRadius: 5
m_MultiTapDelayTime: 0.75
m_iOSSettings:
m_MotionUsage:
m_Enabled: 0
m_Description:

8
ML-Agents-Input-Example/Assets/InputSystem.inputsettings.asset.meta


fileFormatVersion: 2
guid: 778a63fac82734ee1b70a0b7c3e975c0
NativeFormatImporter:
externalObjects: {}
mainObjectFileID: 0
userData:
assetBundleName:
assetBundleVariant:

8
ML-Agents-Input-Example/Assets/ML-Agents.meta


fileFormatVersion: 2
guid: 11630fa83cc8b4194b94352e3e6cdb9d
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

8
ML-Agents-Input-Example/Assets/ML-Agents/Examples.meta


fileFormatVersion: 2
guid: 6584f096f53dc43eeb32803b91f36c5c
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

10
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock.meta


fileFormatVersion: 2
guid: 4197ece8ca6d74a00adc6bafbabda158
folderAsset: yes
timeCreated: 1506303336
licenseType: Pro
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

8
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Meshes.meta


fileFormatVersion: 2
guid: e806d68316eff0e46a3fda372ae42c44
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

1001
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Meshes/PushBlockCourt.fbx
File diff suppressed because it is too large

120
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Meshes/PushBlockCourt.fbx.meta


fileFormatVersion: 2
guid: c639386c12f5f7841892163a199dfacc
ModelImporter:
serializedVersion: 22
fileIDToRecycleName:
100000: GoalArea
100002: Ground
100004: //RootNode
100006: WallsOuter
400000: GoalArea
400002: Ground
400004: //RootNode
400006: WallsOuter
2100000: rep_WhiteWalls
2100002: rep_Floor
2100004: rep_Checkers
2300000: GoalArea
2300002: Ground
2300004: WallsOuter
3300000: GoalArea
3300002: Ground
3300004: WallsOuter
4300000: WallsOuter
4300002: Ground
4300004: GoalArea
externalObjects:
- first:
type: UnityEngine:Material
assembly: UnityEngine.CoreModule
name: rep_Checkers
second: {fileID: 2100000, guid: 36c7baa347d68f347a9aa9698aa1bcdd, type: 2}
- first:
type: UnityEngine:Material
assembly: UnityEngine.CoreModule
name: rep_Floor
second: {fileID: 2100000, guid: bc723809e6ff3174fad3e774cae1aed0, type: 2}
- first:
type: UnityEngine:Material
assembly: UnityEngine.CoreModule
name: rep_WhiteWalls
second: {fileID: 2100000, guid: 6a39c0407dd85684384bf0277294e9b6, type: 2}
materials:
importMaterials: 1
materialName: 0
materialSearch: 1
materialLocation: 1
animations:
legacyGenerateAnimations: 4
bakeSimulation: 0
resampleCurves: 1
optimizeGameObjects: 0
motionNodeName:
rigImportErrors:
rigImportWarnings:
animationImportErrors:
animationImportWarnings:
animationRetargetingWarnings:
animationDoRetargetingWarnings: 0
importAnimatedCustomProperties: 0
animationCompression: 1
animationRotationError: 0.5
animationPositionError: 0.5
animationScaleError: 0.5
animationWrapMode: 0
extraExposedTransformPaths: []
extraUserProperties: []
clipAnimations: []
isReadable: 1
meshes:
lODScreenPercentages: []
globalScale: 1
meshCompression: 0
addColliders: 0
importVisibility: 1
importBlendShapes: 1
importCameras: 1
importLights: 1
swapUVChannels: 0
generateSecondaryUV: 1
useFileUnits: 1
optimizeMeshForGPU: 1
keepQuads: 0
weldVertices: 1
preserveHierarchy: 0
indexFormat: 0
secondaryUVAngleDistortion: 8
secondaryUVAreaDistortion: 15.000001
secondaryUVHardAngle: 88
secondaryUVPackMargin: 4
useFileScale: 1
tangentSpace:
normalSmoothAngle: 60
normalImportMode: 0
tangentImportMode: 3
normalCalculationMode: 4
importAnimation: 1
copyAvatar: 0
humanDescription:
serializedVersion: 2
human: []
skeleton: []
armTwist: 0.5
foreArmTwist: 0.5
upperLegTwist: 0.5
legTwist: 0.5
armStretch: 0.05
legStretch: 0.05
feetSpacing: 0
rootMotionBoneName:
rootMotionBoneRotation: {x: 0, y: 0, z: 0, w: 1}
hasTranslationDoF: 0
hasExtraRoot: 0
skeletonHasParents: 1
lastHumanDescriptionAvatarSource: {instanceID: 0}
animationType: 0
humanoidOversampling: 1
additionalBone: 0
userData:
assetBundleName:
assetBundleVariant:

10
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Prefabs.meta


fileFormatVersion: 2
guid: 3d2b9d5547d934200a786212743850c4
folderAsset: yes
timeCreated: 1514922259
licenseType: Free
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

1001
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Prefabs/PushBlockArea.prefab
File diff suppressed because it is too large

10
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Prefabs/PushBlockArea.prefab.meta


fileFormatVersion: 2
guid: 03bcc81e249714a22bb411dddcc5d15e
timeCreated: 1515023875
licenseType: Free
NativeFormatImporter:
externalObjects: {}
mainObjectFileID: 100100000
userData:
assetBundleName:
assetBundleVariant:

179
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/PushBlockActions.inputactions


{
"name": "PushBlockActions",
"maps": [
{
"name": "Movement",
"id": "03a2e5d4-ae81-47f1-a575-0779fb7da538",
"actions": [
{
"name": "movement",
"type": "Value",
"id": "5f47cbc6-de46-4d33-93e2-2b1af4f5996d",
"expectedControlType": "Vector2",
"processors": "",
"interactions": ""
},
{
"name": "jump",
"type": "Button",
"id": "ca5eb833-5dfb-4b7c-880d-6118bd5d1378",
"expectedControlType": "Button",
"processors": "",
"interactions": ""
}
],
"bindings": [
{
"name": "gamepad_move",
"id": "477500ef-6d32-4b84-b9f8-158f18bcb906",
"path": "2DVector",
"interactions": "",
"processors": "",
"groups": "",
"action": "movement",
"isComposite": true,
"isPartOfComposite": false
},
{
"name": "up",
"id": "6d2537b8-2266-4a50-8575-fb0fe310daa5",
"path": "<Gamepad>/dpad/up",
"interactions": "",
"processors": "",
"groups": "Keyboard",
"action": "movement",
"isComposite": false,
"isPartOfComposite": true
},
{
"name": "down",
"id": "50584c83-beb6-4e90-a453-a635c03a761e",
"path": "<Gamepad>/dpad/down",
"interactions": "",
"processors": "",
"groups": "Keyboard",
"action": "movement",
"isComposite": false,
"isPartOfComposite": true
},
{
"name": "left",
"id": "44408b8f-27e7-4c6d-b078-7536ba020d1a",
"path": "<Gamepad>/dpad/left",
"interactions": "",
"processors": "",
"groups": "Keyboard",
"action": "movement",
"isComposite": false,
"isPartOfComposite": true
},
{
"name": "right",
"id": "f5681423-d3e3-41a5-b85e-0a7642c774aa",
"path": "<Gamepad>/dpad/right",
"interactions": "",
"processors": "",
"groups": "Keyboard",
"action": "movement",
"isComposite": false,
"isPartOfComposite": true
},
{
"name": "keyboard_move",
"id": "6bcba4bf-5ce0-4005-9e6a-0de2487211b0",
"path": "2DVector",
"interactions": "",
"processors": "",
"groups": "",
"action": "movement",
"isComposite": true,
"isPartOfComposite": false
},
{
"name": "up",
"id": "63da699e-b354-4e63-b0f8-26fb92abea41",
"path": "<Keyboard>/w",
"interactions": "",
"processors": "",
"groups": "Keyboard",
"action": "movement",
"isComposite": false,
"isPartOfComposite": true
},
{
"name": "down",
"id": "39409748-9002-4aff-9a09-cdc05b9708ad",
"path": "<Keyboard>/s",
"interactions": "",
"processors": "",
"groups": "Keyboard",
"action": "movement",
"isComposite": false,
"isPartOfComposite": true
},
{
"name": "left",
"id": "0afe45fc-dc45-4310-9c73-7dc3c503addf",
"path": "<Keyboard>/a",
"interactions": "",
"processors": "",
"groups": "Keyboard",
"action": "movement",
"isComposite": false,
"isPartOfComposite": true
},
{
"name": "right",
"id": "69fe0335-9e0c-495d-a90d-4b0fcbfd2b34",
"path": "<Keyboard>/d",
"interactions": "",
"processors": "",
"groups": "Keyboard",
"action": "movement",
"isComposite": false,
"isPartOfComposite": true
},
{
"name": "",
"id": "ab696218-63cd-4eb8-9fe1-48a68e32e92f",
"path": "<Keyboard>/space",
"interactions": "",
"processors": "",
"groups": "Keyboard",
"action": "jump",
"isComposite": false,
"isPartOfComposite": false
},
{
"name": "",
"id": "7adcb138-5175-4cc4-addc-d2b02cb5f0de",
"path": "<Gamepad>/buttonSouth",
"interactions": "",
"processors": "",
"groups": "Keyboard",
"action": "jump",
"isComposite": false,
"isPartOfComposite": false
}
]
}
],
"controlSchemes": [
{
"name": "Keyboard",
"bindingGroup": "Keyboard",
"devices": [
{
"devicePath": "<Keyboard>",
"isOptional": true,
"isOR": false
},
{
"devicePath": "<Gamepad>",
"isOptional": true,
"isOR": false
}
]
}
]
}

14
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/PushBlockActions.inputactions.meta


fileFormatVersion: 2
guid: fcfa5a3b2c4254b2baad1ae474fc4b93
ScriptedImporter:
internalIDToNameTable: []
externalObjects: {}
serializedVersion: 2
userData:
assetBundleName:
assetBundleVariant:
script: {fileID: 11500000, guid: 8404be70184654265930450def6a9037, type: 3}
generateWrapperCode: 1
wrapperCodePath: Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockActions.cs
wrapperClassName:
wrapperCodeNamespace:

10
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scenes.meta


fileFormatVersion: 2
guid: 9da26ec059778432080bf5fa24374960
folderAsset: yes
timeCreated: 1516234013
licenseType: Free
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

1001
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scenes/PushBlock.unity
File diff suppressed because it is too large

9
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scenes/PushBlock.unity.meta


fileFormatVersion: 2
guid: ae8cc75939e3e4d07a79c8c6a08b54f4
timeCreated: 1506808980
licenseType: Pro
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

10
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts.meta


fileFormatVersion: 2
guid: ca44e3d53154a4ff0a1279be30b23bdf
folderAsset: yes
timeCreated: 1514922284
licenseType: Free
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

26
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/GoalDetect.cs


//Detect when the orange block has touched the goal.
//Detect when the orange block has touched an obstacle.
//Put this script onto the orange block. There's nothing you need to set in the editor.
//Make sure the goal is tagged with "goal" in the editor.
using UnityEngine;
public class GoalDetect : MonoBehaviour
{
/// <summary>
/// The associated agent.
/// This will be set by the agent script on Initialization.
/// There is no need to set this manually.
/// </summary>
[HideInInspector]
public PushAgentBasic agent;
void OnCollisionEnter(Collision col)
{
// Touched goal.
if (col.gameObject.CompareTag("goal"))
{
agent.ScoredAGoal();
}
}
}

13
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/GoalDetect.cs.meta


fileFormatVersion: 2
guid: 7d079d09ceed84ff49cf6841c66cf7ec
timeCreated: 1513645763
licenseType: Free
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

192
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushAgentBasic.cs


//Put this script on your blue cube.
using System;
using System.Collections;
using UnityEngine;
using Unity.MLAgents;
using Unity.MLAgents.Actuators;
using Random = UnityEngine.Random;
public class PushAgentBasic : Agent
{
/// <summary>
/// The ground. The bounds are used to spawn the elements.
/// </summary>
public GameObject ground;
public GameObject area;
/// <summary>
/// The area bounds.
/// </summary>
[HideInInspector]
public Bounds areaBounds;
PushBlockSettings m_PushBlockSettings;
/// <summary>
/// The block to be pushed to the goal.
/// </summary>
public GameObject block;
/// <summary>
/// Detects when the block touches the goal.
/// </summary>
[HideInInspector]
public GoalDetect goalDetect;
Rigidbody m_BlockRb; //cached on initialization
Rigidbody m_AgentRb; //cached on initialization
Material m_GroundMaterial; //cached on Awake()
/// <summary>
/// We will be changing the ground material based on success/failure
/// </summary>
Renderer m_GroundRenderer;
EnvironmentParameters m_ResetParams;
void Awake()
{
m_PushBlockSettings = FindObjectOfType<PushBlockSettings>();
goalDetect = block.GetComponent<GoalDetect>();
goalDetect.agent = this;
// Cache the agent rigidbody
m_AgentRb = GetComponent<Rigidbody>();
// Cache the block rigidbody
m_BlockRb = block.GetComponent<Rigidbody>();
// Get the ground's bounds
areaBounds = ground.GetComponent<Collider>().bounds;
// Get the ground renderer so we can change the material when a goal is scored
m_GroundRenderer = ground.GetComponent<Renderer>();
// Starting material
m_GroundMaterial = m_GroundRenderer.material;
}
public override void Initialize()
{
m_ResetParams = Academy.Instance.EnvironmentParameters;
SetResetParameters();
}
/// <summary>
/// Use the ground's bounds to pick a random spawn position.
/// </summary>
public Vector3 GetRandomSpawnPos()
{
var foundNewSpawnLocation = false;
var randomSpawnPos = Vector3.zero;
var tries = 0;
while (foundNewSpawnLocation == false && tries < 50)
{
var randomPosX = Random.Range(-areaBounds.extents.x * m_PushBlockSettings.spawnAreaMarginMultiplier,
areaBounds.extents.x * m_PushBlockSettings.spawnAreaMarginMultiplier);
var randomPosZ = Random.Range(-areaBounds.extents.z * m_PushBlockSettings.spawnAreaMarginMultiplier,
areaBounds.extents.z * m_PushBlockSettings.spawnAreaMarginMultiplier);
randomSpawnPos = ground.transform.position + new Vector3(randomPosX, 1f, randomPosZ);
if (Physics.CheckBox(randomSpawnPos, new Vector3(2.5f, 0.01f, 2.5f)) == false)
{
foundNewSpawnLocation = true;
}
tries++;
}
return randomSpawnPos;
}
/// <summary>
/// Called when the agent moves the block into the goal.
/// </summary>
public void ScoredAGoal()
{
// We use a reward of 5.
AddReward(5f);
// By marking an agent as done AgentReset() will be called automatically.
EndEpisode();
// Swap ground material for a bit to indicate we scored.
StartCoroutine(GoalScoredSwapGroundMaterial(m_PushBlockSettings.goalScoredMaterial, 0.5f));
}
/// <summary>
/// Swap ground material, wait time seconds, then swap back to the regular material.
/// </summary>
IEnumerator GoalScoredSwapGroundMaterial(Material mat, float time)
{
m_GroundRenderer.material = mat;
yield return new WaitForSeconds(time); // Wait for `time` seconds
m_GroundRenderer.material = m_GroundMaterial;
}
/// <summary>
/// Called every step of the engine. Here the agent takes an action.
/// </summary>
public override void OnActionReceived(ActionBuffers actionBuffers)
{
// Penalty given each step to encourage agent to finish task quickly.
AddReward(-1f / MaxStep);
}
/// <summary>
/// Resets the block position and velocities.
/// </summary>
void ResetBlock()
{
// Get a random position for the block.
block.transform.position = GetRandomSpawnPos();
// Reset block velocity back to zero.
m_BlockRb.velocity = Vector3.zero;
// Reset block angularVelocity back to zero.
m_BlockRb.angularVelocity = Vector3.zero;
}
/// <summary>
/// In the editor, if "Reset On Done" is checked then AgentReset() will be
/// called automatically anytime we mark done = true in an agent script.
/// </summary>
public override void OnEpisodeBegin()
{
var rotation = Random.Range(0, 4);
var rotationAngle = rotation * 90f;
area.transform.Rotate(new Vector3(0f, rotationAngle, 0f));
ResetBlock();
transform.position = GetRandomSpawnPos();
m_AgentRb.velocity = Vector3.zero;
m_AgentRb.angularVelocity = Vector3.zero;
SetResetParameters();
}
public void SetGroundMaterialFriction()
{
var groundCollider = ground.GetComponent<Collider>();
groundCollider.material.dynamicFriction = m_ResetParams.GetWithDefault("dynamic_friction", 0);
groundCollider.material.staticFriction = m_ResetParams.GetWithDefault("static_friction", 0);
}
public void SetBlockProperties()
{
var scale = m_ResetParams.GetWithDefault("block_scale", 2);
//Set the scale of the block
m_BlockRb.transform.localScale = new Vector3(scale, 0.75f, scale);
// Set the drag of the block
m_BlockRb.drag = m_ResetParams.GetWithDefault("block_drag", 0.5f);
}
void SetResetParameters()
{
SetGroundMaterialFriction();
SetBlockProperties();
}
}

12
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushAgentBasic.cs.meta


fileFormatVersion: 2
guid: dea8c4f2604b947e6b7b97750dde87ca
timeCreated: 1506829537
licenseType: Pro
MonoImporter:
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

316
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockActions.cs


//------------------------------------------------------------------------------
// <auto-generated>
// This code was auto-generated by com.unity.inputsystem:InputActionCodeGenerator
// version 1.1.0
// from Assets/ML-Agents/Examples/PushBlock/PushBlockActions.inputactions
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------
using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine.InputSystem;
using UnityEngine.InputSystem.Utilities;
public partial class @PushBlockActions : IInputActionCollection2, IDisposable
{
public InputActionAsset asset { get; }
public @PushBlockActions()
{
asset = InputActionAsset.FromJson(@"{
""name"": ""PushBlockActions"",
""maps"": [
{
""name"": ""Movement"",
""id"": ""03a2e5d4-ae81-47f1-a575-0779fb7da538"",
""actions"": [
{
""name"": ""movement"",
""type"": ""Value"",
""id"": ""5f47cbc6-de46-4d33-93e2-2b1af4f5996d"",
""expectedControlType"": ""Vector2"",
""processors"": """",
""interactions"": """"
},
{
""name"": ""jump"",
""type"": ""Button"",
""id"": ""ca5eb833-5dfb-4b7c-880d-6118bd5d1378"",
""expectedControlType"": ""Button"",
""processors"": """",
""interactions"": """"
}
],
""bindings"": [
{
""name"": ""gamepad_move"",
""id"": ""477500ef-6d32-4b84-b9f8-158f18bcb906"",
""path"": ""2DVector"",
""interactions"": """",
""processors"": """",
""groups"": """",
""action"": ""movement"",
""isComposite"": true,
""isPartOfComposite"": false
},
{
""name"": ""up"",
""id"": ""6d2537b8-2266-4a50-8575-fb0fe310daa5"",
""path"": ""<Gamepad>/dpad/up"",
""interactions"": """",
""processors"": """",
""groups"": ""Keyboard"",
""action"": ""movement"",
""isComposite"": false,
""isPartOfComposite"": true
},
{
""name"": ""down"",
""id"": ""50584c83-beb6-4e90-a453-a635c03a761e"",
""path"": ""<Gamepad>/dpad/down"",
""interactions"": """",
""processors"": """",
""groups"": ""Keyboard"",
""action"": ""movement"",
""isComposite"": false,
""isPartOfComposite"": true
},
{
""name"": ""left"",
""id"": ""44408b8f-27e7-4c6d-b078-7536ba020d1a"",
""path"": ""<Gamepad>/dpad/left"",
""interactions"": """",
""processors"": """",
""groups"": ""Keyboard"",
""action"": ""movement"",
""isComposite"": false,
""isPartOfComposite"": true
},
{
""name"": ""right"",
""id"": ""f5681423-d3e3-41a5-b85e-0a7642c774aa"",
""path"": ""<Gamepad>/dpad/right"",
""interactions"": """",
""processors"": """",
""groups"": ""Keyboard"",
""action"": ""movement"",
""isComposite"": false,
""isPartOfComposite"": true
},
{
""name"": ""keyboard_move"",
""id"": ""6bcba4bf-5ce0-4005-9e6a-0de2487211b0"",
""path"": ""2DVector"",
""interactions"": """",
""processors"": """",
""groups"": """",
""action"": ""movement"",
""isComposite"": true,
""isPartOfComposite"": false
},
{
""name"": ""up"",
""id"": ""63da699e-b354-4e63-b0f8-26fb92abea41"",
""path"": ""<Keyboard>/w"",
""interactions"": """",
""processors"": """",
""groups"": ""Keyboard"",
""action"": ""movement"",
""isComposite"": false,
""isPartOfComposite"": true
},
{
""name"": ""down"",
""id"": ""39409748-9002-4aff-9a09-cdc05b9708ad"",
""path"": ""<Keyboard>/s"",
""interactions"": """",
""processors"": """",
""groups"": ""Keyboard"",
""action"": ""movement"",
""isComposite"": false,
""isPartOfComposite"": true
},
{
""name"": ""left"",
""id"": ""0afe45fc-dc45-4310-9c73-7dc3c503addf"",
""path"": ""<Keyboard>/a"",
""interactions"": """",
""processors"": """",
""groups"": ""Keyboard"",
""action"": ""movement"",
""isComposite"": false,
""isPartOfComposite"": true
},
{
""name"": ""right"",
""id"": ""69fe0335-9e0c-495d-a90d-4b0fcbfd2b34"",
""path"": ""<Keyboard>/d"",
""interactions"": """",
""processors"": """",
""groups"": ""Keyboard"",
""action"": ""movement"",
""isComposite"": false,
""isPartOfComposite"": true
},
{
""name"": """",
""id"": ""ab696218-63cd-4eb8-9fe1-48a68e32e92f"",
""path"": ""<Keyboard>/space"",
""interactions"": """",
""processors"": """",
""groups"": ""Keyboard"",
""action"": ""jump"",
""isComposite"": false,
""isPartOfComposite"": false
},
{
""name"": """",
""id"": ""7adcb138-5175-4cc4-addc-d2b02cb5f0de"",
""path"": ""<Gamepad>/buttonSouth"",
""interactions"": """",
""processors"": """",
""groups"": ""Keyboard"",
""action"": ""jump"",
""isComposite"": false,
""isPartOfComposite"": false
}
]
}
],
""controlSchemes"": [
{
""name"": ""Keyboard"",
""bindingGroup"": ""Keyboard"",
""devices"": [
{
""devicePath"": ""<Keyboard>"",
""isOptional"": true,
""isOR"": false
},
{
""devicePath"": ""<Gamepad>"",
""isOptional"": true,
""isOR"": false
}
]
}
]
}");
// Movement
m_Movement = asset.FindActionMap("Movement", throwIfNotFound: true);
m_Movement_movement = m_Movement.FindAction("movement", throwIfNotFound: true);
m_Movement_jump = m_Movement.FindAction("jump", throwIfNotFound: true);
}
public void Dispose()
{
UnityEngine.Object.Destroy(asset);
}
public InputBinding? bindingMask
{
get => asset.bindingMask;
set => asset.bindingMask = value;
}
public ReadOnlyArray<InputDevice>? devices
{
get => asset.devices;
set => asset.devices = value;
}
public ReadOnlyArray<InputControlScheme> controlSchemes => asset.controlSchemes;
public bool Contains(InputAction action)
{
return asset.Contains(action);
}
public IEnumerator<InputAction> GetEnumerator()
{
return asset.GetEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return GetEnumerator();
}
public void Enable()
{
asset.Enable();
}
public void Disable()
{
asset.Disable();
}
public IEnumerable<InputBinding> bindings => asset.bindings;
public InputAction FindAction(string actionNameOrId, bool throwIfNotFound = false)
{
return asset.FindAction(actionNameOrId, throwIfNotFound);
}
public int FindBinding(InputBinding bindingMask, out InputAction action)
{
return asset.FindBinding(bindingMask, out action);
}
// Movement
private readonly InputActionMap m_Movement;
private IMovementActions m_MovementActionsCallbackInterface;
private readonly InputAction m_Movement_movement;
private readonly InputAction m_Movement_jump;
public struct MovementActions
{
private @PushBlockActions m_Wrapper;
public MovementActions(@PushBlockActions wrapper) { m_Wrapper = wrapper; }
public InputAction @movement => m_Wrapper.m_Movement_movement;
public InputAction @jump => m_Wrapper.m_Movement_jump;
public InputActionMap Get() { return m_Wrapper.m_Movement; }
public void Enable() { Get().Enable(); }
public void Disable() { Get().Disable(); }
public bool enabled => Get().enabled;
public static implicit operator InputActionMap(MovementActions set) { return set.Get(); }
public void SetCallbacks(IMovementActions instance)
{
if (m_Wrapper.m_MovementActionsCallbackInterface != null)
{
@movement.started -= m_Wrapper.m_MovementActionsCallbackInterface.OnMovement;
@movement.performed -= m_Wrapper.m_MovementActionsCallbackInterface.OnMovement;
@movement.canceled -= m_Wrapper.m_MovementActionsCallbackInterface.OnMovement;
@jump.started -= m_Wrapper.m_MovementActionsCallbackInterface.OnJump;
@jump.performed -= m_Wrapper.m_MovementActionsCallbackInterface.OnJump;
@jump.canceled -= m_Wrapper.m_MovementActionsCallbackInterface.OnJump;
}
m_Wrapper.m_MovementActionsCallbackInterface = instance;
if (instance != null)
{
@movement.started += instance.OnMovement;
@movement.performed += instance.OnMovement;
@movement.canceled += instance.OnMovement;
@jump.started += instance.OnJump;
@jump.performed += instance.OnJump;
@jump.canceled += instance.OnJump;
}
}
}
public MovementActions @Movement => new MovementActions(this);
private int m_KeyboardSchemeIndex = -1;
public InputControlScheme KeyboardScheme
{
get
{
if (m_KeyboardSchemeIndex == -1) m_KeyboardSchemeIndex = asset.FindControlSchemeIndex("Keyboard");
return asset.controlSchemes[m_KeyboardSchemeIndex];
}
}
public interface IMovementActions
{
void OnMovement(InputAction.CallbackContext context);
void OnJump(InputAction.CallbackContext context);
}
}

11
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockActions.cs.meta


fileFormatVersion: 2
guid: d6f125a5c899346259be7aac2df8daa0
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

112
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockPlayerController.cs


using Unity.MLAgents.Extensions.Input;
using UnityEngine;
using UnityEngine.InputSystem;
/// <summary>
/// This class handles the input for the PushBlock Cube character in the PushBlock scene.
/// Note that the only ML-Agents code here is the implementation of the <see cref="IInputActionAssetProvider"/>.
/// The <see cref="InputActuatorComponent"/> looks for a component that implements that interface in order to
/// rebind actions to virtual controllers when training agents or running inference. This means that you can
/// keep your input handling code separate from ML-Agents, and have your agent's action space defined by the
/// actions defined in your project's <see cref="GetInputActionAsset"/>.
///
/// If you don't implement <see cref="IInputActionAssetProvider"/> the <see cref="InputActuatorComponent"/> will
look for a <see cref="PlayerInput"/> component on the GameObject it lives on. It will rebind the actions of that
/// instance of the asset.
///
/// It is important to note that if you have multiple components on the same GameObject handling input, you will
/// need to share the instance of the generated C# <see cref="IInputActionCollection2"/> (named <see cref="m_PushBlockActions"/>
/// here) in order to ensure that all of your actions are bound correctly for ml-agents training and inference.
/// </summary>
public class PushBlockPlayerController : MonoBehaviour, IInputActionAssetProvider
{
PushBlockSettings m_PushBlockSettings;
public float JumpTime = 0.5f;
float m_JumpTimeRemaining;
Rigidbody m_PlayerRb; //cached on initialization
PushBlockActions m_PushBlockActions;
float m_JumpCoolDownStart;
void Awake()
{
m_PushBlockSettings = FindObjectOfType<PushBlockSettings>();
LazyInitializeActions();
// Cache the agent rigidbody
m_PlayerRb = GetComponent<Rigidbody>();
}
void LazyInitializeActions()
{
if (m_PushBlockActions != null)
{
return;
}
m_PushBlockActions = new PushBlockActions();
m_PushBlockActions.Enable();
// You can listen to C# events.
        m_PushBlockActions.Movement.jump.performed += OnJumpPerformed;
}
    void OnJumpPerformed(InputAction.CallbackContext callbackContext)
{
InnerJump(gameObject.transform);
}
void FixedUpdate()
{
        // Or you can poll the action itself like we do here.
        InnerMove(gameObject.transform, m_PushBlockActions.Movement.movement.ReadValue<Vector2>());
        // Once the jump window has elapsed, push the character back down so it doesn't float.
        if (m_JumpTimeRemaining < 0)
{
m_PlayerRb.AddForce(-transform.up * (m_PushBlockSettings.agentJumpForce * 3), ForceMode.Acceleration);
}
m_JumpTimeRemaining -= Time.fixedDeltaTime;
}
void InnerJump(Transform t)
{
        // Only jump again once the cooldown has elapsed.
        if (Time.realtimeSinceStartup - m_JumpCoolDownStart > m_PushBlockSettings.agentJumpCoolDown)
{
m_JumpTimeRemaining = JumpTime;
m_PlayerRb.AddForce(t.up * m_PushBlockSettings.agentJumpForce, ForceMode.VelocityChange);
m_JumpCoolDownStart = Time.realtimeSinceStartup;
}
}
void InnerMove(Transform t, Vector2 v)
{
var forward = CreateForwardVector(v);
var up = CreateUpVector(v);
var dirToGo = t.forward * forward;
var rotateDir = t.up * up;
t.Rotate(rotateDir, Time.deltaTime * 200f);
m_PlayerRb.AddForce(dirToGo * m_PushBlockSettings.agentRunSpeed,
ForceMode.VelocityChange);
}
    // Returns the amount to rotate around the up axis: the x input is used only when it dominates the y input.
    static float CreateUpVector(Vector2 move)
    {
        return Mathf.Abs(move.x) > Mathf.Abs(move.y) ? move.x : 0f;
    }
    // Returns the forward input: the y input is used only when it dominates the x input.
    static float CreateForwardVector(Vector2 move)
    {
        return Mathf.Abs(move.y) > Mathf.Abs(move.x) ? move.y : 0f;
    }
    /// <summary>
    /// This is the implementation of <see cref="IInputActionAssetProvider"/> for this class. We return
    /// both the <see cref="InputActionAsset"/> and the <see cref="IInputActionCollection2"/> because, if you are
    /// listening to C# events, Unity Events, or receiving Messages from the Input System Package, those callbacks
    /// are set up through the generated <see cref="IInputActionCollection2"/>.
    /// </summary>
    /// <returns>The asset and the generated action collection whose callbacks are already wired up.</returns>
public (InputActionAsset, IInputActionCollection2) GetInputActionAsset()
{
LazyInitializeActions();
return (m_PushBlockActions.asset, m_PushBlockActions);
}
}
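As the class comment above notes, components that share a GameObject should share the single generated action collection rather than constructing their own. The sketch below shows one way to do that through the provider interface; the PushBlockAudio component is hypothetical and not part of this diff:

using Unity.MLAgents.Extensions.Input;
using UnityEngine;
using UnityEngine.InputSystem;

public class PushBlockAudio : MonoBehaviour
{
    PushBlockActions m_SharedActions;

    void Awake()
    {
        // GetInputActionAsset() lazily initializes the controller's actions,
        // so this works regardless of Awake() ordering between the components.
        var provider = GetComponent<IInputActionAssetProvider>();
        var (_, collection) = provider.GetInputActionAsset();
        m_SharedActions = (PushBlockActions)collection;
        m_SharedActions.Movement.jump.performed += OnJumpPerformed;
    }

    void OnDestroy()
    {
        if (m_SharedActions != null)
        {
            m_SharedActions.Movement.jump.performed -= OnJumpPerformed;
        }
    }

    void OnJumpPerformed(InputAction.CallbackContext ctx)
    {
        // e.g. play a jump sound here.
    }
}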

11
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockPlayerController.cs.meta


fileFormatVersion: 2
guid: bb9a2d951ffa44f53ba04a31f2712f5f
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

39
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockSettings.cs


using UnityEngine;
public class PushBlockSettings : MonoBehaviour
{
/// <summary>
/// The "walking speed" of the agents in the scene.
/// </summary>
public float agentRunSpeed;
/// <summary>
/// The agent rotation speed.
/// Every agent will use this setting.
/// </summary>
public float agentRotationSpeed;
public float agentJumpForce;
public float agentJumpCoolDown;
/// <summary>
/// The spawn area margin multiplier.
    /// e.g. .9 means 90% of the spawn area will be used,
    /// leaving a .1 margin so players don't spawn off the edge.
    /// The higher this value, the longer the training time required.
/// </summary>
public float spawnAreaMarginMultiplier;
/// <summary>
/// When a goal is scored the ground will switch to this
/// material for a few seconds.
/// </summary>
public Material goalScoredMaterial;
/// <summary>
/// When an agent fails, the ground will turn this material for a few seconds.
/// </summary>
public Material failMaterial;
}
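For context, the margin multiplier above is typically applied when sampling spawn positions: the usable extents of the spawn area are scaled down before picking a random point. A minimal sketch, assuming the area is described by a Bounds; the SpawnUtil helper is illustrative, not part of this diff:

using UnityEngine;

public static class SpawnUtil
{
    // With marginMultiplier = 0.9f, spawns are confined to the central 90%
    // of the area along each axis, leaving a 10% margin at the edges.
    public static Vector3 RandomPointInArea(Bounds area, float marginMultiplier)
    {
        var x = Random.Range(-area.extents.x, area.extents.x) * marginMultiplier;
        var z = Random.Range(-area.extents.z, area.extents.z) * marginMultiplier;
        return area.center + new Vector3(x, 0f, z);
    }
}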

11
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockSettings.cs.meta


fileFormatVersion: 2
guid: e5ed63dbfa25542ecb8bc013adfba183
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

8
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/TFModels.meta


fileFormatVersion: 2
guid: 5855121f0ded74dad8e1dd15a8bcdca1
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

1001
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/TFModels/PushBlock.onnx
File diff suppressed because it is too large

14
ML-Agents-Input-Example/Assets/ML-Agents/Examples/PushBlock/TFModels/PushBlock.onnx.meta


fileFormatVersion: 2
guid: bee50ae7fd03e4e16a4ac45db4d96f6e
ScriptedImporter:
internalIDToNameTable: []
externalObjects: {}
serializedVersion: 2
userData:
assetBundleName:
assetBundleVariant:
script: {fileID: 11500000, guid: 683b6cb6d0a474744822c888b46772c9, type: 3}
optimizeModel: 1
forceArbitraryBatchSize: 1
treatErrorsAsWarnings: 0
importMode: 1

8
ML-Agents-Input-Example/Assets/ML-Agents/Examples/SharedAssets.meta


fileFormatVersion: 2
guid: 0f83c851cdce246a0b97472ec6d35622
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

Some files were not shown because too many files have changed in this diff
