Compare commits

...
This merge request contains changes that conflict with the target branch.
/protobuf-definitions/proto/mlagents_envs/communicator_objects/observation.proto
/config/imitation/CrawlerStatic.yaml
/config/ppo/CrawlerDynamic.yaml
/config/ppo/FoodCollector.yaml
/config/ppo/CrawlerDynamicVariableSpeed.yaml
/config/ppo/WormDynamic.yaml
/config/sac/FoodCollector.yaml
/config/sac/CrawlerDynamic.yaml
/config/sac/WalkerDynamic.yaml
/config/sac/WormDynamic.yaml
/DevProject/Packages/packages-lock.json
/utils/validate_release_links.py
/utils/validate_versions.py
/utils/make_readme_table.py
/.yamato/com.unity.ml-agents-pack.yml
/.github/workflows/pytest.yml
/Project/Packages/manifest.json
/Project/ProjectSettings/GraphicsSettings.asset
/Project/Assets/ML-Agents/Examples/Crawler/Prefabs/CrawlerBase.prefab
/Project/Assets/ML-Agents/Examples/Crawler/Scenes/CrawlerDynamicVariableSpeed.unity
/Project/Assets/ML-Agents/Examples/Crawler/Scripts/CrawlerAgent.cs
/Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpCrawlerDynVS.demo.meta
/Project/Assets/ML-Agents/Examples/GridWorld/Demos/ExpertGrid.demo.meta
/Project/Assets/ML-Agents/Examples/PushBlock/Demos/ExpertPush.demo.meta
/Project/Assets/ML-Agents/Examples/Sorter/Scripts/SorterAgent.cs
/Project/Assets/ML-Agents/Examples/Walker/Demos/ExpertWalkerDyVS.demo.meta
/Project/Assets/ML-Agents/Examples/Worm/Scripts/WormAgent.cs
/Project/Assets/ML-Agents/Examples/Match3/Prefabs/Match3VectorObs.prefab
/Project/Assets/ML-Agents/Examples/Match3/Prefabs/Match3VisualObs.prefab
/Project/Assets/ML-Agents/Examples/FoodCollector/TFModels/FoodCollector.onnx
/Project/Assets/ML-Agents/Examples/FoodCollector/Scenes/FoodCollector.unity
/Project/Assets/ML-Agents/Examples/FoodCollector/Prefabs/FoodCollectorArea.prefab
/com.unity.ml-agents.extensions/package.json
/com.unity.ml-agents.extensions/Runtime/Input/InputActuatorComponent.cs
/com.unity.ml-agents.extensions/Runtime/Input/Adaptors/DoubleInputActionAdaptor.cs
/com.unity.ml-agents.extensions/Runtime/Input/Adaptors/FloatInputActionAdaptor.cs
/com.unity.ml-agents.extensions/Runtime/Input/Adaptors/IntegerInputActionAdaptor.cs
/com.unity.ml-agents.extensions/Runtime/Input/Adaptors/ButtonInputActionAdaptor.cs
/com.unity.ml-agents.extensions/Runtime/Input/Adaptors/Vector2InputActionAdaptor.cs
/com.unity.ml-agents.extensions/Runtime/Input/IRLActionInputAdaptor.cs
/com.unity.ml-agents.extensions/Runtime/Input/InputActionActuator.cs
/com.unity.ml-agents.extensions/Tests/Runtime/Input/InputActionActuatorTests.cs
/com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/ButtonInputActionAdaptorTests.cs
/com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/DoubleInputActionAdaptorTests.cs
/com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/FloatInputActionAdapatorTests.cs
/com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/Vector2InputActionAdaptorTests.cs
/com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/IntegerInputActionAdaptorTests.cs
/com.unity.ml-agents.extensions/Documentation~/com.unity.ml-agents.extensions.md
/com.unity.ml-agents/package.json
/com.unity.ml-agents/Documentation~/com.unity.ml-agents.md
/com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs
/com.unity.ml-agents/Runtime/Academy.cs
/com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs
/com.unity.ml-agents/Runtime/Actuators/IDiscreteActionMask.cs
/com.unity.ml-agents/Runtime/Agent.cs
/com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs
/com.unity.ml-agents/Runtime/SimpleMultiAgentGroup.cs
/com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Observation.cs
/com.unity.ml-agents/Runtime/Policies/BehaviorParameters.cs
/com.unity.ml-agents/CHANGELOG.md
/ml-agents-envs/README.md
/ml-agents-envs/mlagents_envs/communicator_objects/observation_pb2.py
/ml-agents-envs/mlagents_envs/communicator_objects/observation_pb2.pyi
/ml-agents-envs/mlagents_envs/base_env.py
/docs/Training-ML-Agents.md
/docs/Training-Configuration-File.md
/docs/Learning-Environment-Examples.md
/docs/Installation-Anaconda-Windows.md
/docs/Installation.md
/docs/Learning-Environment-Design-Agents.md
/docs/Training-on-Amazon-Web-Service.md
/docs/Training-on-Microsoft-Azure.md
/docs/Unity-Inference-Engine.md
/ml-agents/setup.py
/ml-agents/README.md
/ml-agents/mlagents/trainers/optimizer/torch_optimizer.py
/ml-agents/mlagents/trainers/ppo/optimizer_torch.py
/ml-agents/mlagents/trainers/ppo/trainer.py
/ml-agents/mlagents/trainers/sac/optimizer_torch.py
/ml-agents/mlagents/trainers/tests/dummy_config.py
/ml-agents/mlagents/trainers/tests/test_buffer.py
/ml-agents/mlagents/trainers/tests/torch/test_hybrid.py
/ml-agents/mlagents/trainers/tests/torch/test_ppo.py
/ml-agents/mlagents/trainers/tests/torch/saver/test_saver.py
/ml-agents/mlagents/trainers/tests/torch/test_simple_rl.py
/ml-agents/mlagents/trainers/tests/simple_test_envs.py
/ml-agents/mlagents/trainers/buffer.py
/README.md
/com.unity.ml-agents.extensions/Documentation~/Match3.md
/Project/Assets/ML-Agents/Examples/FoodCollector/TFModels/GridFoodCollector.onnx.meta
/Project/Assets/ML-Agents/Examples/SharedAssets/Scripts/CollisionCallbacks.cs
/Project/Assets/ML-Agents/Examples/PushBlock/Scenes/DungeonEscape.unity.meta
/Project/Assets/ML-Agents/Examples/PushBlock/Scenes/DungeonEscape.unity
/Project/Assets/ML-Agents/Examples/PushBlock/Prefabs/DungeonEscapePlatform.prefab.meta
/Project/Assets/ML-Agents/Examples/PushBlock/Prefabs/Column.prefab
/Project/Assets/ML-Agents/Examples/PushBlock/Prefabs/Column.prefab.meta
/Project/Assets/ML-Agents/Examples/PushBlock/Prefabs/DungeonEscapeAgent.prefab.meta
/Project/Assets/ML-Agents/Examples/PushBlock/Prefabs/Sword.prefab
/Project/Assets/ML-Agents/Examples/PushBlock/Prefabs/Sword.prefab.meta
/Project/Assets/ML-Agents/Examples/PushBlock/Prefabs/DungeonEscapeAgent.prefab
/Project/Assets/ML-Agents/Examples/PushBlock/Scripts/GoalDetectTrigger.cs
/Project/Assets/ML-Agents/Examples/SharedAssets/Scripts/CollisionCallbacks.cs.meta
/Project/Assets/ML-Agents/Examples/PushBlock/TFModels/PushBlockCollab.onnx
/Project/Assets/ML-Agents/Examples/PushBlock/TFModels/PushBlockCollab.onnx.meta
/Project/Assets/ML-Agents/Examples/PushBlock/Scenes/PushBlockCollab.unity
/Project/Assets/ML-Agents/Examples/PushBlock/Prefabs/PushBlockCollabAreaGrid.prefab
/Project/Assets/ML-Agents/Examples/PushBlock/Prefabs/PushBlockAgentGridCollab.prefab
/Project/Assets/ML-Agents/Examples/PushBlock/Scripts/PushAgentCollab.cs
/Project/Assets/ML-Agents/Examples/PushBlock/Scripts/PushBlockEnvController.cs
/Project/Assets/ML-Agents/Examples/FoodCollector/TFModels/GridFoodCollector.onnx
/com.unity.ml-agents.extensions/Documentation~/Grid-Sensor.md
/Project/Assets/ML-Agents/Examples/FoodCollector/TFModels/FoodCollector.onnx.meta
/com.unity.ml-agents/Runtime/DiscreteActionMasker.cs
/Project/Assets/ML-Agents/Examples/Crawler/TFModels/CrawlerDynamicVariableSpeed.nn
/Project/Assets/ML-Agents/Examples/Crawler/TFModels/CrawlerDynamicVariableSpeed.nn.meta
/Project/Assets/ML-Agents/Editor/Tests/SampleExporter.cs
/Project/Assets/ML-Agents/Examples/Walker/Scenes/Walker.unity
/com.unity.ml-agents.extensions/Runtime/Input/InputActuatorEventContext.cs

3 commits

432 files changed, with 1,783 insertions and 2,866 deletions
  1. 11
      README.md
  2. 29
      utils/validate_versions.py
  3. 2
      utils/make_readme_table.py
  4. 167
      utils/validate_release_links.py
  5. 23
      .github/workflows/pytest.yml
  6. 8
      .yamato/com.unity.ml-agents-pack.yml
  7. 2
      .yamato/python-ll-api-test.yml
  8. 2
      .yamato/standalone-build-test.yml
  9. 6
      DevProject/Packages/packages-lock.json
  10. 2
      docs/Training-on-Microsoft-Azure.md
  11. 16
      docs/Learning-Environment-Design-Agents.md
  12. 2
      docs/Training-Configuration-File.md
  13. 2
      docs/Training-ML-Agents.md
  14. 8
      docs/Installation-Anaconda-Windows.md
  15. 11
      docs/Installation.md
  16. 136
      docs/Learning-Environment-Examples.md
  17. 2
      docs/Training-on-Amazon-Web-Service.md
  18. 4
      docs/Unity-Inference-Engine.md
  19. 999
      docs/images/example-envs.png
  20. 6
      config/imitation/CrawlerStatic.yaml
  21. 2
      config/sac/CrawlerDynamic.yaml
  22. 10
      config/sac/FoodCollector.yaml
  23. 2
      config/sac/WalkerDynamic.yaml
  24. 2
      config/sac/WormDynamic.yaml
  25. 2
      config/ppo/CrawlerDynamic.yaml
  26. 4
      config/ppo/FoodCollector.yaml
  27. 2
      config/ppo/WormDynamic.yaml
  28. 4
      config/ppo/CrawlerDynamicVariableSpeed.yaml
  29. 0
      config/ppo/PushBlock.yaml
  30. 2
      ml-agents/README.md
  31. 4
      ml-agents/tests/yamato/scripts/run_llapi.py
  32. 17
      ml-agents/mlagents/trainers/buffer.py
  33. 25
      ml-agents/mlagents/trainers/optimizer/torch_optimizer.py
  34. 5
      ml-agents/mlagents/trainers/ppo/optimizer_torch.py
  35. 10
      ml-agents/mlagents/trainers/ppo/trainer.py
  36. 3
      ml-agents/mlagents/trainers/sac/optimizer_torch.py
  37. 9
      ml-agents/mlagents/trainers/tests/dummy_config.py
  38. 16
      ml-agents/mlagents/trainers/tests/simple_test_envs.py
  39. 36
      ml-agents/mlagents/trainers/tests/test_buffer.py
  40. 31
      ml-agents/mlagents/trainers/tests/torch/test_hybrid.py
  41. 61
      ml-agents/mlagents/trainers/tests/torch/saver/test_saver.py
  42. 5
      ml-agents/mlagents/trainers/tests/torch/test_ppo.py
  43. 6
      ml-agents/mlagents/trainers/tests/torch/test_simple_rl.py
  44. 5
      ml-agents/setup.py
  45. 2
      ml-agents-envs/README.md
  46. 23
      ml-agents-envs/mlagents_envs/communicator_objects/observation_pb2.py
  47. 7
      ml-agents-envs/mlagents_envs/communicator_objects/observation_pb2.pyi
  48. 4
      ml-agents-envs/mlagents_envs/base_env.py
  49. 85
      ml-agents-envs/mlagents_envs/rpc_utils.py
  50. 42
      ml-agents-envs/mlagents_envs/tests/test_rpc_utils.py
  51. 4
      protobuf-definitions/proto/mlagents_envs/communicator_objects/observation.proto
  52. 2
      com.unity.ml-agents.extensions/Documentation~/Grid-Sensor.md
  53. 2
      com.unity.ml-agents.extensions/Documentation~/Match3.md
  54. 16
      com.unity.ml-agents.extensions/Documentation~/com.unity.ml-agents.extensions.md
  55. 12
      com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/ButtonInputActionAdaptorTests.cs
  56. 12
      com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/DoubleInputActionAdaptorTests.cs
  57. 12
      com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/FloatInputActionAdapatorTests.cs
  58. 12
      com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/IntegerInputActionAdaptorTests.cs
  59. 12
      com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/Vector2InputActionAdaptorTests.cs
  60. 17
      com.unity.ml-agents.extensions/Tests/Runtime/Input/InputActionActuatorTests.cs
  61. 2
      com.unity.ml-agents.extensions/Tests/Runtime/Input/InputActuatorComponentTests.cs
  62. 2
      com.unity.ml-agents.extensions/Tests/Runtime/Input/Unity.ML-Agents.Extensions.Input.Tests.Runtime.asmdef
  63. 4
      com.unity.ml-agents.extensions/package.json
  64. 7
      com.unity.ml-agents.extensions/Runtime/Input/Adaptors/ButtonInputActionAdaptor.cs
  65. 7
      com.unity.ml-agents.extensions/Runtime/Input/Adaptors/DoubleInputActionAdaptor.cs
  66. 6
      com.unity.ml-agents.extensions/Runtime/Input/Adaptors/FloatInputActionAdaptor.cs
  67. 6
      com.unity.ml-agents.extensions/Runtime/Input/Adaptors/IntegerInputActionAdaptor.cs
  68. 7
      com.unity.ml-agents.extensions/Runtime/Input/Adaptors/Vector2InputActionAdaptor.cs
  69. 4
      com.unity.ml-agents.extensions/Runtime/Input/IRLActionInputAdaptor.cs
  70. 4
      com.unity.ml-agents.extensions/Runtime/Input/Unity.ML-Agents.Extensions.Input.asmdef
  71. 12
      com.unity.ml-agents.extensions/Runtime/Input/InputActionActuator.cs
  72. 63
      com.unity.ml-agents.extensions/Runtime/Input/InputActuatorComponent.cs
  73. 1
      Project/Project.sln.DotSettings
  74. 1
      Project/ProjectSettings/GraphicsSettings.asset
  75. 9
      Project/ProjectSettings/TagManager.asset
  76. 4
      Project/Packages/manifest.json
  77. 2
      Project/Assets/ML-Agents/Examples/GridWorld/Demos/ExpertGrid.demo.meta
  78. 52
      Project/Assets/ML-Agents/Examples/Worm/Scripts/WormAgent.cs
  79. 9
      Project/Assets/ML-Agents/Examples/Worm/Prefabs/PlatformWormDynamicTarget.prefab
  80. 13
      Project/Assets/ML-Agents/Examples/Worm/Prefabs/WormBasePrefab.prefab
  81. 1001
      Project/Assets/ML-Agents/Examples/FoodCollector/TFModels/FoodCollector.onnx
  82. 2
      Project/Assets/ML-Agents/Examples/FoodCollector/TFModels/FoodCollector.onnx.meta
  83. 5
      Project/Assets/ML-Agents/Examples/FoodCollector/Prefabs/FoodCollectorArea.prefab.meta
  84. 380
      Project/Assets/ML-Agents/Examples/FoodCollector/Prefabs/FoodCollectorArea.prefab
  85. 5
      Project/Assets/ML-Agents/Examples/FoodCollector/Scenes/FoodCollector.unity.meta
  86. 862
      Project/Assets/ML-Agents/Examples/FoodCollector/Scenes/FoodCollector.unity
  87. 2
      Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpCrawlerDynVS.demo.meta
  88. 7
      Project/Assets/ML-Agents/Examples/Crawler/Scenes/CrawlerDynamicVariableSpeed.unity
  89. 95
      Project/Assets/ML-Agents/Examples/Crawler/Scripts/CrawlerAgent.cs
  90. 13
      Project/Assets/ML-Agents/Examples/Crawler/Prefabs/CrawlerBase.prefab
  91. 2
      Project/Assets/ML-Agents/Examples/Walker/Demos/ExpertWalkerDyVS.demo.meta
  92. 2
      Project/Assets/ML-Agents/Examples/Walker/Prefabs/Ragdoll/WalkerRagdollBase.prefab
  93. 14
      Project/Assets/ML-Agents/Examples/Match3/Prefabs/Match3VectorObs.prefab
  94. 14
      Project/Assets/ML-Agents/Examples/Match3/Prefabs/Match3VisualObs.prefab
  95. 5
      Project/Assets/ML-Agents/Examples/Sorter/Scripts/SorterAgent.cs
  96. 2
      Project/Assets/ML-Agents/Examples/PushBlock/Demos/ExpertPush.demo.meta
  97. 4
      com.unity.ml-agents/.gitignore
  98. 4
      com.unity.ml-agents/Documentation~/com.unity.ml-agents.md
  99. 48
      com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Observation.cs
  100. 2
      com.unity.ml-agents/Runtime/MultiAgentGroupIdCounter.cs.meta

11
README.md


# Unity ML-Agents Toolkit
[![docs badge](https://img.shields.io/badge/docs-reference-blue.svg)](https://github.com/Unity-Technologies/ml-agents/tree/release_14_docs/docs/)
[![docs badge](https://img.shields.io/badge/docs-reference-blue.svg)](https://github.com/Unity-Technologies/ml-agents/tree/release_13_docs/docs/)
[![license badge](https://img.shields.io/badge/license-Apache--2.0-green.svg)](LICENSE)

## Releases & Documentation
**Our latest, stable release is `Release 14`. Click
[here](https://github.com/Unity-Technologies/ml-agents/tree/release_14_docs/docs/Readme.md)
**Our latest, stable release is `Release 13`. Click
[here](https://github.com/Unity-Technologies/ml-agents/tree/release_13_docs/docs/Readme.md)
to get started with the latest release of ML-Agents.**
The table below lists all our releases, including our `main` branch which is

| **Version** | **Release Date** | **Source** | **Documentation** | **Download** | **Python Package** | **Unity Package** |
|:-------:|:------:|:-------------:|:-------:|:------------:|:------------:|:------------:|
| **main (unstable)** | -- | [source](https://github.com/Unity-Technologies/ml-agents/tree/main) | [docs](https://github.com/Unity-Technologies/ml-agents/tree/main/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/main.zip) | -- | -- |
| **Release 14** | **March 5, 2021** | **[source](https://github.com/Unity-Technologies/ml-agents/tree/release_14)** | **[docs](https://github.com/Unity-Technologies/ml-agents/tree/release_14_docs/docs/Readme.md)** | **[download](https://github.com/Unity-Technologies/ml-agents/archive/release_14.zip)** | **[0.24.1](https://pypi.org/project/mlagents/0.24.1/)** | **[1.8.1](https://docs.unity3d.com/Packages/com.unity.ml-agents@1.8/manual/index.html)** |
| **Verified Package 1.0.7** | **March 8, 2021** | **[source](https://github.com/Unity-Technologies/ml-agents/tree/com.unity.ml-agents_1.0.7)** | **[docs](https://github.com/Unity-Technologies/ml-agents/blob/release_2_verified_docs/docs/Readme.md)** | **[download](https://github.com/Unity-Technologies/ml-agents/archive/com.unity.ml-agents_1.0.7.zip)** | **[0.16.1](https://pypi.org/project/mlagents/0.16.1/)** | **[1.0.7](https://docs.unity3d.com/Packages/com.unity.ml-agents@1.0/manual/index.html)** |
| **Verified Package 1.0.6** | November 16, 2020 | [source](https://github.com/Unity-Technologies/ml-agents/tree/com.unity.ml-agents_1.0.6) | [docs](https://github.com/Unity-Technologies/ml-agents/blob/release_2_verified_docs/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/com.unity.ml-agents_1.0.6.zip) | [0.16.1](https://pypi.org/project/mlagents/0.16.1/) | [1.0.6](https://docs.unity3d.com/Packages/com.unity.ml-agents@1.0/manual/index.html) |
| **Verified Package 1.0.6** | **November 16, 2020** | **[source](https://github.com/Unity-Technologies/ml-agents/tree/com.unity.ml-agents_1.0.6)** | **[docs](https://github.com/Unity-Technologies/ml-agents/blob/release_2_verified_docs/docs/Readme.md)** | **[download](https://github.com/Unity-Technologies/ml-agents/archive/com.unity.ml-agents_1.0.6.zip)** | **[0.16.1](https://pypi.org/project/mlagents/0.16.1/)** | **[1.0.6](https://docs.unity3d.com/Packages/com.unity.ml-agents@1.0/manual/index.html)** |
| **Verified Package 1.0.5** | September 23, 2020 | [source](https://github.com/Unity-Technologies/ml-agents/tree/com.unity.ml-agents_1.0.5) | [docs](https://github.com/Unity-Technologies/ml-agents/blob/release_2_verified_docs/docs/Readme.md) | [download](https://github.com/Unity-Technologies/ml-agents/archive/com.unity.ml-agents_1.0.5.zip) | [0.16.1](https://pypi.org/project/mlagents/0.16.1/) | [1.0.5](https://docs.unity3d.com/Packages/com.unity.ml-agents@1.0/manual/index.html) |
If you are a researcher interested in a discussion of Unity as an AI platform,
see a pre-print of our

29
utils/validate_versions.py


def set_version(
python_version: str,
csharp_version: str,
csharp_extensions_version: str,
release_tag: Optional[str],
python_version: str, csharp_version: str, release_tag: Optional[str]
) -> None:
# Sanity check - make sure test tags have a test or dev version
if release_tag and "test" in release_tag:

if csharp_version is not None:
package_version = csharp_version + "-preview"
if csharp_extensions_version is not None:
extension_version = csharp_extensions_version + "-preview"
set_extension_package_version(package_version, extension_version)
set_extension_package_version(package_version)
print(f"Setting package version to {package_version} in {ACADEMY_PATH}")
set_academy_version_string(package_version)

f.write("\n")
def set_extension_package_version(
new_dependency_version: str, new_extension_version
) -> None:
def set_extension_package_version(new_version: str) -> None:
package_json["dependencies"]["com.unity.ml-agents"] = new_dependency_version
if new_extension_version is not None:
package_json["version"] = new_extension_version
package_json["dependencies"]["com.unity.ml-agents"] = new_version
with open(MLAGENTS_EXTENSIONS_PACKAGE_JSON_PATH, "w") as f:
json.dump(package_json, f, indent=2)
f.write("\n")

parser = argparse.ArgumentParser()
parser.add_argument("--python-version", default=None)
parser.add_argument("--csharp-version", default=None)
parser.add_argument("--csharp-extensions-version", default=None)
parser.add_argument("--release-tag", default=None)
# unused, but allows precommit to pass filenames
parser.add_argument("files", nargs="*")

print(f"Updating python library to version {args.python_version}")
if args.csharp_version:
print(f"Updating C# package to version {args.csharp_version}")
if args.csharp_extensions_version:
print(
f"Updating C# extensions package to version {args.csharp_extensions_version}"
)
set_version(
args.python_version,
args.csharp_version,
args.csharp_extensions_version,
args.release_tag,
)
set_version(args.python_version, args.csharp_version, args.release_tag)
if args.release_tag is not None:
print_release_tag_commands(
args.python_version, args.csharp_version, args.release_tag

2
utils/make_readme_table.py


ReleaseInfo("release_11", "1.7.0", "0.23.0", "December 21, 2020"),
ReleaseInfo("release_12", "1.7.2", "0.23.0", "December 22, 2020"),
ReleaseInfo("release_13", "1.8.0", "0.24.0", "February 17, 2021"),
ReleaseInfo("release_14", "1.8.1", "0.24.1", "March 5, 2021"),
ReleaseInfo("", "1.0.7", "0.16.1", "March 8, 2021", is_verified=True),
ReleaseInfo("", "1.0.6", "0.16.1", "November 16, 2020", is_verified=True),
ReleaseInfo("", "1.0.5", "0.16.1", "September 23, 2020", is_verified=True),
ReleaseInfo("", "1.0.4", "0.16.1", "August 20, 2020", is_verified=True),

167
utils/validate_release_links.py


from typing import List, Optional, Pattern
RELEASE_PATTERN = re.compile(r"release_[0-9]+(_docs)*")
# This matches the various ways to invoke pip: "pip", "pip3", "python -m pip"
# It matches "mlagents" and "mlagents_envs", accessible as group "package"
# and optionally matches the version, e.g. "==1.2.3"
PIP_INSTALL_PATTERN = re.compile(
r"(python -m )?pip3* install (?P<package>mlagents(_envs)?)(==[0-9]\.[0-9]\.[0-9](\.dev[0-9]+)?)?"
)
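As an aside (not part of the diff), a minimal sketch of how this pattern behaves; the input strings and the pinned version here are purely illustrative:

```python
import re

# Same pattern as in the file above, reproduced for a self-contained demo.
PIP_INSTALL_PATTERN = re.compile(
    r"(python -m )?pip3* install (?P<package>mlagents(_envs)?)(==[0-9]\.[0-9]\.[0-9](\.dev[0-9]+)?)?"
)

m = PIP_INSTALL_PATTERN.search("python -m pip install mlagents_envs==1.2.3")
print(m.group("package"))  # -> "mlagents_envs"

# sub() rewrites the whole matched invocation, which is how the new
# update_pip_install_line() pins doc snippets to a single version.
print(PIP_INSTALL_PATTERN.sub("python -m pip install mlagents==9.9.9",
                              "Run pip3 install mlagents to install"))
# -> "Run python -m pip install mlagents==9.9.9 to install"
```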
# To allow everything in the file (effectively skipping it), use MATCH_ANY for the value
# To allow everything in the file, use None for the value
ALLOW_LIST = {
# Previous release table
"README.md": re.compile(r"\*\*(Verified Package ([0-9]\.?)*|Release [0-9]+)\*\*"),

}
def test_release_pattern():
# Just some sanity check that the regex works as expected.
for s, expected in [
(
"https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/Food.md",
True,
),
("https://github.com/Unity-Technologies/ml-agents/blob/release_4/Foo.md", True),
(
"git clone --branch release_4 https://github.com/Unity-Technologies/ml-agents.git",
True,
),
(
"https://github.com/Unity-Technologies/ml-agents/blob/release_123_docs/Foo.md",
True,
),
(
"https://github.com/Unity-Technologies/ml-agents/blob/release_123/Foo.md",
True,
),
(
"https://github.com/Unity-Technologies/ml-agents/blob/latest_release/docs/Foo.md",
False,
),
]:
assert bool(RELEASE_PATTERN.search(s)) is expected
print("release tests OK!")
def test_pip_pattern():
def test_pattern():
for s, expected in [
("pip install mlagents", True),
("pip3 install mlagents", True),
("python -m pip install mlagents", True),
("python -m pip install mlagents==1.2.3", True),
("python -m pip install mlagents_envs==1.2.3", True),
]:
assert bool(PIP_INSTALL_PATTERN.search(s)) is expected
sub_expected = "Try running rm -rf / to install"
assert sub_expected == PIP_INSTALL_PATTERN.sub(
"rm -rf /", "Try running python -m pip install mlagents==1.2.3 to install"
assert RELEASE_PATTERN.search(
"https://github.com/Unity-Technologies/ml-agents/blob/release_4_docs/Food.md"
print("pip tests OK!")
def update_pip_install_line(line, package_version):
match = PIP_INSTALL_PATTERN.search(line)
package_name = match.group("package")
replacement_version = f"python -m pip install {package_name}=={package_version}"
updated = PIP_INSTALL_PATTERN.sub(replacement_version, line)
return updated
assert RELEASE_PATTERN.search(
"https://github.com/Unity-Technologies/ml-agents/blob/release_4/Foo.md"
)
assert RELEASE_PATTERN.search(
"git clone --branch release_4 https://github.com/Unity-Technologies/ml-agents.git"
)
assert RELEASE_PATTERN.search(
"https://github.com/Unity-Technologies/ml-agents/blob/release_123_docs/Foo.md"
)
assert RELEASE_PATTERN.search(
"https://github.com/Unity-Technologies/ml-agents/blob/release_123/Foo.md"
)
assert not RELEASE_PATTERN.search(
"https://github.com/Unity-Technologies/ml-agents/blob/latest_release/docs/Foo.md"
)
print("tests OK!")
def git_ls_files() -> List[str]:

raise RuntimeError("Can't determine release tag")
def get_python_package_version() -> str:
"""
Returns the mlagents python package version.
:return:
"""
with open(TRAINER_INIT_FILE) as f:
for line in f:
if "__version__" in line:
lhs, equals_string, rhs = line.strip().partition(" = ")
# Evaluate the right hand side of the expression
return ast.literal_eval(rhs)
# If we couldn't find the release tag, raise an exception
# (since we can't return None here)
raise RuntimeError("Can't determine python package version")
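For clarity, the `partition` + `ast.literal_eval` idiom used above can be exercised in isolation (the version string here is made up):

```python
import ast

# Parse a line like the one in the trainers' __init__.py.
line = '__version__ = "0.24.1"'
lhs, _, rhs = line.strip().partition(" = ")
assert lhs == "__version__"
assert ast.literal_eval(rhs) == "0.24.1"  # safely evaluates the quoted literal
```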
filename: str,
release_tag_pattern: Pattern,
release_tag: str,
pip_allow_pattern: Pattern,
package_version: str,
filename: str, global_allow_pattern: Pattern, release_tag: str
) -> List[str]:
"""
Validate a single file and return any offending lines.

allow_list_pattern = ALLOW_LIST.get(filename, None)
with open(filename) as f:
for line in f:
# Does it contain anything of the form release_123
has_release_pattern = RELEASE_PATTERN.search(line) is not None
# Does it contain this particular release, e.g. release_42 or release_42_docs
has_release_tag_pattern = (
release_tag_pattern.search(line) is not None
)
# Does it contain the allow list pattern for the file (if there is one)
has_allow_list_pattern = (
allow_list_pattern
keep_line = True
keep_line = not RELEASE_PATTERN.search(line)
keep_line |= global_allow_pattern.search(line) is not None
keep_line |= (
allow_list_pattern is not None
pip_install_ok = (
has_allow_list_pattern
or PIP_INSTALL_PATTERN.search(line) is None
or pip_allow_pattern.search(line) is not None
)
release_tag_ok = (
not has_release_pattern
or has_release_tag_pattern
or has_allow_list_pattern
)
if release_tag_ok and pip_install_ok:
if keep_line:
new_line = re.sub(r"release_[0-9]+", fr"{release_tag}", line)
new_line = update_pip_install_line(new_line, package_version)
new_file.write(new_line)
new_file.write(
re.sub(r"release_[0-9]+", fr"{release_tag}", line)
)
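To see what that `re.sub` does, a tiny standalone example (URL and tag invented for illustration):

```python
import re

release_tag = "release_14"
line = "https://github.com/Unity-Technologies/ml-agents/tree/release_13_docs/docs/"
# Any release_<N> reference in the line is rewritten to the current tag.
print(re.sub(r"release_[0-9]+", release_tag, line))
# -> https://github.com/Unity-Technologies/ml-agents/tree/release_14_docs/docs/
```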
if bad_lines:
if os.path.exists(filename):
os.remove(filename)

def check_all_files(
release_allow_pattern: Pattern,
release_tag: str,
pip_allow_pattern: Pattern,
package_version: str,
) -> List[str]:
def check_all_files(allow_pattern: Pattern, release_tag: str) -> List[str]:
:param release_allow_pattern:
:param allow_pattern:
"""
bad_lines = []
file_types = {".py", ".md", ".cs"}

bad_lines += check_file(
file_name,
release_allow_pattern,
release_tag,
pip_allow_pattern,
package_version,
)
bad_lines += check_file(file_name, allow_pattern, release_tag)
return bad_lines

print("Release tag is None, exiting")
sys.exit(0)
package_version = get_python_package_version()
print(f"Python package version: {package_version}")
release_allow_pattern = re.compile(f"{release_tag}(_docs)?")
pip_allow_pattern = re.compile(
f"python -m pip install mlagents(_envs)?=={package_version}"
)
bad_lines = check_all_files(
release_allow_pattern, release_tag, pip_allow_pattern, package_version
)
allow_pattern = re.compile(f"{release_tag}(_docs)*")
bad_lines = check_all_files(allow_pattern, release_tag)
if bad_lines:
for line in bad_lines:
print(line)

if __name__ == "__main__":
if "--test" in sys.argv:
test_release_pattern()
test_pip_pattern()
test_pattern()
main()

23
.github/workflows/pytest.yml


TEST_ENFORCE_BUFFER_KEY_TYPES: 1
strategy:
matrix:
python-version: [3.6.x, 3.7.x, 3.8.x, 3.9.x]
include:
- python-version: 3.6.x
pip_constraints: test_constraints_min_version.txt
- python-version: 3.7.x
pip_constraints: test_constraints_mid_version.txt
- python-version: 3.8.x
pip_constraints: test_constraints_mid_version.txt
- python-version: 3.9.x
pip_constraints: test_constraints_max_version.txt
python-version: [3.6.x, 3.7.x, 3.8.x]
steps:
- uses: actions/checkout@v2
- name: Set up Python

# This path is specific to Ubuntu
path: ~/.cache/pip
# Look to see if there is a cache hit for the corresponding requirements file
key: ${{ runner.os }}-pip-${{ hashFiles('ml-agents/setup.py', 'ml-agents-envs/setup.py', 'gym-unity/setup.py', 'test_requirements.txt', matrix.pip_constraints) }}
key: ${{ runner.os }}-pip-${{ hashFiles('ml-agents/setup.py', 'ml-agents-envs/setup.py', 'gym-unity/setup.py', 'test_requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-
${{ runner.os }}-

run: |
python -m pip install --upgrade pip
python -m pip install --upgrade setuptools
python -m pip install --progress-bar=off -e ./ml-agents-envs -c ${{ matrix.pip_constraints }}
python -m pip install --progress-bar=off -e ./ml-agents -c ${{ matrix.pip_constraints }}
python -m pip install --progress-bar=off -r test_requirements.txt -c ${{ matrix.pip_constraints }}
python -m pip install --progress-bar=off -e ./gym-unity -c ${{ matrix.pip_constraints }}
python -m pip install --progress-bar=off -e ./ml-agents-plugin-examples -c ${{ matrix.pip_constraints }}
python -m pip install --progress-bar=off -e ./ml-agents-envs
python -m pip install --progress-bar=off -e ./ml-agents
python -m pip install --progress-bar=off -r test_requirements.txt
python -m pip install --progress-bar=off -e ./gym-unity
python -m pip install --progress-bar=off -e ./ml-agents-plugin-examples
- name: Save python dependencies
run: |
pip freeze > pip_versions-${{ matrix.python-version }}.txt

8
.yamato/com.unity.ml-agents-pack.yml


image: package-ci/ubuntu:stable
flavor: b1.small
commands:
- |
python3 -m pip install unity-downloader-cli --index-url https://artifactory.prd.it.unity3d.com/artifactory/api/pypi/pypi/simple --upgrade
unity-downloader-cli -u 2018.4 -c editor --wait --fast
./.Editor/Unity -projectPath Project -batchMode -executeMethod Unity.MLAgents.SampleExporter.ExportCuratedSamples -logFile -
npm install upm-ci-utils@stable -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
upm-ci project pack --project-path Project
- npm install upm-ci-utils@stable -g --registry https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-npm
- upm-ci project pack --project-path Project
artifacts:
packages:
paths:

2
.yamato/python-ll-api-test.yml


python ml-agents/tests/yamato/scripts/run_llapi.py
python ml-agents/tests/yamato/scripts/run_llapi.py --env=artifacts/testPlayer-Basic
python ml-agents/tests/yamato/scripts/run_llapi.py --env=artifacts/testPlayer-WallJump
python ml-agents/tests/yamato/scripts/run_llapi.py --env=artifacts/testPlayer-Match3
python ml-agents/tests/yamato/scripts/run_llapi.py --env=artifacts/testPlayer-Bouncer
dependencies:
- .yamato/standalone-build-test.yml#test_linux_standalone_{{ editor.version }}
triggers:

2
.yamato/standalone-build-test.yml


- unity-downloader-cli -u {{ editor.version }} -c editor --wait --fast
- python3 -u -m ml-agents.tests.yamato.standalone_build_tests --build-target=linux
- python3 -u -m ml-agents.tests.yamato.standalone_build_tests --build-target=linux --scene=Assets/ML-Agents/Examples/Basic/Scenes/Basic.unity
- python3 -u -m ml-agents.tests.yamato.standalone_build_tests --build-target=linux --scene=Assets/ML-Agents/Examples/Match3/Scenes/Match3.unity
- python3 -u -m ml-agents.tests.yamato.standalone_build_tests --build-target=linux --scene=Assets/ML-Agents/Examples/Bouncer/Scenes/Bouncer.unity
- python3 -u -m ml-agents.tests.yamato.standalone_build_tests --build-target=linux --scene=Assets/ML-Agents/Examples/WallJump/Scenes/WallJump.unity
- python3 -u -m ml-agents.tests.yamato.standalone_build_tests --build-target=linux --scene=Assets/ML-Agents/TestScenes/TestCompressedGrid/TestGridCompressed.unity
- python3 -u -m ml-agents.tests.yamato.standalone_build_tests --build-target=linux --scene=Assets/ML-Agents/TestScenes/TestCompressedTexture/TestTextureCompressed.unity

6
DevProject/Packages/packages-lock.json


"url": "https://artifactory.prd.cds.internal.unity3d.com/artifactory/api/npm/upm-candidates"
},
"com.unity.barracuda": {
"version": "1.3.1-preview",
"version": "1.3.0-preview",
"depth": 1,
"source": "registry",
"dependencies": {

"depth": 0,
"source": "local",
"dependencies": {
"com.unity.barracuda": "1.3.1-preview",
"com.unity.barracuda": "1.3.0-preview",
"com.unity.modules.imageconversion": "1.0.0",
"com.unity.modules.jsonserialize": "1.0.0",
"com.unity.modules.physics": "1.0.0",

"depth": 0,
"source": "local",
"dependencies": {
"com.unity.ml-agents": "1.8.0-preview"
"com.unity.ml-agents": "1.7.2-preview"
}
},
"com.unity.multiplayer-hlapi": {

2
docs/Training-on-Microsoft-Azure.md


instance, and set it as the working directory.
2. Install the required packages:
Torch: `pip3 install torch==1.7.0 -f https://download.pytorch.org/whl/torch_stable.html` and
MLAgents: `python -m pip install mlagents==0.24.1`
MLAgents: `pip3 install mlagents`
## Testing

16
docs/Learning-Environment-Design-Agents.md


training process learns to control the speed of the Agent through this
parameter.
The [3DBall example](Learning-Environment-Examples.md#3dball-3d-balance-ball) uses
The [Reacher example](Learning-Environment-Examples.md#reacher) uses
![3DBall](images/balance.png)
![reacher](images/reacher.png)
These control values are applied as rotation to the cube:
These control values are applied as torques to the bodies making up the arm:
var actionZ = 2f * Mathf.Clamp(actionBuffers.ContinuousActions[0], -1f, 1f);
var actionX = 2f * Mathf.Clamp(actionBuffers.ContinuousActions[1], -1f, 1f);
var torqueX = Mathf.Clamp(actionBuffers.ContinuousActions[0], -1f, 1f) * 150f;
var torqueZ = Mathf.Clamp(actionBuffers.ContinuousActions[1], -1f, 1f) * 150f;
m_RbA.AddTorque(new Vector3(torqueX, 0f, torqueZ));
gameObject.transform.Rotate(new Vector3(0, 0, 1), actionZ);
gameObject.transform.Rotate(new Vector3(1, 0, 0), actionX);
torqueX = Mathf.Clamp(actionBuffers.ContinuousActions[2], -1f, 1f) * 150f;
torqueZ = Mathf.Clamp(actionBuffers.ContinuousActions[3], -1f, 1f) * 150f;
m_RbB.AddTorque(new Vector3(torqueX, 0f, torqueZ));
}
```

2
docs/Training-Configuration-File.md


| `network_settings -> hidden_units` | (default = `128`) Number of units in the hidden layers of the neural network. Correspond to how many units are in each fully connected layer of the neural network. For simple problems where the correct action is a straightforward combination of the observation inputs, this should be small. For problems where the action is a very complex interaction between the observation variables, this should be larger. <br><br> Typical range: `32` - `512` |
| `network_settings -> num_layers` | (default = `2`) The number of hidden layers in the neural network. Corresponds to how many hidden layers are present after the observation input, or after the CNN encoding of the visual observation. For simple problems, fewer layers are likely to train faster and more efficiently. More layers may be necessary for more complex control problems. <br><br> Typical range: `1` - `3` |
| `network_settings -> normalize` | (default = `false`) Whether normalization is applied to the vector observation inputs. This normalization is based on the running average and variance of the vector observation. Normalization can be helpful in cases with complex continuous control problems, but may be harmful with simpler discrete control problems. |
| `network_settings -> vis_encode_type` | (default = `simple`) Encoder type for encoding visual observations. <br><br> `simple` (default) uses a simple encoder which consists of two convolutional layers, `nature_cnn` uses the CNN implementation proposed by [Mnih et al.](https://www.nature.com/articles/nature14236), consisting of three convolutional layers, and `resnet` uses the [IMPALA Resnet](https://arxiv.org/abs/1802.01561) consisting of three stacked layers, each with two residual blocks, making a much larger network than the other two. `match3` is a smaller CNN ([Gudmundsoon et al.](https://www.researchgate.net/publication/328307928_Human-Like_Playtesting_with_Deep_Learning)) that is optimized for board games, and can be used down to visual observation sizes of 5x5. |
| `network_settings -> vis_encoder_type` | (default = `simple`) Encoder type for encoding visual observations. <br><br> `simple` (default) uses a simple encoder which consists of two convolutional layers, `nature_cnn` uses the CNN implementation proposed by [Mnih et al.](https://www.nature.com/articles/nature14236), consisting of three convolutional layers, and `resnet` uses the [IMPALA Resnet](https://arxiv.org/abs/1802.01561) consisting of three stacked layers, each with two residual blocks, making a much larger network than the other two. `match3` is a smaller CNN ([Gudmundsoon et al.](https://www.researchgate.net/publication/328307928_Human-Like_Playtesting_with_Deep_Learning)) that is optimized for board games, and can be used down to visual observation sizes of 5x5. |
## Trainer-specific Configurations

2
docs/Training-ML-Agents.md


# Configuration of the neural network (common to PPO/SAC)
network_settings:
vis_encode_type: simple
vis_encoder_type: simple
normalize: false
hidden_units: 128
num_layers: 2

8
docs/Installation-Anaconda-Windows.md


the ml-agents Conda environment by typing `activate ml-agents`)_:
```sh
git clone --branch release_14 https://github.com/Unity-Technologies/ml-agents.git
git clone --branch release_13 https://github.com/Unity-Technologies/ml-agents.git
The `--branch release_14` option will switch to the tag of the latest stable
The `--branch release_13` option will switch to the tag of the latest stable
release. Omitting that will get the `main` branch which is potentially
unstable.

connected to the Internet and then type in the Anaconda Prompt:
```console
python -m pip install mlagents==0.24.1
pip install mlagents
```
This will complete the installation of all the required Python packages to run

this, you can try:
```console
python -m pip install mlagents==0.24.1 --no-cache-dir
pip install mlagents --no-cache-dir
```
The `--no-cache-dir` option tells pip to disable the cache.

11
docs/Installation.md


of our tutorials / guides assume you have access to our example environments).
```sh
git clone --branch release_14 https://github.com/Unity-Technologies/ml-agents.git
git clone --branch release_13 https://github.com/Unity-Technologies/ml-agents.git
The `--branch release_14` option will switch to the tag of the latest stable
release. Omitting that will get the `main` branch which is potentially unstable.
The `--branch release_13` option will switch to the tag of the latest stable
release. Omitting that will get the `main` branch which is potentially
unstable.
back, make sure to clone the `main` branch (by omitting `--branch release_14`
back, make sure to clone the `main` branch (by omitting `--branch release_13`
from the command above). See our
[Contributions Guidelines](../com.unity.ml-agents/CONTRIBUTING.md) for more
information on contributing to the ML-Agents Toolkit.

run from the command line:
```sh
python -m pip install mlagents==0.24.1
pip3 install mlagents
```
Note that this will install `mlagents` from PyPi, _not_ from the cloned

136
docs/Learning-Environment-Examples.md


number of goals.
- Benchmark Mean Reward: 0.8
## Tennis
![Tennis](images/tennis.png)
- Set-up: Two-player game where agents control rackets to hit a ball over the
net.
- Goal: The agents must hit the ball so that the opponent cannot hit a valid
return.
- Agents: The environment contains two agents with the same Behavior Parameters.
After training you can set the `Behavior Type` to `Heuristic Only` on one of
the Agent's Behavior Parameters to play against your trained model.
- Agent Reward Function (independent):
- +1.0 To the agent that wins the point. An agent wins a point by preventing
the opponent from hitting a valid return.
- -1.0 To the agent who loses the point.
- Behavior Parameters:
- Vector Observation space: 9 variables corresponding to position, velocity
and orientation of ball and racket.
- Actions: 3 continuous actions, corresponding to movement
toward net or away from net, jumping and rotation.
- Visual Observations: None
- Float Properties: Three
- gravity: Magnitude of gravity
- Default: 9.81
- Recommended Minimum: 6
- Recommended Maximum: 20
- scale: Specifies the scale of the ball in the 3 dimensions (equal across the
three dimensions)
- Default: .5
- Recommended Minimum: 0.2
- Recommended Maximum: 5
## Push Block
![Push](images/push.png)

block).
- Actions: 1 discrete action branch with 7 actions, corresponding to turn clockwise
and counterclockwise, move along four different face directions, or do nothing.
- Visual Observations (Optional): One first-person camera. Use
`VisualPushBlock` scene. **The visual observation version of this
environment does not train with the provided default training parameters.**
- Float Properties: Four
- block_scale: Scale of the block along the x and z dimensions
- Default: 2

- Float Properties: Four
- Benchmark Mean Reward (Big & Small Wall): 0.8
## Reacher
![Reacher](images/reacher.png)
- Set-up: Double-jointed arm which can move to target locations.
- Goal: Each agent must move its hand to the goal location and keep it there.
- Agents: The environment contains 10 agents with the same Behavior Parameters.
- Agent Reward Function (independent):
- +0.1 Each step agent's hand is in goal location.
- Behavior Parameters:
- Vector Observation space: 26 variables corresponding to position, rotation,
velocity, and angular velocities of the two arm rigid bodies.
- Actions: 4 continuous actions, corresponding to torque
applicable to two joints.
- Visual Observations: None.
- Float Properties: Five
- goal_size: radius of the goal zone
- Default: 5
- Recommended Minimum: 1
- Recommended Maximum: 10
- goal_speed: speed of the goal zone around the arm (in radians)
- Default: 1
- Recommended Minimum: 0.2
- Recommended Maximum: 4
- gravity
- Default: 9.81
- Recommended Minimum: 4
- Recommended Maximum: 20
- deviation: Magnitude of sinusoidal (cosine) deviation of the goal along the
vertical dimension
- Default: 0
- Recommended Minimum: 0
- Recommended Maximum: 5
- deviation_freq: Frequency of the cosine deviation of the goal along the
vertical dimension
- Default: 0
- Recommended Minimum: 0
- Recommended Maximum: 3
- Benchmark Mean Reward: 30
## Crawler
![Crawler](images/crawler.png)

- `CrawlerDynamicTarget`- Goal direction is randomized.
- `CrawlerDynamicVariableSpeed`- Goal direction and walking speed are randomized.
- `CrawlerStaticTarget` - Goal direction is always forward.
- `CrawlerStaticVariableSpeed`- Goal direction is always forward. Walking speed is randomized
- Agents: The environment contains 10 agents with the same Behavior Parameters.
- Agent Reward Function (independent):
The reward function is now geometric, meaning the reward each step is a product

rotations for joints.
- Visual Observations: None
- Float Properties: None
- Benchmark Mean Reward: 3000
- Benchmark Mean Reward for `CrawlerDynamicTarget`: 2000
- Benchmark Mean Reward for `CrawlerDynamicVariableSpeed`: 3000
- Benchmark Mean Reward for `CrawlerStaticTarget`: 4000
- Benchmark Mean Reward for `CrawlerStaticVariableSpeed`: 4000
## Worm

- Goal: Each agent must move its body toward the goal direction.
- `WormStaticTarget` - Goal direction is always forward.
- `WormDynamicTarget`- Goal direction is randomized.
- Agents: The environment contains 10 agents with the same Behavior Parameters.
- Agent Reward Function (independent):
The reward function is now geometric, meaning the reward each step is a product

rotations for joints.
- Visual Observations: None
- Float Properties: None
- Benchmark Mean Reward: 800
- Benchmark Mean Reward for `WormStaticTarget`: 1200
- Benchmark Mean Reward for `WormDynamicTarget`: 800
## Food Collector

- -1 for interaction with red spheres
- Behavior Parameters:
- Vector Observation space: 53 corresponding to velocity of agent (2), whether
agent is frozen and/or shot its laser (2), plus grid based perception of
objects around agent's forward direction (40 by 40 with 6 different categories).
agent is frozen and/or shot its laser (2), plus ray-based perception of
objects around agent's forward direction (49; 7 raycast angles with 7
measurements for each).
- Actions:
- 3 continuous actions correspond to Forward Motion, Side Motion and Rotation
- 1 discrete action branch for Laser with 2 possible actions corresponding to

objects, goals, and walls.
- Actions: 1 discrete action Branch, with 4 actions corresponding to agent
rotation and forward/backward movement.
- Visual Observations (Optional): First-person view for the agent. Use
`VisualHallway` scene. **The visual observation version of this environment
does not train with the provided default training parameters.**
## Bouncer
![Bouncer](images/bouncer.png)
- Set-up: Environment where the agent needs on-demand decision making. The agent
must decide how to perform its next bounce only when it touches the ground.
- Goal: Catch the floating green cube. The agent has only a limited number of jumps.
- Agents: The environment contains one agent.
- Agent Reward Function (independent):
- +1 For catching the green cube.
- -1 For bouncing out of bounds.
- -0.05 Times the action squared. Energy expenditure penalty.
- Behavior Parameters:
- Vector Observation space: 6 corresponding to local position of agent and
green cube.
- Actions: 3 continuous actions corresponding to agent force applied for
the jump.
- Visual Observations: None
- Float Properties: Two
- target_scale: The scale of the green cube in the 3 dimensions
- Default: 150
- Recommended Minimum: 50
- Recommended Maximum: 250
- Benchmark Mean Reward: 10
## Soccer Twos
![SoccerTwos](images/soccer.png)

correspond to articulation of the following body-parts: hips, chest, spine,
head, thighs, shins, feet, arms, forearms and hands.
- Goal: Each agent must move its body toward the goal direction without falling.
- `WalkerDynamic`- Goal direction is randomized.
- `WalkerDynamicVariableSpeed`- Goal direction and walking speed are randomized.
- `WalkerStatic` - Goal direction is always forward.
- `WalkerStaticVariableSpeed` - Goal direction is always forward. Walking
speed is randomized
- Agents: The environment contains 10 independent agents with the same Behavior
Parameters.
- Agent Reward Function (independent):

- Default: 8
- Recommended Minimum: 3
- Recommended Maximum: 20
- Benchmark Mean Reward : 2500
- Benchmark Mean Reward for `WalkerDynamic`: 2500
- Benchmark Mean Reward for `WalkerDynamicVariableSpeed`: 2500
- Benchmark Mean Reward for `WalkerStatic`: 3500
- Benchmark Mean Reward for `WalkerStaticVariableSpeed`: 3500
## Pyramids

state.
- Actions: 1 discrete action branch, with 4 actions corresponding to agent rotation and
forward/backward movement.
- Visual Observations (Optional): First-person camera per-agent. Use
`VisualPyramids` scene. **The visual observation version of this environment
does not train with the provided default training parameters.**
- Float Properties: None
- Benchmark Mean Reward: 1.75

2
docs/Training-on-Amazon-Web-Service.md


2. Clone the ML-Agents repo and install the required Python packages
```sh
git clone --branch release_14 https://github.com/Unity-Technologies/ml-agents.git
git clone --branch release_13 https://github.com/Unity-Technologies/ml-agents.git
cd ml-agents/ml-agents/
pip3 install -e .
```

4
docs/Unity-Inference-Engine.md


loading expects certain conventions for constants and tensor names. While it is
possible to construct a model that follows these conventions, we don't provide
any additional help for this. More details can be found in
[TensorNames.cs](https://github.com/Unity-Technologies/ml-agents/blob/release_14_docs/com.unity.ml-agents/Runtime/Inference/TensorNames.cs)
[TensorNames.cs](https://github.com/Unity-Technologies/ml-agents/blob/release_13_docs/com.unity.ml-agents/Runtime/Inference/TensorNames.cs)
[BarracudaModelParamLoader.cs](https://github.com/Unity-Technologies/ml-agents/blob/release_14_docs/com.unity.ml-agents/Runtime/Inference/BarracudaModelParamLoader.cs).
[BarracudaModelParamLoader.cs](https://github.com/Unity-Technologies/ml-agents/blob/release_13_docs/com.unity.ml-agents/Runtime/Inference/BarracudaModelParamLoader.cs).
If you wish to run inference on an externally trained model, you should use
Barracuda directly, instead of trying to run it through ML-Agents.

999
docs/images/example-envs.png
Diff content is too large to display.

6
config/imitation/CrawlerStatic.yaml


behaviors:
Crawler:
CrawlerStatic:
trainer_type: ppo
hyperparameters:
batch_size: 2024

learning_rate: 0.0003
use_actions: false
use_vail: false
demo_path: Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpertCrawler.demo
demo_path: Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpertCrawlerSta.demo
keep_checkpoints: 5
max_steps: 10000000
time_horizon: 1000

demo_path: Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpertCrawler.demo
demo_path: Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpertCrawlerSta.demo
steps: 50000
strength: 0.5
samples_per_update: 0

2
config/sac/CrawlerDynamic.yaml


behaviors:
Crawler:
CrawlerDynamic:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003

10
config/sac/FoodCollector.yaml


learning_rate: 0.0003
learning_rate_schedule: constant
batch_size: 256
buffer_size: 2048
buffer_size: 500000
buffer_init_steps: 0
tau: 0.005
steps_per_update: 10.0

network_settings:
normalize: false
hidden_units: 256
num_layers: 1
hidden_units: 128
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:

max_steps: 2000000
time_horizon: 64
summary_freq: 60000
threaded: true
summary_freq: 10000
threaded: true

2
config/sac/WalkerDynamic.yaml


behaviors:
Walker:
WalkerDynamic:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003

2
config/sac/WormDynamic.yaml


behaviors:
Worm:
WormDynamic:
trainer_type: sac
hyperparameters:
learning_rate: 0.0003

2
config/ppo/CrawlerDynamic.yaml


behaviors:
Crawler:
CrawlerDynamic:
trainer_type: ppo
hyperparameters:
batch_size: 2048

4
config/ppo/FoodCollector.yaml


learning_rate_schedule: linear
network_settings:
normalize: false
hidden_units: 256
num_layers: 1
hidden_units: 128
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:

2
config/ppo/WormDynamic.yaml


behaviors:
Worm:
WormDynamic:
trainer_type: ppo
hyperparameters:
batch_size: 2024

4
config/ppo/CrawlerDynamicVariableSpeed.yaml


behaviors:
Walker:
CrawlerDynamicVariableSpeed:
trainer_type: ppo
hyperparameters:
batch_size: 2048

gamma: 0.995
strength: 1.0
keep_checkpoints: 5
max_steps: 30000000
max_steps: 10000000
time_horizon: 1000
summary_freq: 30000
threaded: true

0
config/ppo/PushBlock.yaml

2
ml-agents/README.md


Install the `mlagents` package with:
```sh
python -m pip install mlagents==0.24.1
pip3 install mlagents
```
## Usage & More Information

4
ml-agents/tests/yamato/scripts/run_llapi.py


# Examine the number of observations per Agent
print("Number of observations : ", len(group_spec.observation_specs))
for obs_spec in group_spec.observation_specs:
# Make sure the name was set in the ObservationSpec
assert bool(obs_spec.name) is True, f'obs_spec.name="{obs_spec.name}"'
# Is there a visual observation ?
vis_obs = any(
len(obs_spec.shape) == 3 for obs_spec in group_spec.observation_specs

17
ml-agents/mlagents/trainers/buffer.py


else:
return return_data
@property
def contains_lists(self) -> bool:
"""
Checks whether this AgentBufferField contains List[np.ndarray].
"""
return len(self) > 0 and isinstance(self[0], list)
def append(self, element: BufferEntry, padding_value: float = 0.0) -> None:
def append(self, element: np.ndarray, padding_value: float = 0.0) -> None:
"""
Adds an element to this list. Also lets you change the padding
type, so that it can be set on append (e.g. action_masks should

" too large given the current number of data points."
)
if batch_size * training_length > len(self):
if self.contains_lists:
padding = []
else:
# We want to duplicate the last value in the array, multiplied by the padding_value.
padding = np.array(self[-1], dtype=np.float32) * self.padding_value
padding = np.array(self[-1], dtype=np.float32) * self.padding_value
return [padding] * (training_length - leftover) + self[:]
else:

dimension is equal to the length of the AgentBufferField.
"""
if len(self) > 0 and not isinstance(self[0], list):
return np.asanyarray(self, dtype=dtype)
return np.asanyarray(self, dytpe=dtype)
shape = None
for _entry in self:
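To make the padding rule above concrete, here is a standalone sketch (independent of the real `AgentBufferField` API) of the same left-padding logic for array-valued entries:

```python
import numpy as np

def pad_to_multiple(entries, training_length, padding_value=0.0):
    # Left-pad so len(result) is a multiple of training_length,
    # duplicating the last entry scaled by padding_value -- the same
    # rule the buffer applies to its array-valued fields.
    leftover = len(entries) % training_length
    if leftover == 0:
        return list(entries)
    padding = np.array(entries[-1], dtype=np.float32) * padding_value
    return [padding] * (training_length - leftover) + list(entries)

seq = [np.array([201, 202, 203]), np.array([211, 212, 213])]
padded = pad_to_multiple(seq, training_length=4)
# -> two zero rows followed by the original two entries
```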

25
ml-agents/mlagents/trainers/optimizer/torch_optimizer.py


# If we're using LSTM, we want to get all the intermediate memories.
all_next_memories: Optional[AgentBufferField] = None
if self.policy.use_recurrent:
(
value_estimates,
all_next_memories,
next_memory,
) = self._evaluate_by_sequence(current_obs, memory)
else:
value_estimates, next_memory = self.critic.critic_pass(
current_obs, memory, sequence_length=batch.num_experiences
)
# To prevent memory leak and improve performance, evaluate with no_grad.
with torch.no_grad():
if self.policy.use_recurrent:
(
value_estimates,
all_next_memories,
next_memory,
) = self._evaluate_by_sequence(current_obs, memory)
else:
value_estimates, next_memory = self.critic.critic_pass(
current_obs, memory, sequence_length=batch.num_experiences
)
# Store the memory for the next trajectory. This should NOT have a gradient.
# Store the memory for the next trajectory
self.critic_memory_dict[agent_id] = next_memory
next_value_estimate, _ = self.critic.critic_pass(

5
ml-agents/mlagents/trainers/ppo/optimizer_torch.py


return update_stats
def get_modules(self):
modules = {
"Optimizer:value_optimizer": self.optimizer,
"Optimizer:critic": self._critic,
}
modules = {"Optimizer": self.optimizer}
for reward_provider in self.reward_signals.values():
modules.update(reward_provider.get_modules())
return modules

10
ml-agents/mlagents/trainers/ppo/trainer.py


self.policy.update_normalization(agent_buffer_trajectory)
# Get all value estimates
(
value_estimates,
value_next,
value_memories,
) = self.optimizer.get_trajectory_value_estimates(
value_estimates, value_next, value_memories = self.optimizer.get_trajectory_value_estimates(
agent_buffer_trajectory,
trajectory.next_obs,
trajectory.done_reached and not trajectory.interrupted,

int(self.hyperparameters.batch_size / self.policy.sequence_length), 1
)
advantages = np.array(
self.update_buffer[BufferKey.ADVANTAGES].get_batch(), dtype=np.float32
)
advantages = np.array(self.update_buffer[BufferKey.ADVANTAGES].get_batch())
self.update_buffer[BufferKey.ADVANTAGES].set(
(advantages - advantages.mean()) / (advantages.std() + 1e-10)
)
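For reference, the normalization applied here is a plain z-score with a small epsilon for numerical stability:

$$\hat{A}_i = \frac{A_i - \operatorname{mean}(A)}{\operatorname{std}(A) + 10^{-10}}$$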

3
ml-agents/mlagents/trainers/sac/optimizer_torch.py


def get_modules(self):
modules = {
"Optimizer:q_network": self.q_network,
"Optimizer:value_network": self._critic,
"Optimizer:value_network": self.q_network,
"Optimizer:target_network": self.target_network,
"Optimizer:policy_optimizer": self.policy_optimizer,
"Optimizer:value_optimizer": self.value_optimizer,

9
ml-agents/mlagents/trainers/tests/dummy_config.py


shapes: List[Tuple[int, ...]]
) -> List[ObservationSpec]:
obs_specs: List[ObservationSpec] = []
for i, shape in enumerate(shapes):
for shape in shapes:
spec = ObservationSpec(
name=f"observation {i} with shape {shape}",
shape=shape,
dimension_property=dim_prop,
observation_type=ObservationType.DEFAULT,
)
spec = ObservationSpec(shape, dim_prop, ObservationType.DEFAULT)
obs_specs.append(spec)
return obs_specs

16
ml-agents/mlagents/trainers/tests/simple_test_envs.py


self.names = brain_names
self.positions: Dict[str, List[float]] = {}
self.step_count: Dict[str, float] = {}
# Concatenate the arguments for a consistent random seed
seed = (
brain_names,
step_size,
num_visual,
num_vector,
num_var_len,
vis_obs_size,
vec_obs_size,
var_len_obs_size,
action_sizes,
)
self.random = random.Random(str(seed))
self.random = random.Random(str(self.behavior_spec))
self.goal: Dict[str, int] = {}
self.action = {}
self.rewards: Dict[str, float] = {}

36
ml-agents/mlagents/trainers/tests/test_buffer.py


]
),
)
# Test padding
a = agent_2_buffer[ObsUtil.get_name_at(0)].get_batch(
batch_size=None, training_length=4, sequential=True
)
assert_array(
np.array(a),
np.array(
[
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[201, 202, 203],
[211, 212, 213],
[221, 222, 223],
[231, 232, 233],
[241, 242, 243],
[251, 252, 253],
[261, 262, 263],
[271, 272, 273],
[281, 282, 283],
]
),
)
# Test group entries return Lists of Lists. Make sure to pad properly!
# Test group entries return Lists of Lists
batch_size=None, training_length=4, sequential=True
batch_size=2, training_length=1, sequential=True
for _group_entry in a[:3]:
assert len(_group_entry) == 0
for _group_entry in a[3:]:
for _group_entry in a:
assert len(_group_entry) == 3
agent_1_buffer.reset_agent()

padded = c.padded_to_batch(pad_value=3)
assert np.array_equal(padded[0], np.array([1, 1, 1, 1]))
assert np.array_equal(padded[1], np.array([2, 2, 3, 3]))
# Make sure it doesn't fail when the field isn't a list
padded_a = a.padded_to_batch()
assert np.array_equal(padded_a, a)
def fakerandint(values):

31
ml-agents/mlagents/trainers/tests/torch/test_hybrid.py


@pytest.mark.check_environment_trains
@pytest.mark.parametrize("num_visual,training_seed", [(1, 1336), (2, 1338)])
def test_hybrid_visual_ppo(num_visual, training_seed):
@pytest.mark.parametrize("num_visual", [1, 2])
def test_hybrid_visual_ppo(num_visual):
PPO_TORCH_CONFIG.hyperparameters,
batch_size=64,
buffer_size=1024,
learning_rate=1e-4,
PPO_TORCH_CONFIG.hyperparameters, learning_rate=3.0e-4
config = attr.evolve(
PPO_TORCH_CONFIG, hyperparameters=new_hyperparams, max_steps=8000
)
check_environment_trains(env, {BRAIN_NAME: config}, training_seed=training_seed)
config = attr.evolve(PPO_TORCH_CONFIG, hyperparameters=new_hyperparams)
check_environment_trains(env, {BRAIN_NAME: config}, training_seed=1336)
@pytest.mark.check_environment_trains

buffer_init_steps=0,
)
config = attr.evolve(
SAC_TORCH_CONFIG, hyperparameters=new_hyperparams, max_steps=6000
SAC_TORCH_CONFIG, hyperparameters=new_hyperparams, max_steps=2200
check_environment_trains(env, {BRAIN_NAME: config}, success_threshold=0.9)
check_environment_trains(
env, {BRAIN_NAME: config}, success_threshold=0.9, training_seed=1336
)
@pytest.mark.parametrize("num_visual,training_seed", [(1, 1337), (2, 1338)])
def test_hybrid_visual_sac(num_visual, training_seed):
@pytest.mark.parametrize("num_visual", [1, 2])
def test_hybrid_visual_sac(num_visual):
env = SimpleEnvironment(
[BRAIN_NAME], num_visual=num_visual, num_vector=0, action_sizes=(1, 1)
)

config = attr.evolve(
SAC_TORCH_CONFIG, hyperparameters=new_hyperparams, max_steps=3000
)
check_environment_trains(env, {BRAIN_NAME: config}, training_seed=training_seed)
check_environment_trains(env, {BRAIN_NAME: config})
@pytest.mark.check_environment_trains

new_hyperparams = attr.evolve(
SAC_TORCH_CONFIG.hyperparameters,
batch_size=256,
learning_rate=3e-4,
learning_rate=1e-3,
buffer_init_steps=1000,
steps_per_update=2,
)

network_settings=new_networksettings,
max_steps=4000,
max_steps=3500,
)
check_environment_trains(env, {BRAIN_NAME: config}, training_seed=1212)
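These end-to-end training tests are stochastic, which is presumably why each configuration is now paired with a fixed `training_seed`. A minimal sketch of the parametrization pattern (the test body is a placeholder):

```python
import pytest

@pytest.mark.parametrize("num_visual,training_seed", [(1, 1336), (2, 1338)])
def test_seeded_config(num_visual, training_seed):
    # Each visual-observation count runs with its own fixed seed.
    assert (num_visual, training_seed) in {(1, 1336), (2, 1338)}
```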

61
ml-agents/mlagents/trainers/tests/torch/saver/test_saver.py


from mlagents.trainers.optimizer.torch_optimizer import TorchOptimizer
import pytest
from unittest import mock
import os

from mlagents.trainers.policy.torch_policy import TorchPolicy
from mlagents.trainers.ppo.optimizer_torch import TorchPPOOptimizer
from mlagents.trainers.sac.optimizer_torch import TorchSACOptimizer
from mlagents.trainers.settings import TrainerSettings, PPOSettings, SACSettings
from mlagents.trainers.settings import TrainerSettings
from mlagents.trainers.tests import mock_brain as mb
from mlagents.trainers.tests.torch.test_policy import create_policy_mock
from mlagents.trainers.torch.utils import ModelUtils

assert model_saver.policy is not None
def test_load_save_policy(tmp_path):
def test_load_save(tmp_path):
path1 = os.path.join(tmp_path, "runid1")
path2 = os.path.join(tmp_path, "runid2")
trainer_params = TrainerSettings()

assert policy3.get_current_step() == 0
@pytest.mark.parametrize(
"optimizer",
[(TorchPPOOptimizer, PPOSettings), (TorchSACOptimizer, SACSettings)],
ids=["ppo", "sac"],
)
def test_load_save_optimizer(tmp_path, optimizer):
OptimizerClass, HyperparametersClass = optimizer
trainer_settings = TrainerSettings()
trainer_settings.hyperparameters = HyperparametersClass()
policy = create_policy_mock(trainer_settings, use_discrete=False)
optimizer = OptimizerClass(policy, trainer_settings)
# save at path 1
path1 = os.path.join(tmp_path, "runid1")
model_saver = TorchModelSaver(trainer_settings, path1)
model_saver.register(policy)
model_saver.register(optimizer)
model_saver.initialize_or_load()
policy.set_step(2000)
model_saver.save_checkpoint("MockBrain", 2000)
# create a new optimizer and policy
policy2 = create_policy_mock(trainer_settings, use_discrete=False)
optimizer2 = OptimizerClass(policy2, trainer_settings)
# load weights
model_saver2 = TorchModelSaver(trainer_settings, path1, load=True)
model_saver2.register(policy2)
model_saver2.register(optimizer2)
model_saver2.initialize_or_load() # This is to load the optimizers
# Compare the two optimizers
_compare_two_optimizers(optimizer, optimizer2)
# TorchPolicy.evaluate() returns log_probs instead of all_log_probs like TF does,
# resulting in non-deterministic results for testing.
# So use sample_actions here instead.

ModelUtils.to_numpy(log_probs1.all_discrete_tensor),
ModelUtils.to_numpy(log_probs2.all_discrete_tensor),
)
def _compare_two_optimizers(opt1: TorchOptimizer, opt2: TorchOptimizer) -> None:
trajectory = mb.make_fake_trajectory(
length=10,
observation_specs=opt1.policy.behavior_spec.observation_specs,
action_spec=opt1.policy.behavior_spec.action_spec,
max_step_complete=True,
)
with torch.no_grad():
_, opt1_val_out, _ = opt1.get_trajectory_value_estimates(
trajectory.to_agentbuffer(), trajectory.next_obs, done=False
)
_, opt2_val_out, _ = opt2.get_trajectory_value_estimates(
trajectory.to_agentbuffer(), trajectory.next_obs, done=False
)
for opt1_val, opt2_val in zip(opt1_val_out.values(), opt2_val_out.values()):
np.testing.assert_array_equal(opt1_val, opt2_val)
@pytest.mark.parametrize("discrete", [True, False], ids=["discrete", "continuous"])

5
ml-agents/mlagents/trainers/tests/torch/test_ppo.py


run_out, final_value_out, all_memories = optimizer.get_trajectory_value_estimates(
trajectory.to_agentbuffer(), trajectory.next_obs, done=False
)
if rnn:
# Check that memories don't have a Torch gradient
for mem in optimizer.critic_memory_dict.values():
assert not mem.requires_grad
for key, val in run_out.items():
assert type(key) is str
assert len(val) == 15
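A hedged illustration of the `requires_grad` assertion above: tensors produced inside `torch.no_grad()` (such as cached critic memories) carry no gradient history, so storing them between trajectories cannot grow the autograd graph:

```python
import torch

layer = torch.nn.Linear(4, 4)
with torch.no_grad():
    mem = layer(torch.ones(1, 4))  # e.g. a recurrent memory snapshot
print(mem.requires_grad)  # False
```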

6
ml-agents/mlagents/trainers/tests/torch/test_simple_rl.py


)
bc_settings = BehavioralCloningSettings(demo_path=demo_path, steps=1500)
reward_signals = {
RewardSignalType.GAIL: GAILSettings(
gamma=0.8, encoding_size=32, demo_path=demo_path
)
RewardSignalType.GAIL: GAILSettings(encoding_size=32, demo_path=demo_path)
hyperparams = attr.evolve(PPO_TORCH_CONFIG.hyperparameters, learning_rate=1e-3)
hyperparams = attr.evolve(PPO_TORCH_CONFIG.hyperparameters, learning_rate=5e-3)
config = attr.evolve(
PPO_TORCH_CONFIG,
reward_signals=reward_signals,

5
ml-agents/setup.py


"pyyaml>=3.1.0",
# Windows ver. of PyTorch doesn't work from PyPi. Installation:
# https://github.com/Unity-Technologies/ml-agents/blob/main/docs/Installation.md#windows-installing-pytorch
# Torch only works on Python 3.9 for 1.8.0 and above. For details, see:
# https://github.com/pytorch/pytorch/issues/50014
"torch>=1.8.0,<1.9.0;(platform_system!='Windows' and python_version>='3.9')",
"torch>=1.6.0,<1.9.0;(platform_system!='Windows' and python_version<'3.9')",
'torch>=1.6.0,<1.8.0;platform_system!="Windows"',
"tensorboard>=1.15",
# cattrs 1.1.0 dropped support for python 3.6, but 1.0.0 doesn't work for python 3.9
# Since there's no version that supports both, we have to draw the line somewhere.
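The `;(...)` suffixes in the torch requirements above are PEP 508 environment markers. A small sketch of how such a marker evaluates against the running interpreter, using the `packaging` library (a common setuptools dependency; install with `pip install packaging` if needed):

```python
from packaging.markers import Marker

marker = Marker("platform_system != 'Windows' and python_version >= '3.9'")
print(marker.evaluate())  # True only on a non-Windows Python 3.9+ interpreter
```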

2
ml-agents-envs/README.md


Install the `mlagents_envs` package with:
```sh
python -m pip install mlagents_envs==0.24.1
pip3 install mlagents_envs
```
## Usage & More Information

23
ml-agents-envs/mlagents_envs/communicator_objects/observation_pb2.py


name='mlagents_envs/communicator_objects/observation.proto',
package='communicator_objects',
syntax='proto3',
serialized_pb=_b('\n4mlagents_envs/communicator_objects/observation.proto\x12\x14\x63ommunicator_objects\"\x8f\x03\n\x10ObservationProto\x12\r\n\x05shape\x18\x01 \x03(\x05\x12\x44\n\x10\x63ompression_type\x18\x02 \x01(\x0e\x32*.communicator_objects.CompressionTypeProto\x12\x19\n\x0f\x63ompressed_data\x18\x03 \x01(\x0cH\x00\x12\x46\n\nfloat_data\x18\x04 \x01(\x0b\x32\x30.communicator_objects.ObservationProto.FloatDataH\x00\x12\"\n\x1a\x63ompressed_channel_mapping\x18\x05 \x03(\x05\x12\x1c\n\x14\x64imension_properties\x18\x06 \x03(\x05\x12\x44\n\x10observation_type\x18\x07 \x01(\x0e\x32*.communicator_objects.ObservationTypeProto\x12\x0c\n\x04name\x18\x08 \x01(\t\x1a\x19\n\tFloatData\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x02\x42\x12\n\x10observation_data*)\n\x14\x43ompressionTypeProto\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03PNG\x10\x01*F\n\x14ObservationTypeProto\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x08\n\x04GOAL\x10\x01\x12\n\n\x06REWARD\x10\x02\x12\x0b\n\x07MESSAGE\x10\x03\x42%\xaa\x02\"Unity.MLAgents.CommunicatorObjectsb\x06proto3')
serialized_pb=_b('\n4mlagents_envs/communicator_objects/observation.proto\x12\x14\x63ommunicator_objects\"\x81\x03\n\x10ObservationProto\x12\r\n\x05shape\x18\x01 \x03(\x05\x12\x44\n\x10\x63ompression_type\x18\x02 \x01(\x0e\x32*.communicator_objects.CompressionTypeProto\x12\x19\n\x0f\x63ompressed_data\x18\x03 \x01(\x0cH\x00\x12\x46\n\nfloat_data\x18\x04 \x01(\x0b\x32\x30.communicator_objects.ObservationProto.FloatDataH\x00\x12\"\n\x1a\x63ompressed_channel_mapping\x18\x05 \x03(\x05\x12\x1c\n\x14\x64imension_properties\x18\x06 \x03(\x05\x12\x44\n\x10observation_type\x18\x07 \x01(\x0e\x32*.communicator_objects.ObservationTypeProto\x1a\x19\n\tFloatData\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x02\x42\x12\n\x10observation_data*)\n\x14\x43ompressionTypeProto\x12\x08\n\x04NONE\x10\x00\x12\x07\n\x03PNG\x10\x01*F\n\x14ObservationTypeProto\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x08\n\x04GOAL\x10\x01\x12\n\n\x06REWARD\x10\x02\x12\x0b\n\x07MESSAGE\x10\x03\x42%\xaa\x02\"Unity.MLAgents.CommunicatorObjectsb\x06proto3')
)
_COMPRESSIONTYPEPROTO = _descriptor.EnumDescriptor(

],
containing_type=None,
options=None,
serialized_start=480,
serialized_end=521,
serialized_start=466,
serialized_end=507,
)
_sym_db.RegisterEnumDescriptor(_COMPRESSIONTYPEPROTO)

],
containing_type=None,
options=None,
serialized_start=523,
serialized_end=593,
serialized_start=509,
serialized_end=579,
)
_sym_db.RegisterEnumDescriptor(_OBSERVATIONTYPEPROTO)

extension_ranges=[],
oneofs=[
],
serialized_start=433,
serialized_end=458,
serialized_start=419,
serialized_end=444,
)
_OBSERVATIONPROTO = _descriptor.Descriptor(

message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='communicator_objects.ObservationProto.name', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],

index=0, containing_type=None, fields=[]),
],
serialized_start=79,
serialized_end=478,
serialized_end=464,
)
_OBSERVATIONPROTO_FLOATDATA.containing_type = _OBSERVATIONPROTO

7
ml-agents-envs/mlagents_envs/communicator_objects/observation_pb2.pyi


Iterable as typing___Iterable,
List as typing___List,
Optional as typing___Optional,
Text as typing___Text,
Tuple as typing___Tuple,
cast as typing___cast,
)

compressed_channel_mapping = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[builtin___int]
dimension_properties = ... # type: google___protobuf___internal___containers___RepeatedScalarFieldContainer[builtin___int]
observation_type = ... # type: ObservationTypeProto
name = ... # type: typing___Text
@property
def float_data(self) -> ObservationProto.FloatData: ...

compressed_channel_mapping : typing___Optional[typing___Iterable[builtin___int]] = None,
dimension_properties : typing___Optional[typing___Iterable[builtin___int]] = None,
observation_type : typing___Optional[ObservationTypeProto] = None,
name : typing___Optional[typing___Text] = None,
) -> None: ...
@classmethod
def FromString(cls, s: builtin___bytes) -> ObservationProto: ...

def HasField(self, field_name: typing_extensions___Literal[u"compressed_data",u"float_data",u"observation_data"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"compressed_channel_mapping",u"compressed_data",u"compression_type",u"dimension_properties",u"float_data",u"name",u"observation_data",u"observation_type",u"shape"]) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"compressed_channel_mapping",u"compressed_data",u"compression_type",u"dimension_properties",u"float_data",u"observation_data",u"observation_type",u"shape"]) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"compressed_channel_mapping",b"compressed_channel_mapping",u"compressed_data",b"compressed_data",u"compression_type",b"compression_type",u"dimension_properties",b"dimension_properties",u"float_data",b"float_data",u"name",b"name",u"observation_data",b"observation_data",u"observation_type",b"observation_type",u"shape",b"shape"]) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"compressed_channel_mapping",b"compressed_channel_mapping",u"compressed_data",b"compressed_data",u"compression_type",b"compression_type",u"dimension_properties",b"dimension_properties",u"float_data",b"float_data",u"observation_data",b"observation_data",u"observation_type",b"observation_type",u"shape",b"shape"]) -> None: ...
def WhichOneof(self, oneof_group: typing_extensions___Literal[u"observation_data",b"observation_data"]) -> typing_extensions___Literal["compressed_data","float_data"]: ...

4
ml-agents-envs/mlagents_envs/base_env.py


dimension_property: Tuple[DimensionProperty, ...]
observation_type: ObservationType
# Optional name. For observations coming from com.unity.ml-agents, this
# will be the ISensor name.
name: str
class BehaviorSpec(NamedTuple):
"""

85
ml-agents-envs/mlagents_envs/rpc_utils.py


for obs in agent_info.observations:
observation_specs.append(
ObservationSpec(
name=obs.name,
shape=tuple(obs.shape),
observation_type=ObservationType(obs.observation_type),
dimension_property=tuple(
DimensionProperty(dim) for dim in obs.dimension_properties
)
tuple(obs.shape),
tuple(DimensionProperty(dim) for dim in obs.dimension_properties)
ObservationType(obs.observation_type),
)
)

return img
def _check_observations_match_spec(
obs_index: int,
observation_spec: ObservationSpec,
agent_info_list: Collection[AgentInfoProto],
) -> None:
"""
Check that all the observations match the expected size.
This gives a nicer error than a cryptic numpy error later.
"""
expected_obs_shape = tuple(observation_spec.shape)
for agent_info in agent_info_list:
agent_obs_shape = tuple(agent_info.observations[obs_index].shape)
if expected_obs_shape != agent_obs_shape:
raise UnityObservationException(
f"Observation at index={obs_index} for agent with "
f"id={agent_info.id} didn't match the ObservationSpec. "
f"Expected shape {expected_obs_shape} but got {agent_obs_shape}."
)
@timed
def _observation_to_np_array(
obs: ObservationProto, expected_shape: Optional[Iterable[int]] = None

@timed
def _process_maybe_compressed_observation(
obs_index: int,
observation_spec: ObservationSpec,
shape: Tuple[int, int, int],
shape = cast(Tuple[int, int, int], observation_spec.shape)
try:
batched_visual = [
_observation_to_np_array(agent_obs.observations[obs_index], shape)
for agent_obs in agent_info_list
]
except ValueError:
# Try to get a more useful error message
_check_observations_match_spec(obs_index, observation_spec, agent_info_list)
# If that didn't raise anything, raise the original error
raise
batched_visual = [
_observation_to_np_array(agent_obs.observations[obs_index], shape)
for agent_obs in agent_info_list
]
return np.array(batched_visual, dtype=np.float32)

@timed
def _process_rank_one_or_two_observation(
obs_index: int,
observation_spec: ObservationSpec,
agent_info_list: Collection[AgentInfoProto],
obs_index: int, shape: Tuple[int, ...], agent_info_list: Collection[AgentInfoProto]
return np.zeros((0,) + observation_spec.shape, dtype=np.float32)
try:
np_obs = np.array(
[
agent_obs.observations[obs_index].float_data.data
for agent_obs in agent_info_list
],
dtype=np.float32,
).reshape((len(agent_info_list),) + observation_spec.shape)
except ValueError:
# Try to get a more useful error message
_check_observations_match_spec(obs_index, observation_spec, agent_info_list)
# If that didn't raise anything, raise the original error
raise
return np.zeros((0,) + shape, dtype=np.float32)
np_obs = np.array(
[
agent_obs.observations[obs_index].float_data.data
for agent_obs in agent_info_list
],
dtype=np.float32,
).reshape((len(agent_info_list),) + shape)
_raise_on_nan_and_inf(np_obs, "observations")
return np_obs
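A hedged sketch of the rank-1/2 path above: each agent contributes a flat float list, the lists are stacked and reshaped to `(num_agents,) + shape`, and the result is checked for NaN/inf before reaching the trainer (data values are made up):

```python
import numpy as np

agent_data = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]  # two agents, obs shape (3,)
np_obs = np.array(agent_data, dtype=np.float32).reshape((2,) + (3,))
assert np.isfinite(np_obs).all()  # the real code raises on NaN/inf here
print(np_obs.shape)  # (2, 3)
```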

]
decision_obs_list: List[np.ndarray] = []
terminal_obs_list: List[np.ndarray] = []
for obs_index, observation_spec in enumerate(behavior_spec.observation_specs):
is_visual = len(observation_spec.shape) == 3
for obs_index, observation_specs in enumerate(behavior_spec.observation_specs):
is_visual = len(observation_specs.shape) == 3
obs_shape = cast(Tuple[int, int, int], observation_specs.shape)
obs_index, observation_spec, decision_agent_info_list
obs_index, obs_shape, decision_agent_info_list
obs_index, observation_spec, terminal_agent_info_list
obs_index, obs_shape, terminal_agent_info_list
obs_index, observation_spec, decision_agent_info_list
obs_index, observation_specs.shape, decision_agent_info_list
obs_index, observation_spec, terminal_agent_info_list
obs_index, observation_specs.shape, terminal_agent_info_list
)
)
decision_rewards = np.array(

42
ml-agents-envs/mlagents_envs/tests/test_rpc_utils.py


def test_vector_observation():
n_agents = 10
shapes = [(3,), (4,)]
obs_specs = create_observation_specs_with_shapes(shapes)
arr = _process_rank_one_or_two_observation(
obs_index, obs_specs[obs_index], list_proto
)
arr = _process_rank_one_or_two_observation(obs_index, shape, list_proto)
shape = (128, 64, 3)
in_array_1 = np.random.rand(*shape)
in_array_1 = np.random.rand(128, 64, 3)
in_array_2 = np.random.rand(*shape)
in_array_2 = np.random.rand(128, 64, 3)
in_array_2_mapping = [0, 1, 2]
proto_obs_2 = generate_compressed_proto_obs_with_mapping(
in_array_2, in_array_2_mapping

ap2 = AgentInfoProto()
ap2.observations.extend([proto_obs_2])
ap_list = [ap1, ap2]
obs_spec = create_observation_specs_with_shapes([shape])[0]
arr = _process_maybe_compressed_observation(0, obs_spec, ap_list)
arr = _process_maybe_compressed_observation(0, (128, 64, 3), ap_list)
assert list(arr.shape) == [2, 128, 64, 3]
assert np.allclose(arr[0, :, :, :], in_array_1, atol=0.01)
assert np.allclose(arr[1, :, :, :], in_array_2, atol=0.01)

ap2 = AgentInfoProto()
ap2.observations.extend([proto_obs_2])
ap_list = [ap1, ap2]
shape = (128, 64, 1)
obs_spec = create_observation_specs_with_shapes([shape])[0]
arr = _process_maybe_compressed_observation(0, obs_spec, ap_list)
arr = _process_maybe_compressed_observation(0, (128, 64, 1), ap_list)
assert list(arr.shape) == [2, 128, 64, 1]
assert np.allclose(arr[0, :, :, :], expected_out_array_1, atol=0.01)
assert np.allclose(arr[1, :, :, :], expected_out_array_2, atol=0.01)

ap1 = AgentInfoProto()
ap1.observations.extend([proto_obs_1])
ap_list = [ap1]
shape = (128, 64, 8)
obs_spec = create_observation_specs_with_shapes([shape])[0]
arr = _process_maybe_compressed_observation(0, obs_spec, ap_list)
arr = _process_maybe_compressed_observation(0, (128, 64, 8), ap_list)
assert list(arr.shape) == [1, 128, 64, 8]
assert np.allclose(arr[0, :, :, :], expected_out_array_1, atol=0.01)

ap1 = AgentInfoProto()
ap1.observations.extend([proto_obs_1])
ap_list = [ap1]
shape = (128, 42, 3)
obs_spec = create_observation_specs_with_shapes([shape])[0]
_process_maybe_compressed_observation(0, obs_spec, ap_list)
_process_maybe_compressed_observation(0, (128, 42, 3), ap_list)
def test_batched_step_result_from_proto():

assert decision_steps.obs[1].shape[1] == shapes[1][0]
assert terminal_steps.obs[0].shape[1] == shapes[0][0]
assert terminal_steps.obs[1].shape[1] == shapes[1][0]
def test_mismatch_observations_raise_in_step_result_from_proto():
n_agents = 10
shapes = [(3,), (4,)]
spec = BehaviorSpec(
create_observation_specs_with_shapes(shapes), ActionSpec.create_continuous(3)
)
ap_list = generate_list_agent_proto(n_agents, shapes)
# Hack an observation to be larger, we should get an exception
ap_list[0].observations[0].shape[0] += 1
ap_list[0].observations[0].float_data.data.append(0.42)
with pytest.raises(UnityObservationException):
steps_from_proto(ap_list, spec)
def test_action_masking_discrete():

4
protobuf-definitions/proto/mlagents_envs/communicator_objects/observation.proto


repeated int32 compressed_channel_mapping = 5;
repeated int32 dimension_properties = 6;
ObservationTypeProto observation_type = 7;
// Optional name of the observation.
// This will be set to the ISensor name when writing,
// and read into the ObservationSpec in the low-level API
string name = 8;
}
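One reason adding `string name = 8` is backward compatible: proto3 scalar fields default to the empty string when unset, so messages from senders that predate the field still parse, and the reader can fall back to a generated label. A hedged sketch with a stand-in class (the real one is generated into `observation_pb2.py`):

```python
class ObservationProto:  # illustrative stand-in, not the generated class
    def __init__(self, name: str = ""):
        self.name = name  # proto3 default for an unset string field

obs = ObservationProto()
label = obs.name or "Observation 0"  # fall back when the sender never set it
print(label)
```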

2
com.unity.ml-agents.extensions/Documentation~/Grid-Sensor.md


An image can be thought of as a matrix with a predefined width (W) and height (H), and each pixel can be thought of as an array of length 3 (in the case of RGB), `[Red, Green, Blue]`, holding the channel information of the color intensities at that pixel location. Thus an image is just a 3-dimensional matrix of size WxHx3. A Grid Observation can be thought of as a generalization of this setup where, in place of a pixel, there is a "cell": an array of length N representing different channel intensities at that cell position. From a Convolutional Neural Network point of view, the introduction of multiple channels in an "image" isn't a new concept; one such example is the RGB-Depth image used in several robotics applications. What distinguishes Grid Observations is what the data within the channels represents. Instead of limiting the channels to color intensities, the channels within a cell of a Grid Observation generalize to any data that can be represented by a single number (float or int).
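A minimal numpy illustration of that generalization (grid size and channel semantics are made up for the example):

```python
import numpy as np

# A 20x20 grid where each cell holds 6 arbitrary channel values instead of
# the 3 color intensities of an RGB pixel.
W, H, N = 20, 20, 6
grid_obs = np.zeros((W, H, N), dtype=np.float32)
grid_obs[4, 7] = [1.0, 0.0, 0.0, 0.5, 0.0, 0.0]  # e.g. one-hot type + a scalar
print(grid_obs.shape)  # (20, 20, 6): image-like layout, generalized channels
```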
Before jumping into the details of the Grid Sensor, it is important to note the agent's performance and qualitatively different behavior compared to raycasts. Unity ML-Agents comes with a suite of example environments. One in particular, the [Food Collector](https://github.com/Unity-Technologies/ml-agents/tree/release_14_docs/docs/Learning-Environment-Examples.md#food-collector), has been the focus of the Grid Sensor development.
Before jumping into the details of the Grid Sensor, it is important to note the agent's performance and qualitatively different behavior compared to raycasts. Unity ML-Agents comes with a suite of example environments. One in particular, the [Food Collector](https://github.com/Unity-Technologies/ml-agents/blob/release_13_docs/docs/Learning-Environment-Examples.md#food-collector), has been the focus of the Grid Sensor development.
The Food Collector environment can be described as:
* Set-up: A multi-agent environment where agents compete to collect food.

2
com.unity.ml-agents.extensions/Documentation~/Match3.md


This implementation includes:
* C# implementation catered toward a Match-3 setup including concepts around encoding for moves based on [Human Like Playtesting with Deep Learning](https://www.researchgate.net/publication/328307928_Human-Like_Playtesting_with_Deep_Learning)
* An example Match-3 scene with ML-Agents implemented (located under /Project/Assets/ML-Agents/Examples/Match3). More information on the Match-3 example [here](https://github.com/Unity-Technologies/ml-agents/tree/release_14_docs/docs/docs/Learning-Environment-Examples.md#match-3).
* An example Match-3 scene with ML-Agents implemented (located under /Project/Assets/ML-Agents/Examples/Match3). More information on the Match-3 example [here](https://github.com/Unity-Technologies/ml-agents/tree/release_13_docs/docs/docs/Learning-Environment-Examples.md#match-3).
### Feedback
If you are a Match-3 developer and are trying to leverage ML-Agents for this scenario, [we want to hear from you](https://forms.gle/TBsB9jc8WshgzViU9). Additionally, we are also looking for interested Match-3 teams to speak with us for 45 minutes. If you are interested, please indicate that in the [form](https://forms.gle/TBsB9jc8WshgzViU9). If selected, we will provide gift cards as a token of appreciation.

16
com.unity.ml-agents.extensions/Documentation~/com.unity.ml-agents.extensions.md


recommended ways to install the package:
### Local Installation
[Clone the repository](https://github.com/Unity-Technologies/ml-agents/tree/release_14_docs/docs/Installation.md#clone-the-ml-agents-toolkit-repository-optional) and follow the
[Local Installation for Development](https://github.com/Unity-Technologies/ml-agents/tree/release_14_docs/docs/Installation.md#advanced-local-installation-for-development-1)
[Clone the repository](https://github.com/Unity-Technologies/ml-agents/tree/release_13_docs/docs/Installation.md#clone-the-ml-agents-toolkit-repository-optional) and follow the
[Local Installation for Development](https://github.com/Unity-Technologies/ml-agents/tree/release_13_docs/docs/Installation.md#advanced-local-installation-for-development-1)
![Package Manager git URL](https://github.com/Unity-Technologies/ml-agents/blob/release_14_docs/docs/images/unity_package_manager_git_url.png)
![Package Manager git URL](https://github.com/Unity-Technologies/ml-agents/blob/release_13_docs/docs/images/unity_package_manager_git_url.png)
git+https://github.com/Unity-Technologies/ml-agents.git?path=com.unity.ml-agents.extensions#release_14
git+https://github.com/Unity-Technologies/ml-agents.git?path=com.unity.ml-agents.extensions
"com.unity.ml-agents.extensions": "git+https://github.com/Unity-Technologies/ml-agents.git?path=com.unity.ml-agents.extensions#release_14",
"com.unity.ml-agents.extensions": "git+https://github.com/Unity-Technologies/ml-agents.git?path=com.unity.ml-agents.extensions",
See [Git dependencies](https://docs.unity3d.com/Manual/upm-git.html#subfolder) for more information. Note that this
may take several minutes to resolve the packages the first time that you add it.
See [Git dependencies](https://docs.unity3d.com/Manual/upm-git.html#subfolder) for more information.
## Requirements

- No way to customize the action space of the `InputActuatorComponent`
## Need Help?
The main [README](https://github.com/Unity-Technologies/ml-agents/tree/release_14_docs/README.md) contains links for contacting the team or getting support.
The main [README](https://github.com/Unity-Technologies/ml-agents/tree/release_13_docs/README.md) contains links for contacting the team or getting support.

12
com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/ButtonInputActionAdaptorTests.cs


public void TestQueueEvent()
{
var actionBuffers = new ActionBuffers(ActionSegment<float>.Empty, new ActionSegment<int>(new[] { 1 }));
var context = new InputActuatorEventContext(1, m_Device);
using (context.GetEventForFrame(out var eventPtr))
{
m_Adaptor.WriteToInputEventForAction(eventPtr, m_Action, m_Control, new ActionSpec(), actionBuffers);
}
m_Adaptor.QueueInputEventForAction(m_Action, m_Control, new ActionSpec(), actionBuffers);
InputSystem.Update();
var val = m_Action.ReadValue<float>();
Assert.IsTrue(Mathf.Approximately(1f, val));

public void TestWriteToHeuristic()
{
var actionBuffers = new ActionBuffers(ActionSegment<float>.Empty, new ActionSegment<int>(new[] { 1 }));
var context = new InputActuatorEventContext(1, m_Device);
using (context.GetEventForFrame(out var eventPtr))
{
m_Adaptor.WriteToInputEventForAction(eventPtr, m_Action, m_Control, new ActionSpec(), actionBuffers);
}
m_Adaptor.QueueInputEventForAction(m_Action, m_Control, new ActionSpec(), actionBuffers);
InputSystem.Update();
var buffer = new ActionBuffers(ActionSegment<float>.Empty, new ActionSegment<int>(new[] { 1 }));
m_Adaptor.WriteToHeuristic(m_Action, buffer);

12
com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/DoubleInputActionAdaptorTests.cs


public void TestQueueEvent()
{
var actionBuffers = new ActionBuffers(new ActionSegment<float>(new[] { 1f }), ActionSegment<int>.Empty);
var context = new InputActuatorEventContext(1, m_Device);
using (context.GetEventForFrame(out var eventPtr))
{
m_Adaptor.WriteToInputEventForAction(eventPtr, m_Action, m_Control, new ActionSpec(), actionBuffers);
}
m_Adaptor.QueueInputEventForAction(m_Action, m_Control, new ActionSpec(), actionBuffers);
InputSystem.Update();
Assert.IsTrue(Mathf.Approximately(1f, (float)m_Action.ReadValue<double>()));
}

{
var actionBuffers = new ActionBuffers(new ActionSegment<float>(new[] { 1f }), ActionSegment<int>.Empty);
var context = new InputActuatorEventContext(1, m_Device);
using (context.GetEventForFrame(out var eventPtr))
{
m_Adaptor.WriteToInputEventForAction(eventPtr, m_Action, m_Control, new ActionSpec(), actionBuffers);
}
m_Adaptor.QueueInputEventForAction(m_Action, m_Control, new ActionSpec(), actionBuffers);
InputSystem.Update();
var buffer = new ActionBuffers(new ActionSegment<float>(new[] { 1f }), ActionSegment<int>.Empty);
m_Adaptor.WriteToHeuristic(m_Action, buffer);

12
com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/FloatInputActionAdapatorTests.cs


public void TestQueueEvent()
{
var actionBuffers = new ActionBuffers(new ActionSegment<float>(new[] { 1f }), ActionSegment<int>.Empty);
var context = new InputActuatorEventContext(1, m_Device);
using (context.GetEventForFrame(out var eventPtr))
{
m_Adaptor.WriteToInputEventForAction(eventPtr, m_Action, m_Control, new ActionSpec(), actionBuffers);
}
m_Adaptor.QueueInputEventForAction(m_Action, m_Control, new ActionSpec(), actionBuffers);
InputSystem.Update();
var val = m_Action.ReadValue<float>();
Assert.IsTrue(Mathf.Approximately(1f, val));

public void TestWriteToHeuristic()
{
var actionBuffers = new ActionBuffers(new ActionSegment<float>(new[] { 1f }), ActionSegment<int>.Empty);
var context = new InputActuatorEventContext(1, m_Device);
using (context.GetEventForFrame(out var eventPtr))
{
m_Adaptor.WriteToInputEventForAction(eventPtr, m_Action, m_Control, new ActionSpec(), actionBuffers);
}
m_Adaptor.QueueInputEventForAction(m_Action, m_Control, new ActionSpec(), actionBuffers);
InputSystem.Update();
var buffer = new ActionBuffers(new ActionSegment<float>(new[] { 1f }), ActionSegment<int>.Empty);
m_Adaptor.WriteToHeuristic(m_Action, buffer);

12
com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/IntegerInputActionAdaptorTests.cs


public void TestQueueEvent()
{
var actionBuffers = new ActionBuffers(ActionSegment<float>.Empty, new ActionSegment<int>(new[] { 1 }));
var context = new InputActuatorEventContext(1, m_Device);
using (context.GetEventForFrame(out var eventPtr))
{
m_Adaptor.WriteToInputEventForAction(eventPtr, m_Action, m_Control, new ActionSpec(), actionBuffers);
}
m_Adaptor.QueueInputEventForAction(m_Action, m_Control, new ActionSpec(), actionBuffers);
InputSystem.Update();
var val = m_Action.ReadValue<int>();
Assert.IsTrue(val == 1);

public void TestWriteToHeuristic()
{
var actionBuffers = new ActionBuffers(ActionSegment<float>.Empty, new ActionSegment<int>(new[] { 1 }));
var context = new InputActuatorEventContext(1, m_Device);
using (context.GetEventForFrame(out var eventPtr))
{
m_Adaptor.WriteToInputEventForAction(eventPtr, m_Action, m_Control, new ActionSpec(), actionBuffers);
}
m_Adaptor.QueueInputEventForAction(m_Action, m_Control, new ActionSpec(), actionBuffers);
InputSystem.Update();
var buffer = new ActionBuffers(ActionSegment<float>.Empty, new ActionSegment<int>(new int[1]));
m_Adaptor.WriteToHeuristic(m_Action, buffer);

12
com.unity.ml-agents.extensions/Tests/Runtime/Input/Adaptors/Vector2InputActionAdaptorTests.cs


public void TestQueueEvent()
{
var actionBuffers = new ActionBuffers(new ActionSegment<float>(new[] { 0f, 1f }), ActionSegment<int>.Empty);
var context = new InputActuatorEventContext(1, m_Device);
using (context.GetEventForFrame(out var eventPtr))
{
m_Adaptor.WriteToInputEventForAction(eventPtr, m_Action, m_Control, new ActionSpec(), actionBuffers);
}
m_Adaptor.QueueInputEventForAction(m_Action, m_Control, new ActionSpec(), actionBuffers);
InputSystem.Update();
var val = m_Action.ReadValue<Vector2>();
Assert.IsTrue(Mathf.Approximately(0f, val.x));

public void TestWriteToHeuristic()
{
var actionBuffers = new ActionBuffers(new ActionSegment<float>(new[] { 0f, 1f }), ActionSegment<int>.Empty);
var context = new InputActuatorEventContext(1, m_Device);
using (context.GetEventForFrame(out var eventPtr))
{
m_Adaptor.WriteToInputEventForAction(eventPtr, m_Action, m_Control, new ActionSpec(), actionBuffers);
}
m_Adaptor.QueueInputEventForAction(m_Action, m_Control, new ActionSpec(), actionBuffers);
InputSystem.Update();
var buffer = new ActionBuffers(new ActionSegment<float>(new float[2]), ActionSegment<int>.Empty);
m_Adaptor.WriteToHeuristic(m_Action, buffer);

17
com.unity.ml-agents.extensions/Tests/Runtime/Input/InputActionActuatorTests.cs


using Unity.MLAgents.Policies;
using UnityEngine;
using UnityEngine.InputSystem;
using UnityEngine.InputSystem.LowLevel;
public bool eventWritten;
public bool eventQueued;
public bool writtenToHeuristic;
public ActionSpec GetActionSpecForInputAction(InputAction action)

public void WriteToInputEventForAction(InputEventPtr eventPtr, InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers)
public void QueueInputEventForAction(InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers)
eventWritten = true;
eventQueued = true;
}
public void WriteToHeuristic(InputAction action, in ActionBuffers actionBuffers)

public void Reset()
{
eventWritten = false;
eventQueued = false;
writtenToHeuristic = false;
}
}

m_BehaviorParameters = go.AddComponent<BehaviorParameters>();
var action = new InputAction("action");
m_Adaptor = new TestAdaptor();
m_Actuator = new InputActionActuator(null, m_BehaviorParameters, action, m_Adaptor, new InputActuatorEventContext(1, InputSystem.AddDevice<Gamepad>()));
m_Actuator = new InputActionActuator(null, m_BehaviorParameters, action, m_Adaptor);
}
[Test]

m_Actuator.OnActionReceived(new ActionBuffers());
m_Actuator.Heuristic(new ActionBuffers());
Assert.IsFalse(m_Adaptor.eventWritten);
Assert.IsFalse(m_Adaptor.eventQueued);
Assert.IsFalse(m_Adaptor.eventWritten);
Assert.IsFalse(m_Adaptor.eventQueued);
Assert.IsTrue(m_Adaptor.eventWritten);
Assert.IsTrue(m_Adaptor.eventQueued);
m_Adaptor.Reset();
Assert.AreEqual(m_Actuator.Name, "InputActionActuator-action");

2
com.unity.ml-agents.extensions/Tests/Runtime/Input/InputActuatorComponentTests.cs


var device = InputSystem.AddDevice("TestLayout");
var actuators = InputActuatorComponent.CreateActuatorsFromMap(inputActionMap, m_BehaviorParameters, device, new InputActuatorEventContext());
var actuators = InputActuatorComponent.CreateActuatorsFromMap(inputActionMap, m_BehaviorParameters, device);
Assert.IsTrue(actuators.Length == 2);
Assert.IsTrue(actuators[0].ActionSpec.Equals(ActionSpec.MakeContinuous(2)));
Assert.IsTrue(actuators[1].ActionSpec.NumDiscreteActions == 1);

2
com.unity.ml-agents.extensions/Tests/Runtime/Input/Unity.ML-Agents.Extensions.Input.Tests.Runtime.asmdef


"versionDefines": [
{
"name": "com.unity.inputsystem",
"expression": "1.1.0-preview.3",
"expression": "1.1.0",
"define": "MLA_INPUT_TESTS"
}
],

4
com.unity.ml-agents.extensions/package.json


{
"name": "com.unity.ml-agents.extensions",
"displayName": "ML Agents Extensions",
"version": "0.2.0-preview",
"version": "0.1.0-preview",
"com.unity.ml-agents": "1.8.1-preview"
"com.unity.ml-agents": "1.8.0-preview"
}
}

7
com.unity.ml-agents.extensions/Runtime/Input/Adaptors/ButtonInputActionAdaptor.cs


using Unity.MLAgents.Actuators;
using UnityEngine;
using UnityEngine.InputSystem;
using UnityEngine.InputSystem.Controls;
using UnityEngine.InputSystem.LowLevel;
namespace Unity.MLAgents.Extensions.Input

}
/// TODO again this might need to be more nuanced for things like continuous buttons.
/// <inheritdoc cref="IRLActionInputAdaptor.WriteToInputEventForAction"/>
public void WriteToInputEventForAction(InputEventPtr eventPtr, InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers)
/// <inheritdoc cref="IRLActionInputAdaptor.QueueInputEventForAction"/>
public void QueueInputEventForAction(InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers)
((ButtonControl)control).WriteValueIntoEvent((float)val, eventPtr);
InputSystem.QueueDeltaStateEvent(control, (byte)val);
}
/// <inheritdoc cref="IRLActionInputAdaptor.WriteToHeuristic"/>>

7
com.unity.ml-agents.extensions/Runtime/Input/Adaptors/DoubleInputActionAdaptor.cs


#if MLA_INPUT_SYSTEM && UNITY_2019_4_OR_NEWER
using Unity.MLAgents.Actuators;
using UnityEngine.InputSystem;
using UnityEngine.InputSystem.Controls;
using UnityEngine.InputSystem.LowLevel;
namespace Unity.MLAgents.Extensions.Input

return ActionSpec.MakeContinuous(1);
}
/// <inheritdoc cref="IRLActionInputAdaptor.WriteToInputEventForAction"/>
public void WriteToInputEventForAction(InputEventPtr eventPtr, InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers)
/// <inheritdoc cref="IRLActionInputAdaptor.QueueInputEventForAction"/>
public void QueueInputEventForAction(InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers)
((DoubleControl)control).WriteValueIntoEvent((double)val, eventPtr);
InputSystem.QueueDeltaStateEvent(control,(double)val);
}
/// <inheritdoc cref="IRLActionInputAdaptor.WriteToHeuristic"/>

6
com.unity.ml-agents.extensions/Runtime/Input/Adaptors/FloatInputActionAdaptor.cs


return ActionSpec.MakeContinuous(1);
}
/// <inheritdoc cref="IRLActionInputAdaptor.WriteToInputEventForAction"/>
public void WriteToInputEventForAction(InputEventPtr eventPtr, InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers)
/// <inheritdoc cref="IRLActionInputAdaptor.QueueInputEventForAction"/>
public void QueueInputEventForAction(InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers)
control.WriteValueIntoEvent(val, eventPtr);
InputSystem.QueueDeltaStateEvent(control, val);
}
/// <inheritdoc cref="IRLActionInputAdaptor.WriteToHeuristic"/>

6
com.unity.ml-agents.extensions/Runtime/Input/Adaptors/IntegerInputActionAdaptor.cs


return ActionSpec.MakeDiscrete(2);
}
/// <inheritdoc cref="IRLActionInputAdaptor.WriteToInputEventForAction"/>
public void WriteToInputEventForAction(InputEventPtr eventPtr, InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers)
/// <inheritdoc cref="IRLActionInputAdaptor.QueueInputEventForAction"/>
public void QueueInputEventForAction(InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers)
control.WriteValueIntoEvent(val, eventPtr);
InputSystem.QueueDeltaStateEvent(control, val);
}
/// <inheritdoc cref="IRLActionInputAdaptor.WriteToHeuristic"/>

7
com.unity.ml-agents.extensions/Runtime/Input/Adaptors/Vector2InputActionAdaptor.cs


using Unity.MLAgents.Actuators;
using UnityEngine;
using UnityEngine.InputSystem;
using UnityEngine.InputSystem.Controls;
using UnityEngine.InputSystem.LowLevel;
namespace Unity.MLAgents.Extensions.Input

return ActionSpec.MakeContinuous(2);
}
/// <inheritdoc cref="IRLActionInputAdaptor.WriteToInputEventForAction"/>
public void WriteToInputEventForAction(InputEventPtr eventPtr, InputAction action,
/// <inheritdoc cref="IRLActionInputAdaptor.QueueInputEventForAction"/>
public void QueueInputEventForAction(InputAction action,
InputControl control,
ActionSpec actionSpec,
in ActionBuffers actionBuffers)

control.WriteValueIntoEvent(new Vector2(x, y), eventPtr);
InputSystem.QueueDeltaStateEvent(control, new Vector2(x, y));
}
/// <inheritdoc cref="IRLActionInputAdaptor.WriteToHeuristic"/>

4
com.unity.ml-agents.extensions/Runtime/Input/IRLActionInputAdaptor.cs


using System;
using Unity.MLAgents.Actuators;
using UnityEngine.InputSystem;
using UnityEngine.InputSystem.LowLevel;
namespace Unity.MLAgents.Extensions.Input
{

/// <summary>
/// Translates data from the <see cref="ActionBuffers"/> object to the <see cref="InputSystem"/>.
/// </summary>
/// <param name="eventPtr">The Event pointer to write to.</param>
void WriteToInputEventForAction(InputEventPtr eventPtr, InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers);
void QueueInputEventForAction(InputAction action, InputControl control, ActionSpec actionSpec, in ActionBuffers actionBuffers);
/// <summary>
/// Writes data from the <paramref name="action"/> to the <paramref name="actionBuffers"/>.

4
com.unity.ml-agents.extensions/Runtime/Input/Unity.ML-Agents.Extensions.Input.asmdef


"versionDefines": [
{
"name": "com.unity.inputsystem",
"expression": "1.1.0-preview.3",
"expression": "1.1.0-preview",
}
}

12
com.unity.ml-agents.extensions/Runtime/Input/InputActionActuator.cs


readonly BehaviorParameters m_BehaviorParameters;
readonly InputAction m_Action;
readonly IRLActionInputAdaptor m_InputAdaptor;
InputActuatorEventContext m_InputActuatorEventContext;
InputDevice m_Device;
InputControl m_Control;

/// via the <see cref="IRLActionInputAdaptor"/>.</param>
/// <param name="adaptor">The <see cref="IRLActionInputAdaptor"/> that will convert data between ML-Agents
/// and the <see cref="InputSystem"/>.</param>
/// <param name="inputActuatorEventContext">The object that will provide the event ptr to write to.</param>
IRLActionInputAdaptor adaptor,
InputActuatorEventContext inputActuatorEventContext)
IRLActionInputAdaptor adaptor)
m_InputActuatorEventContext = inputActuatorEventContext;
ActionSpec = adaptor.GetActionSpecForInputAction(m_Action);
m_Device = inputDevice;
m_Control = m_Device?.GetChildControl(m_Action.name);

Profiler.BeginSample("InputActionActuator.OnActionReceived");
if (!m_BehaviorParameters.IsInHeuristicMode())
{
using (m_InputActuatorEventContext.GetEventForFrame(out var eventPtr))
{
m_InputAdaptor.WriteToInputEventForAction(eventPtr, m_Action, m_Control, ActionSpec, actionBuffers);
}
m_InputAdaptor.QueueInputEventForAction(m_Action, m_Control, ActionSpec, actionBuffers);
}
Profiler.EndSample();
}

63
com.unity.ml-agents.extensions/Runtime/Input/InputActuatorComponent.cs


#if MLA_INPUT_SYSTEM && UNITY_2019_4_OR_NEWER
using System;
using System.Collections.Generic;
using Unity.Collections;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Policies;
using UnityEngine;

using UnityEngine.InputSystem.LowLevel;
using UnityEngine.InputSystem.Layouts;
using UnityEngine.InputSystem.Utilities;
#if UNITY_EDITOR

get
{
#if UNITY_EDITOR
if (!EditorApplication.isPlaying && m_ActionSpec.NumContinuousActions == 0
&& m_ActionSpec.BranchSizes == null
|| m_ActionSpec.BranchSizes.Length == 0)
{
FindNeededComponents();
var actuators = CreateActuatorsFromMap(m_InputAsset.FindActionMap(m_PlayerInput.defaultActionMap),
m_BehaviorParameters,
null,
InputActuatorEventContext.s_EditorContext);
m_ActionSpec = CombineActuatorActionSpecs(actuators);
}
FindNeededComponents();
var actuators = CreateActuatorsFromMap(m_InputAsset.FindActionMap(m_PlayerInput.defaultActionMap), m_BehaviorParameters, null);
m_ActionSpec = CombineActuatorActionSpecs(actuators);
#endif
return m_ActionSpec;
}

RegisterLayoutBuilder(inputActionMap, m_LayoutName);
m_Device = InputSystem.AddDevice(m_LayoutName);
var context = new InputActuatorEventContext(inputActionMap.actions.Count, m_Device);
m_Actuators = CreateActuatorsFromMap(inputActionMap, m_BehaviorParameters, m_Device, context);
m_Actuators = CreateActuatorsFromMap(inputActionMap, m_BehaviorParameters, m_Device);
UpdateDeviceBinding(m_BehaviorParameters.IsInHeuristicMode());
inputActionMap.Enable();

internal static IActuator[] CreateActuatorsFromMap(InputActionMap inputActionMap,
BehaviorParameters behaviorParameters,
InputDevice inputDevice,
InputActuatorEventContext context)
InputDevice inputDevice)
{
var actuators = new IActuator[inputActionMap.actions.Count];
for (var i = 0; i < inputActionMap.actions.Count; i++)

var adaptor = (IRLActionInputAdaptor)Activator.CreateInstance(controlTypeToAdaptorType[actionLayout.type]);
actuators[i] = new InputActionActuator(inputDevice, behaviorParameters, action, adaptor, context);
actuators[i] = new InputActionActuator(inputDevice, behaviorParameters, action, adaptor);
// Reasonably, the input system starts adding numbers after the first non-numbered name
// is added. So for a device ID of 0, we use the empty string in the path.

action.processors,
mlAgentsControlSchemeName);
action.bindingMask = InputBinding.MaskByGroup(mlAgentsControlSchemeName);
}
return actuators;
}

m_PlayerInput = null;
m_BehaviorParameters = null;
m_Device = null;
}
int m_ActuatorsWrittenToEvent;
NativeArray<byte> m_InputBufferForFrame;
InputEventPtr m_InputEventPtrForFrame;
public InputEventPtr GetEventForFrame()
{
#if UNITY_EDITOR
if (!EditorApplication.isPlaying)
{
return new InputEventPtr();
}
#endif
if (m_ActuatorsWrittenToEvent % m_Actuators.Length == 0 || !m_InputEventPtrForFrame.valid)
{
m_ActuatorsWrittenToEvent = 0;
m_InputEventPtrForFrame = new InputEventPtr();
m_InputBufferForFrame = StateEvent.From(m_Device, out m_InputEventPtrForFrame);
}
return m_InputEventPtrForFrame;
}
public void EventProcessedInFrame()
{
#if UNITY_EDITOR
if (!EditorApplication.isPlaying)
{
return;
}
#endif
m_ActuatorsWrittenToEvent++;
if (m_ActuatorsWrittenToEvent == m_Actuators.Length && m_InputEventPtrForFrame.valid)
{
InputSystem.QueueEvent(m_InputEventPtrForFrame);
m_InputBufferForFrame.Dispose();
}
}
}
}
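The C# context above batches all actuator writes for a frame into one input event and queues it once the last actuator has written. A hedged, language-agnostic sketch of that pattern in Python (all names are ours; a list and a queue stand in for `StateEvent.From` and `InputSystem.QueueEvent`):

```python
class EventContext:
    def __init__(self, num_actuators: int):
        self.num_actuators = num_actuators
        self.written = 0
        self.event = None

    def get_event_for_frame(self):
        # Allocate a fresh event when starting a new frame's round of writes.
        if self.event is None or self.written % self.num_actuators == 0:
            self.written = 0
            self.event = []
        return self.event

    def event_processed(self, queue):
        self.written += 1
        if self.written == self.num_actuators:
            queue.append(self.event)  # queue exactly once per frame

queue = []
ctx = EventContext(num_actuators=2)
for name in ("move", "jump"):
    ctx.get_event_for_frame().append(name)
    ctx.event_processed(queue)
print(queue)  # [['move', 'jump']]: one queued event containing both writes
```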

1
Project/Project.sln.DotSettings


<wpf:ResourceDictionary xml:space="preserve" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:s="clr-namespace:System;assembly=mscorlib" xmlns:ss="urn:shemas-jetbrains-com:settings-storage-xaml" xmlns:wpf="http://schemas.microsoft.com/winfx/2006/xaml/presentation">
<s:String x:Key="/Default/CodeStyle/Naming/CSharpNaming/Abbreviations/=ML/@EntryIndexedValue">ML</s:String>
<s:Boolean x:Key="/Default/UserDictionary/Words/=Dont/@EntryIndexedValue">True</s:Boolean></wpf:ResourceDictionary>

1
Project/ProjectSettings/GraphicsSettings.asset


- {fileID: 10770, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 10783, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 16000, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 16001, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 17000, guid: 0000000000000000f000000000000000, type: 0}
m_PreloadedShaders: []
m_SpritesDefaultMaterial: {fileID: 10754, guid: 0000000000000000f000000000000000,

9
Project/ProjectSettings/TagManager.asset


- purpleAgent
- purpleGoal
- tile
- zombie
- blockLarge
- blockVeryLarge
- blockSmall
- door
- switch
- key
- lock
- dragon
layers:
- Default
- TransparentFX

4
Project/Packages/manifest.json


"dependencies": {
"com.unity.ads": "2.0.8",
"com.unity.analytics": "3.2.3",
"com.unity.barracuda": "1.2.1-preview",
"com.unity.collab-proxy": "1.2.15",
"com.unity.ml-agents": "file:../../com.unity.ml-agents",
"com.unity.ml-agents.extensions": "file:../../com.unity.ml-agents.extensions",

"com.unity.modules.video": "1.0.0",
"com.unity.modules.vr": "1.0.0",
"com.unity.modules.wind": "1.0.0",
"com.unity.modules.xr": "1.0.0",
"com.unity.nuget.newtonsoft-json": "2.0.0"
"com.unity.modules.xr": "1.0.0"
},
"testables": [
"com.unity.ml-agents",

2
Project/Assets/ML-Agents/Examples/GridWorld/Demos/ExpertGrid.demo.meta


guid: 0092f2e4aece345aea4730a37eeebf68
ScriptedImporter:
fileIDToRecycleName:
11400002: Assets/ML-Agents/Examples/GridWorld/Demos/ExpertGridWorld.demo
11400000: Assets/ML-Agents/Examples/GridWorld/Demos/ExpertGrid.demo
externalObjects: {}
userData: ' (Unity.MLAgents.Demonstrations.DemonstrationSummary)'
assetBundleName:

52
Project/Assets/ML-Agents/Examples/Worm/Scripts/WormAgent.cs


[RequireComponent(typeof(JointDriveController))] // Required to set joint forces
public class WormAgent : Agent
{
//The type of agent behavior we want to use.
//This setting will determine how the agent is set up during initialization.
public enum WormAgentBehaviorType
{
WormDynamic,
WormStatic
}
[Tooltip(
"Dynamic - The agent will run towards a target that changes position.\n\n" +
"Static - The agent will run towards a static target. "
)]
public WormAgentBehaviorType typeOfWorm;
[Header("Target Prefabs")] public Transform TargetPrefab; //Target prefab to use in Dynamic envs
//Brains
//A different brain will be used depending on the CrawlerAgentBehaviorType selected
[Header("NN Models")] public NNModel wormDyModel;
public NNModel wormStModel;
[Header("Target Prefabs")] public Transform dynamicTargetPrefab; //Target prefab to use in Dynamic envs
public Transform staticTargetPrefab; //Target prefab to use in Static envs
private Transform m_Target; //Target the agent will walk towards during training.
[Header("Body Parts")] public Transform bodySegment0;

public override void Initialize()
{
SpawnTarget(TargetPrefab, transform.position); //spawn target
SetAgentType();
m_StartingPos = bodySegment0.position;
m_OrientationCube = GetComponentInChildren<OrientationCubeController>();

void SpawnTarget(Transform prefab, Vector3 pos)
{
m_Target = Instantiate(prefab, pos, Quaternion.identity, transform);
}
/// <summary>
/// Set up the agent based on the type
/// </summary>
void SetAgentType()
{
var behaviorParams = GetComponent<Unity.MLAgents.Policies.BehaviorParameters>();
switch (typeOfWorm)
{
case WormAgentBehaviorType.WormDynamic:
{
behaviorParams.BehaviorName = "WormDynamic"; //set behavior name
if (wormDyModel)
behaviorParams.Model = wormDyModel; //assign the brain
SpawnTarget(dynamicTargetPrefab, transform.position); //spawn target
break;
}
case WormAgentBehaviorType.WormStatic:
{
behaviorParams.BehaviorName = "WormStatic"; //set behavior name
if (wormStModel)
behaviorParams.Model = wormStModel; //assign the brain
SpawnTarget(staticTargetPrefab, transform.TransformPoint(new Vector3(0, 0, 1000))); //spawn target
break;
}
}
}
/// <summary>

9
Project/Assets/ML-Agents/Examples/Worm/Prefabs/PlatformWormDynamicTarget.prefab


m_Modification:
m_TransformParent: {fileID: 7519741477752072726}
m_Modifications:
- target: {fileID: 2461460301642470340, guid: ff2999c8614d848f8a7e55e3a6fb9282,
type: 3}
propertyPath: targetToLookAt
value:
objectReference: {fileID: 0}
- target: {fileID: 7430253518223459950, guid: ff2999c8614d848f8a7e55e3a6fb9282,
type: 3}
propertyPath: m_Name

- target: {fileID: 7430253518223459951, guid: ff2999c8614d848f8a7e55e3a6fb9282,
type: 3}
propertyPath: m_RootOrder
value: 2
value: 3
objectReference: {fileID: 0}
- target: {fileID: 7430253518223459951, guid: ff2999c8614d848f8a7e55e3a6fb9282,
type: 3}

- target: {fileID: 845566399918322646, guid: d6fc96a99a9754f07b48abf1e0d55a5c,
type: 3}
propertyPath: m_Name
value: PlatformWorm
value: PlatformWormDynamicTarget
objectReference: {fileID: 0}
- target: {fileID: 845742365997159796, guid: d6fc96a99a9754f07b48abf1e0d55a5c,
type: 3}

13
Project/Assets/ML-Agents/Examples/Worm/Prefabs/WormBasePrefab.prefab


- component: {fileID: 7430253518223459946}
- component: {fileID: 7430253518223459945}
m_Layer: 0
m_Name: Worm
m_Name: WormBasePrefab
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0

VectorActionDescriptions: []
VectorActionSpaceType: 1
hasUpgradedBrainParametersWithActionSpec: 1
m_Model: {fileID: 11400000, guid: 117512193457f4b35994eedc14532276, type: 3}
m_Model: {fileID: 11400000, guid: e81305346bd9b408c8871523f9088c2a, type: 3}
m_BehaviorName: Worm
m_BehaviorName: WormDynamic
TeamId: 0
m_UseChildSensors: 1
m_UseChildActuators: 1

maxStep: 0
hasUpgradedFromAgentParameters: 1
MaxStep: 5000
TargetPrefab: {fileID: 3839136118347789758, guid: 46734abd0de454192b407379c6a4ab8d,
typeOfWorm: 0
wormDyModel: {fileID: 11400000, guid: 117512193457f4b35994eedc14532276, type: 3}
wormStModel: {fileID: 11400000, guid: fc1e2a84251634459bfd8edc900e2e71, type: 3}
dynamicTargetPrefab: {fileID: 3839136118347789758, guid: 46734abd0de454192b407379c6a4ab8d,
type: 3}
staticTargetPrefab: {fileID: 3839136118347789758, guid: 2173d15c0b5fc49e5870c9d1c7f7ee8e,
type: 3}
bodySegment0: {fileID: 7430253517585478437}
bodySegment1: {fileID: 7430253518698367209}

1001
Project/Assets/ML-Agents/Examples/FoodCollector/TFModels/FoodCollector.onnx
File diff is too large to display
View file

2
Project/Assets/ML-Agents/Examples/FoodCollector/TFModels/FoodCollector.onnx.meta


fileFormatVersion: 2
guid: 75910f45f20be49b18e2b95879a217b2
guid: 3210b528a2bc44a86bd6bd1d571070f8
ScriptedImporter:
fileIDToRecycleName:
11400000: main obj

5
Project/Assets/ML-Agents/Examples/FoodCollector/Prefabs/FoodCollectorArea.prefab.meta


fileFormatVersion: 2
guid: b5339e4b990ade14f992aadf3bf8591b
PrefabImporter:
guid: 38400a68c4ea54b52998e34ee238d1a7
NativeFormatImporter:
mainObjectFileID: 100100000
userData:
assetBundleName:
assetBundleVariant:

380
Project/Assets/ML-Agents/Examples/FoodCollector/Prefabs/FoodCollectorArea.prefab


- component: {fileID: 54936164982484646}
- component: {fileID: 114374774605792098}
- component: {fileID: 114176228333253036}
- component: {fileID: 114725457980523372}
- component: {fileID: 6035497842152854922}
m_Layer: 0
m_Name: Agent
m_TagString: agent

m_Name:
m_EditorClassIdentifier:
m_BrainParameters:
VectorObservationSize: 0
VectorObservationSize: 4
NumStackedVectorObservations: 1
m_ActionSpec:
m_NumContinuousActions: 3

VectorActionSpaceType: 0
VectorActionSpaceType: 1
m_Model: {fileID: 11400000, guid: 75910f45f20be49b18e2b95879a217b2, type: 3}
m_Model: {fileID: 11400000, guid: 3210b528a2bc44a86bd6bd1d571070f8, type: 3}
m_BehaviorName: GridFoodCollector
m_BehaviorName: FoodCollector
TeamId: 0
m_UseChildSensors: 1
m_UseChildActuators: 1

goodMaterial: {fileID: 2100000, guid: c67450f290f3e4897bc40276a619e78d, type: 2}
frozenMaterial: {fileID: 2100000, guid: 66163cf35956a4be08e801b750c26f33, type: 2}
myLaser: {fileID: 1081721624670010}
contribute: 0
useVectorObs: 0
contribute: 1
useVectorObs: 1
--- !u!114 &8297075921230369060
--- !u!114 &114725457980523372
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 3a5c9d521e5ef4759a8246a07d52221e, type: 3}
m_Script: {fileID: 11500000, guid: 6bb6b867a41448888c1cd4f99643ad71, type: 3}
DecisionPeriod: 5
TakeActionsBetweenDecisions: 1
--- !u!114 &1222199865870203693
m_SensorName: RayPerceptionSensor
m_DetectableTags:
- food
- agent
- wall
- badFood
- frozenAgent
m_RaysPerDirection: 3
m_MaxRayDegrees: 70
m_SphereCastRadius: 0.5
m_RayLength: 50
m_RayLayerMask:
serializedVersion: 2
m_Bits: 4294967291
m_ObservationStacks: 1
rayHitColor: {r: 1, g: 0, b: 0, a: 1}
rayMissColor: {r: 1, g: 1, b: 1, a: 1}
m_StartVerticalOffset: 0
m_EndVerticalOffset: 0
--- !u!114 &8297075921230369060
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 3a6da8f78a394c6ab027688eab81e04d, type: 3}
m_Script: {fileID: 11500000, guid: 3a5c9d521e5ef4759a8246a07d52221e, type: 3}
debugCommandLineOverride:
--- !u!114 &6035497842152854922
DecisionPeriod: 5
TakeActionsBetweenDecisions: 1
--- !u!114 &1222199865870203693
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 801669c0cdece6b40b2e741ad0b119ac, type: 3}
m_Script: {fileID: 11500000, guid: 3a6da8f78a394c6ab027688eab81e04d, type: 3}
Name:
CellScaleX: 1
CellScaleZ: 1
GridNumSideX: 40
GridNumSideZ: 40
CellScaleY: 0.01
RotateToAgent: 1
ChannelDepth: 06000000
DetectableObjects:
- food
- agent
- wall
- badFood
- frozenAgent
ObserveMask:
serializedVersion: 2
m_Bits: 307
gridDepthType: 1
rootReference: {fileID: 0}
ObservationPerCell: 0
NumberOfObservations: 0
ChannelOffsets:
DebugColors:
- {r: 0.4039216, g: 0.7372549, b: 0.41960788, a: 0}
- {r: 0.12941177, g: 0.5882353, b: 0.95294124, a: 0}
- {r: 0.3921569, g: 0.3921569, b: 0.3921569, a: 0}
- {r: 0.74509805, g: 0.227451, b: 0.15294118, a: 0}
- {r: 0, g: 0, b: 0, a: 0}
GizmoYOffset: 0
ShowGizmos: 0
CompressionType: 1
debugCommandLineOverride:
--- !u!1 &1482701732800114
GameObject:
m_ObjectHideFlags: 0

- component: {fileID: 54504078365531932}
- component: {fileID: 114522573150607728}
- component: {fileID: 114711827726849508}
- component: {fileID: 114443152683847924}
- component: {fileID: 3067525015186813280}
m_Layer: 0
m_Name: Agent (1)
m_TagString: agent

m_Name:
m_EditorClassIdentifier:
m_BrainParameters:
VectorObservationSize: 0
VectorObservationSize: 4
NumStackedVectorObservations: 1
m_ActionSpec:
m_NumContinuousActions: 3

VectorActionSpaceType: 0
VectorActionSpaceType: 1
m_Model: {fileID: 11400000, guid: 75910f45f20be49b18e2b95879a217b2, type: 3}
m_Model: {fileID: 11400000, guid: 3210b528a2bc44a86bd6bd1d571070f8, type: 3}
m_BehaviorName: GridFoodCollector
m_BehaviorName: FoodCollector
TeamId: 0
m_UseChildSensors: 1
m_UseChildActuators: 1

frozenMaterial: {fileID: 2100000, guid: 66163cf35956a4be08e801b750c26f33, type: 2}
myLaser: {fileID: 1941433838307300}
contribute: 0
useVectorObs: 0
useVectorObs: 1
--- !u!114 &259154752087955944
--- !u!114 &114443152683847924
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 3a5c9d521e5ef4759a8246a07d52221e, type: 3}
m_Script: {fileID: 11500000, guid: 6bb6b867a41448888c1cd4f99643ad71, type: 3}
DecisionPeriod: 5
TakeActionsBetweenDecisions: 1
--- !u!114 &3067525015186813280
m_SensorName: RayPerceptionSensor
m_DetectableTags:
- food
- agent
- wall
- badFood
- frozenAgent
m_RaysPerDirection: 3
m_MaxRayDegrees: 70
m_SphereCastRadius: 0.5
m_RayLength: 50
m_RayLayerMask:
serializedVersion: 2
m_Bits: 4294967291
m_ObservationStacks: 1
rayHitColor: {r: 1, g: 0, b: 0, a: 1}
rayMissColor: {r: 1, g: 1, b: 1, a: 1}
m_StartVerticalOffset: 0
m_EndVerticalOffset: 0
--- !u!114 &259154752087955944
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 801669c0cdece6b40b2e741ad0b119ac, type: 3}
m_Script: {fileID: 11500000, guid: 3a5c9d521e5ef4759a8246a07d52221e, type: 3}
Name:
CellScaleX: 1
CellScaleZ: 1
GridNumSideX: 40
GridNumSideZ: 40
CellScaleY: 0.01
RotateToAgent: 1
ChannelDepth: 06000000
DetectableObjects:
- food
- agent
- wall
- badFood
- frozenAgent
ObserveMask:
serializedVersion: 2
m_Bits: 307
gridDepthType: 1
rootReference: {fileID: 0}
ObservationPerCell: 0
NumberOfObservations: 0
ChannelOffsets:
DebugColors:
- {r: 0.4039216, g: 0.7372549, b: 0.41960788, a: 0}
- {r: 0.12941177, g: 0.5882353, b: 0.95294124, a: 0}
- {r: 0.3921569, g: 0.3921569, b: 0.3921569, a: 0}
- {r: 0.74509805, g: 0.227451, b: 0.15294118, a: 0}
- {r: 0, g: 0, b: 0, a: 0}
GizmoYOffset: 0
ShowGizmos: 0
CompressionType: 1
DecisionPeriod: 5
TakeActionsBetweenDecisions: 1
--- !u!1 &1528397385587768
GameObject:
m_ObjectHideFlags: 0

- component: {fileID: 54961653455021136}
- component: {fileID: 114980787530065684}
- component: {fileID: 114542632553128056}
- component: {fileID: 114986980423924774}
- component: {fileID: 8466013622553267624}
m_Layer: 0
m_Name: Agent (2)
m_TagString: agent

m_Name:
m_EditorClassIdentifier:
m_BrainParameters:
VectorObservationSize: 0
VectorObservationSize: 4
NumStackedVectorObservations: 1
m_ActionSpec:
m_NumContinuousActions: 3

VectorActionSpaceType: 0
VectorActionSpaceType: 1
m_Model: {fileID: 11400000, guid: 75910f45f20be49b18e2b95879a217b2, type: 3}
m_Model: {fileID: 11400000, guid: 3210b528a2bc44a86bd6bd1d571070f8, type: 3}
m_BehaviorName: GridFoodCollector
m_BehaviorName: FoodCollector
TeamId: 0
m_UseChildSensors: 1
m_UseChildActuators: 1

frozenMaterial: {fileID: 2100000, guid: 66163cf35956a4be08e801b750c26f33, type: 2}
myLaser: {fileID: 1421240237750412}
contribute: 0
useVectorObs: 0
useVectorObs: 1
--- !u!114 &5519119940433428255
--- !u!114 &114986980423924774
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 3a5c9d521e5ef4759a8246a07d52221e, type: 3}
m_Script: {fileID: 11500000, guid: 6bb6b867a41448888c1cd4f99643ad71, type: 3}
DecisionPeriod: 5
TakeActionsBetweenDecisions: 1
--- !u!114 &8466013622553267624
m_SensorName: RayPerceptionSensor
m_DetectableTags:
- food
- agent
- wall
- badFood
- frozenAgent
m_RaysPerDirection: 3
m_MaxRayDegrees: 70
m_SphereCastRadius: 0.5
m_RayLength: 50
m_RayLayerMask:
serializedVersion: 2
m_Bits: 4294967291
m_ObservationStacks: 1
rayHitColor: {r: 1, g: 0, b: 0, a: 1}
rayMissColor: {r: 1, g: 1, b: 1, a: 1}
m_StartVerticalOffset: 0
m_EndVerticalOffset: 0
--- !u!114 &5519119940433428255
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 801669c0cdece6b40b2e741ad0b119ac, type: 3}
m_Script: {fileID: 11500000, guid: 3a5c9d521e5ef4759a8246a07d52221e, type: 3}
Name:
CellScaleX: 1
CellScaleZ: 1
GridNumSideX: 40
GridNumSideZ: 40
CellScaleY: 0.01
RotateToAgent: 1
ChannelDepth: 06000000
DetectableObjects:
- food
- agent
- wall
- badFood
- frozenAgent
ObserveMask:
serializedVersion: 2
m_Bits: 307
gridDepthType: 1
rootReference: {fileID: 0}
ObservationPerCell: 0
NumberOfObservations: 0
ChannelOffsets:
DebugColors:
- {r: 0.4039216, g: 0.7372549, b: 0.41960788, a: 0}
- {r: 0.12941177, g: 0.5882353, b: 0.95294124, a: 0}
- {r: 0.3921569, g: 0.3921569, b: 0.3921569, a: 0}
- {r: 0.74509805, g: 0.227451, b: 0.15294118, a: 0}
- {r: 0, g: 0, b: 0, a: 0}
GizmoYOffset: 0
ShowGizmos: 0
CompressionType: 1
DecisionPeriod: 5
TakeActionsBetweenDecisions: 1
--- !u!1 &1617924810425504
GameObject:
m_ObjectHideFlags: 0

- component: {fileID: 54819001862035794}
- component: {fileID: 114878550018296316}
- component: {fileID: 114189751434580810}
- component: {fileID: 114644889237473510}
- component: {fileID: 6247312751399400490}
m_Layer: 0
m_Name: Agent (4)
m_TagString: agent

m_Name:
m_EditorClassIdentifier:
m_BrainParameters:
VectorObservationSize: 0
VectorObservationSize: 4
NumStackedVectorObservations: 1
m_ActionSpec:
m_NumContinuousActions: 3

VectorActionSpaceType: 0
VectorActionSpaceType: 1
m_Model: {fileID: 11400000, guid: 75910f45f20be49b18e2b95879a217b2, type: 3}
m_Model: {fileID: 11400000, guid: 3210b528a2bc44a86bd6bd1d571070f8, type: 3}
m_BehaviorName: GridFoodCollector
m_BehaviorName: FoodCollector
TeamId: 0
m_UseChildSensors: 1
m_UseChildActuators: 1

frozenMaterial: {fileID: 2100000, guid: 66163cf35956a4be08e801b750c26f33, type: 2}
myLaser: {fileID: 1617924810425504}
contribute: 0
useVectorObs: 0
useVectorObs: 1
--- !u!114 &5884750436653390196
--- !u!114 &114644889237473510
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 3a5c9d521e5ef4759a8246a07d52221e, type: 3}
m_Script: {fileID: 11500000, guid: 6bb6b867a41448888c1cd4f99643ad71, type: 3}
DecisionPeriod: 5
TakeActionsBetweenDecisions: 1
--- !u!114 &6247312751399400490
m_SensorName: RayPerceptionSensor
m_DetectableTags:
- food
- agent
- wall
- badFood
- frozenAgent
m_RaysPerDirection: 3
m_MaxRayDegrees: 70
m_SphereCastRadius: 0.5
m_RayLength: 50
m_RayLayerMask:
serializedVersion: 2
m_Bits: 4294967291
m_ObservationStacks: 1
rayHitColor: {r: 1, g: 0, b: 0, a: 1}
rayMissColor: {r: 1, g: 1, b: 1, a: 1}
m_StartVerticalOffset: 0
m_EndVerticalOffset: 0
--- !u!114 &5884750436653390196
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 801669c0cdece6b40b2e741ad0b119ac, type: 3}
m_Script: {fileID: 11500000, guid: 3a5c9d521e5ef4759a8246a07d52221e, type: 3}
Name:
CellScaleX: 1
CellScaleZ: 1
GridNumSideX: 40
GridNumSideZ: 40
CellScaleY: 0.01
RotateToAgent: 1
ChannelDepth: 06000000
DetectableObjects:
- food
- agent
- wall
- badFood
- frozenAgent
ObserveMask:
serializedVersion: 2
m_Bits: 307
gridDepthType: 1
rootReference: {fileID: 0}
ObservationPerCell: 0
NumberOfObservations: 0
ChannelOffsets:
DebugColors:
- {r: 0.4039216, g: 0.7372549, b: 0.41960788, a: 0}
- {r: 0.12941177, g: 0.5882353, b: 0.95294124, a: 0}
- {r: 0.3921569, g: 0.3921569, b: 0.3921569, a: 0}
- {r: 0.74509805, g: 0.227451, b: 0.15294118, a: 0}
- {r: 0, g: 0, b: 0, a: 0}
GizmoYOffset: 0
ShowGizmos: 0
CompressionType: 1
DecisionPeriod: 5
TakeActionsBetweenDecisions: 1
--- !u!1 &1688105343773098
GameObject:
m_ObjectHideFlags: 0

- component: {fileID: 54895479068989492}
- component: {fileID: 114035338027591536}
- component: {fileID: 114235147148547996}
- component: {fileID: 114276061479012222}
- component: {fileID: 5837508007780682603}
m_Layer: 0
m_Name: Agent (3)
m_TagString: agent

m_Name:
m_EditorClassIdentifier:
m_BrainParameters:
VectorObservationSize: 0
VectorObservationSize: 4
NumStackedVectorObservations: 1
m_ActionSpec:
m_NumContinuousActions: 3

VectorActionSpaceType: 0
VectorActionSpaceType: 1
m_Model: {fileID: 11400000, guid: 75910f45f20be49b18e2b95879a217b2, type: 3}
m_Model: {fileID: 11400000, guid: 3210b528a2bc44a86bd6bd1d571070f8, type: 3}
m_BehaviorName: GridFoodCollector
m_BehaviorName: FoodCollector
TeamId: 0
m_UseChildSensors: 1
m_UseChildActuators: 1

frozenMaterial: {fileID: 2100000, guid: 66163cf35956a4be08e801b750c26f33, type: 2}
myLaser: {fileID: 1045923826166930}
contribute: 0
useVectorObs: 0
useVectorObs: 1
--- !u!114 &4768752321433982785
--- !u!114 &114276061479012222
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 3a5c9d521e5ef4759a8246a07d52221e, type: 3}
m_Script: {fileID: 11500000, guid: 6bb6b867a41448888c1cd4f99643ad71, type: 3}
DecisionPeriod: 5
TakeActionsBetweenDecisions: 1
--- !u!114 &5837508007780682603
m_SensorName: RayPerceptionSensor
m_DetectableTags:
- food
- agent
- wall
- badFood
- frozenAgent
m_RaysPerDirection: 3
m_MaxRayDegrees: 70
m_SphereCastRadius: 0.5
m_RayLength: 50
m_RayLayerMask:
serializedVersion: 2
m_Bits: 4294967291
m_ObservationStacks: 1
rayHitColor: {r: 1, g: 0, b: 0, a: 1}
rayMissColor: {r: 1, g: 1, b: 1, a: 1}
m_StartVerticalOffset: 0
m_EndVerticalOffset: 0
--- !u!114 &4768752321433982785
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 801669c0cdece6b40b2e741ad0b119ac, type: 3}
m_Script: {fileID: 11500000, guid: 3a5c9d521e5ef4759a8246a07d52221e, type: 3}
Name:
CellScaleX: 1
CellScaleZ: 1
GridNumSideX: 40
GridNumSideZ: 40
CellScaleY: 0.01
RotateToAgent: 1
ChannelDepth: 06000000
DetectableObjects:
- food
- agent
- wall
- badFood
- frozenAgent
ObserveMask:
serializedVersion: 2
m_Bits: 307
gridDepthType: 1
rootReference: {fileID: 0}
ObservationPerCell: 0
NumberOfObservations: 0
ChannelOffsets:
DebugColors:
- {r: 0.4039216, g: 0.7372549, b: 0.41960788, a: 0}
- {r: 0.12941177, g: 0.5882353, b: 0.95294124, a: 0}
- {r: 0.3921569, g: 0.3921569, b: 0.3921569, a: 0}
- {r: 0.74509805, g: 0.227451, b: 0.15294118, a: 0}
- {r: 0, g: 0, b: 0, a: 0}
GizmoYOffset: 0
ShowGizmos: 0
CompressionType: 1
DecisionPeriod: 5
TakeActionsBetweenDecisions: 1
--- !u!1 &1729825611722018
GameObject:
m_ObjectHideFlags: 0

- component: {fileID: 4688212428263696}
- component: {fileID: 114181230191376748}
m_Layer: 0
m_Name: GridFoodCollectorArea
m_Name: FoodCollectorArea
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
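(Taken together, the paired lines throughout this prefab hunk flip each agent between the grid-sensor configuration and the vector/ray configuration. A hedged C# sketch of what the changed BehaviorParameters amount to at runtime — property names are from the public Unity.MLAgents.Policies API, and the sketch uses the FoodCollector side of each conflicting pair:)

using Unity.MLAgents.Policies;
using UnityEngine;

public class FoodCollectorBehaviorSketch : MonoBehaviour
{
    void Awake()
    {
        var bp = GetComponent<BehaviorParameters>();
        bp.BehaviorName = "FoodCollector";                    // paired with "GridFoodCollector"
        bp.BrainParameters.VectorObservationSize = 4;         // paired with 0
        bp.BrainParameters.NumStackedVectorObservations = 1;
        // m_Model is swapped between two .onnx assets (GUIDs shown above);
        // asset references cannot be reproduced in a sketch.
    }
}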

5
Project/Assets/ML-Agents/Examples/FoodCollector/Scenes/FoodCollector.unity.meta


fileFormatVersion: 2
guid: 74aeee1f5073c4998840fc784793f1ef
guid: 11583205ab5b74bb4bb1b9951cf9e437
timeCreated: 1506808980
licenseType: Pro
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

862
Project/Assets/ML-Agents/Examples/FoodCollector/Scenes/FoodCollector.unity


m_ReflectionIntensity: 1
m_CustomReflection: {fileID: 0}
m_Sun: {fileID: 0}
m_IndirectSpecularColor: {r: 0.44971168, g: 0.4997775, b: 0.57563686, a: 1}
m_IndirectSpecularColor: {r: 0.4497121, g: 0.49977785, b: 0.57563704, a: 1}
m_UseRadianceAmbientProbe: 0
--- !u!157 &3
LightmapSettings:

m_PVRFilterTypeDirect: 0
m_PVRFilterTypeIndirect: 0
m_PVRFilterTypeAO: 0
m_PVRFilteringMode: 2
m_PVRFilteringMode: 1
m_PVRCulling: 1
m_PVRFilteringGaussRadiusDirect: 1
m_PVRFilteringGaussRadiusIndirect: 5

debug:
m_Flags: 0
m_NavMeshData: {fileID: 0}
--- !u!1001 &190823800
--- !u!1001 &89545475
PrefabInstance:
m_ObjectHideFlags: 0
serializedVersion: 2

- target: {fileID: 1819751139121548, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 1819751139121548, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
value: GridFoodCollectorArea
value: FoodCollectorArea (1)
- target: {fileID: 4137908820211030, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalPosition.x
value: -17.2
objectReference: {fileID: 0}
- target: {fileID: 4259834826122778, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalPosition.x
value: -23.9
objectReference: {fileID: 0}
- target: {fileID: 4419274671784554, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalPosition.x
value: -8.9
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
value: 0
value: -50
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
value: 6
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.y
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.z
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4756368533889646, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalPosition.x
value: -30.4
objectReference: {fileID: 0}
- target: {fileID: 4756368533889646, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalPosition.z
value: -9.9
objectReference: {fileID: 0}
- target: {fileID: 3067525015186813280, guid: b5339e4b990ade14f992aadf3bf8591b,
type: 3}
propertyPath: NumCollidersPerCell
value: 1
objectReference: {fileID: 0}
- target: {fileID: 3067525015186813280, guid: b5339e4b990ade14f992aadf3bf8591b,
type: 3}
propertyPath: EstimatedMaxCollidersPerCell
value: 4
objectReference: {fileID: 0}
- target: {fileID: 5837508007780682603, guid: b5339e4b990ade14f992aadf3bf8591b,
type: 3}
propertyPath: ChannelOffsets.Array.size
value: 1
objectReference: {fileID: 0}
- target: {fileID: 5837508007780682603, guid: b5339e4b990ade14f992aadf3bf8591b,
type: 3}
propertyPath: ShowGizmos
value: 0
objectReference: {fileID: 0}
- target: {fileID: 5837508007780682603, guid: b5339e4b990ade14f992aadf3bf8591b,
type: 3}
propertyPath: ObservationPerCell
value: 6
objectReference: {fileID: 0}
- target: {fileID: 5837508007780682603, guid: b5339e4b990ade14f992aadf3bf8591b,
type: 3}
propertyPath: NumberOfObservations
value: 9600
value: 7
- target: {fileID: 5837508007780682603, guid: b5339e4b990ade14f992aadf3bf8591b,
type: 3}
propertyPath: m_Enabled
value: 1
objectReference: {fileID: 0}
- target: {fileID: 5837508007780682603, guid: b5339e4b990ade14f992aadf3bf8591b,
type: 3}
propertyPath: rootReference
value:
objectReference: {fileID: 190823801}
m_SourcePrefab: {fileID: 100100000, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
--- !u!1 &190823801 stripped
GameObject:
m_CorrespondingSourceObject: {fileID: 1706274796045088, guid: b5339e4b990ade14f992aadf3bf8591b,
type: 3}
m_PrefabInstance: {fileID: 190823800}
m_PrefabAsset: {fileID: 0}
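(A quick consistency check on the grid-sensor overrides above: with the 40x40 grid from the prefab and the ObservationPerCell override of 6, the NumberOfObservations override of 9600 is exactly the flattened size. The flattening rule below is an assumption inferred from the field names, not stated in the hunk:)

public static class GridSensorSizeCheck
{
    // Assumed flattening: one observation vector per grid cell.
    public static int FlattenedSize(int sideX, int sideZ, int perCell)
        => sideX * sideZ * perCell;

    // FlattenedSize(40, 40, 6) == 9600, matching the NumberOfObservations
    // override in this scene hunk.
}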
--- !u!1001 &392794583
m_SourcePrefab: {fileID: 100100000, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
--- !u!1001 &269100759
PrefabInstance:
m_ObjectHideFlags: 0
serializedVersion: 2

- target: {fileID: 1819751139121548, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 1819751139121548, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
value: GridFoodCollectorArea (1)
value: FoodCollectorArea (3)
- target: {fileID: 1819751139121548, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
value: -50
value: -150
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
value: 7
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.y
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.z
value: 0
value: 9
m_SourcePrefab: {fileID: 100100000, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
--- !u!1 &625137506
m_SourcePrefab: {fileID: 100100000, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
--- !u!1 &273651478
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Component:
- component: {fileID: 625137507}
- component: {fileID: 625137509}
- component: {fileID: 625137508}
- component: {fileID: 273651479}
- component: {fileID: 273651481}
- component: {fileID: 273651480}
m_Layer: 5
m_Name: Text
m_TagString: Untagged

m_IsActive: 1
--- !u!224 &625137507
--- !u!224 &273651479
m_GameObject: {fileID: 625137506}
m_GameObject: {fileID: 273651478}
m_Father: {fileID: 965533424}
m_Father: {fileID: 1799584681}
m_RootOrder: 0
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0, y: 0}

m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &625137508
--- !u!114 &273651480
m_GameObject: {fileID: 625137506}
m_GameObject: {fileID: 273651478}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 708705254, guid: f70555f144d8491a825f0804e09c671c, type: 3}

m_VerticalOverflow: 0
m_LineSpacing: 1
m_Text: NOM
--- !u!222 &625137509
--- !u!222 &273651481
m_GameObject: {fileID: 625137506}
m_GameObject: {fileID: 273651478}
--- !u!1 &378228137
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- component: {fileID: 378228141}
- component: {fileID: 378228140}
- component: {fileID: 378228139}
- component: {fileID: 378228138}
m_Layer: 5
m_Name: Canvas
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!114 &378228138
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 378228137}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 1301386320, guid: f70555f144d8491a825f0804e09c671c, type: 3}
m_Name:
m_EditorClassIdentifier:
m_IgnoreReversedGraphics: 1
m_BlockingObjects: 0
m_BlockingMask:
serializedVersion: 2
m_Bits: 4294967295
--- !u!114 &378228139
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 378228137}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 1980459831, guid: f70555f144d8491a825f0804e09c671c, type: 3}
m_Name:
m_EditorClassIdentifier:
m_UiScaleMode: 1
m_ReferencePixelsPerUnit: 100
m_ScaleFactor: 1
m_ReferenceResolution: {x: 800, y: 600}
m_ScreenMatchMode: 0
m_MatchWidthOrHeight: 0.5
m_PhysicalUnit: 3
m_FallbackScreenDPI: 96
m_DefaultSpriteDPI: 96
m_DynamicPixelsPerUnit: 1
--- !u!223 &378228140
Canvas:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 378228137}
m_Enabled: 1
serializedVersion: 3
m_RenderMode: 0
m_Camera: {fileID: 0}
m_PlaneDistance: 100
m_PixelPerfect: 0
m_ReceivesEvents: 1
m_OverrideSorting: 0
m_OverridePixelPerfect: 0
m_SortingBucketNormalizedSize: 0
m_AdditionalShaderChannelsFlag: 0
m_SortingLayerID: 0
m_SortingOrder: 0
m_TargetDisplay: 0
--- !u!224 &378228141
RectTransform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 378228137}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 0, y: 0, z: 0}
m_Children:
- {fileID: 1799584681}
- {fileID: 1086444498}
m_Father: {fileID: 0}
m_RootOrder: 2
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0, y: 0}
m_AnchorMax: {x: 0, y: 0}
m_AnchoredPosition: {x: 0, y: 0}
m_SizeDelta: {x: 0, y: 0}
m_Pivot: {x: 0, y: 0}
--- !u!1 &499540684
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- component: {fileID: 499540687}
- component: {fileID: 499540686}
- component: {fileID: 499540685}
m_Layer: 0
m_Name: EventSystem
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!114 &499540685
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 499540684}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 1077351063, guid: f70555f144d8491a825f0804e09c671c, type: 3}
m_Name:
m_EditorClassIdentifier:
m_HorizontalAxis: Horizontal
m_VerticalAxis: Vertical
m_SubmitButton: Submit
m_CancelButton: Cancel
m_InputActionsPerSecond: 10
m_RepeatDelay: 0.5
m_ForceModuleActive: 0
--- !u!114 &499540686
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 499540684}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: -619905303, guid: f70555f144d8491a825f0804e09c671c, type: 3}
m_Name:
m_EditorClassIdentifier:
m_FirstSelected: {fileID: 0}
m_sendNavigationEvents: 1
m_DragThreshold: 5
--- !u!4 &499540687
Transform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 499540684}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children: []
m_Father: {fileID: 0}
m_RootOrder: 4
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
--- !u!1001 &587417076
PrefabInstance:
m_ObjectHideFlags: 0
serializedVersion: 2
m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 1819751139121548, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_Name
value: FoodCollectorArea (2)
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalPosition.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalPosition.y
value: -100
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalPosition.z
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalRotation.x
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalRotation.y
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalRotation.z
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalRotation.w
value: 1
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_RootOrder
value: 8
objectReference: {fileID: 0}
m_RemovedComponents: []
m_SourcePrefab: {fileID: 100100000, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
--- !u!1001 &916917435
PrefabInstance:
m_ObjectHideFlags: 0

objectReference: {fileID: 0}
m_RemovedComponents: []
m_SourcePrefab: {fileID: 100100000, guid: 5889392e3f05b448a8a06c5def6c2dec, type: 3}
--- !u!1 &965533423
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- component: {fileID: 965533424}
- component: {fileID: 965533426}
- component: {fileID: 965533425}
m_Layer: 5
m_Name: Panel
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 0
--- !u!224 &965533424
RectTransform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 965533423}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children:
- {fileID: 625137507}
m_Father: {fileID: 1064449898}
m_RootOrder: 0
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0, y: 0}
m_AnchorMax: {x: 1, y: 1}
m_AnchoredPosition: {x: 0, y: 0}
m_SizeDelta: {x: 0, y: 0}
m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &965533425
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 965533423}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: -765806418, guid: f70555f144d8491a825f0804e09c671c, type: 3}
m_Name:
m_EditorClassIdentifier:
m_Material: {fileID: 0}
m_Color: {r: 0, g: 0, b: 0, a: 0.472}
m_RaycastTarget: 1
m_OnCullStateChanged:
m_PersistentCalls:
m_Calls: []
m_Sprite: {fileID: 10907, guid: 0000000000000000f000000000000000, type: 0}
m_Type: 1
m_PreserveAspect: 0
m_FillCenter: 1
m_FillMethod: 4
m_FillAmount: 1
m_FillClockwise: 1
m_FillOrigin: 0
m_UseSpriteMesh: 0
--- !u!222 &965533426
CanvasRenderer:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 965533423}
m_CullTransparentMesh: 0
--- !u!1 &1009000883
GameObject:
m_ObjectHideFlags: 0

m_OcclusionCulling: 1
m_StereoConvergence: 10
m_StereoSeparation: 0.022
--- !u!1001 &1043871087
PrefabInstance:
m_ObjectHideFlags: 0
serializedVersion: 2
m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 1819751139121548, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_Name
value: GridFoodCollectorArea (2)
objectReference: {fileID: 0}
- target: {fileID: 1819751139121548, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalPosition.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalPosition.y
value: -100
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalPosition.z
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalRotation.x
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalRotation.y
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalRotation.z
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalRotation.w
value: 1
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_RootOrder
value: 8
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.y
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.z
value: 0
objectReference: {fileID: 0}
m_RemovedComponents: []
m_SourcePrefab: {fileID: 100100000, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
--- !u!1 &1064449894
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
serializedVersion: 6
m_Component:
- component: {fileID: 1064449898}
- component: {fileID: 1064449897}
- component: {fileID: 1064449896}
- component: {fileID: 1064449895}
m_Layer: 5
m_Name: Canvas
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0
m_StaticEditorFlags: 0
m_IsActive: 1
--- !u!114 &1064449895
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1064449894}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 1301386320, guid: f70555f144d8491a825f0804e09c671c, type: 3}
m_Name:
m_EditorClassIdentifier:
m_IgnoreReversedGraphics: 1
m_BlockingObjects: 0
m_BlockingMask:
serializedVersion: 2
m_Bits: 4294967295
--- !u!114 &1064449896
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1064449894}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 1980459831, guid: f70555f144d8491a825f0804e09c671c, type: 3}
m_Name:
m_EditorClassIdentifier:
m_UiScaleMode: 1
m_ReferencePixelsPerUnit: 100
m_ScaleFactor: 1
m_ReferenceResolution: {x: 800, y: 600}
m_ScreenMatchMode: 0
m_MatchWidthOrHeight: 0.5
m_PhysicalUnit: 3
m_FallbackScreenDPI: 96
m_DefaultSpriteDPI: 96
m_DynamicPixelsPerUnit: 1
--- !u!223 &1064449897
Canvas:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1064449894}
m_Enabled: 1
serializedVersion: 3
m_RenderMode: 0
m_Camera: {fileID: 0}
m_PlaneDistance: 100
m_PixelPerfect: 0
m_ReceivesEvents: 1
m_OverrideSorting: 0
m_OverridePixelPerfect: 0
m_SortingBucketNormalizedSize: 0
m_AdditionalShaderChannelsFlag: 0
m_SortingLayerID: 0
m_SortingOrder: 0
m_TargetDisplay: 0
--- !u!224 &1064449898
RectTransform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1064449894}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 0, y: 0, z: 0}
m_Children:
- {fileID: 965533424}
- {fileID: 1418304525}
m_Father: {fileID: 0}
m_RootOrder: 2
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0, y: 0}
m_AnchorMax: {x: 0, y: 0}
m_AnchoredPosition: {x: 0, y: 0}
m_SizeDelta: {x: 0, y: 0}
m_Pivot: {x: 0, y: 0}
--- !u!1 &1418304524
--- !u!1 &1086444495
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Component:
- component: {fileID: 1418304525}
- component: {fileID: 1418304527}
- component: {fileID: 1418304526}
- component: {fileID: 1086444498}
- component: {fileID: 1086444497}
- component: {fileID: 1086444496}
m_Layer: 5
m_Name: Text
m_TagString: Untagged

m_IsActive: 1
--- !u!224 &1418304525
RectTransform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1418304524}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children: []
m_Father: {fileID: 1064449898}
m_RootOrder: 1
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0.5, y: 0.5}
m_AnchorMax: {x: 0.5, y: 0.5}
m_AnchoredPosition: {x: -1000, y: -239.57645}
m_SizeDelta: {x: 160, y: 30}
m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &1418304526
--- !u!114 &1086444496
m_GameObject: {fileID: 1418304524}
m_GameObject: {fileID: 1086444495}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 708705254, guid: f70555f144d8491a825f0804e09c671c, type: 3}

m_VerticalOverflow: 0
m_LineSpacing: 1
m_Text: New Text
--- !u!222 &1418304527
--- !u!222 &1086444497
m_GameObject: {fileID: 1418304524}
m_GameObject: {fileID: 1086444495}
--- !u!224 &1086444498
RectTransform:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1086444495}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children: []
m_Father: {fileID: 378228141}
m_RootOrder: 1
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0.5, y: 0.5}
m_AnchorMax: {x: 0.5, y: 0.5}
m_AnchoredPosition: {x: -1000, y: -239.57645}
m_SizeDelta: {x: 160, y: 30}
m_Pivot: {x: 0.5, y: 0.5}
--- !u!1001 &1142607725
PrefabInstance:
m_ObjectHideFlags: 0
serializedVersion: 2
m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalPosition.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalPosition.y
value: 12.3
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalPosition.z
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalRotation.x
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalRotation.y
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalRotation.z
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_LocalRotation.w
value: 1
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
propertyPath: m_RootOrder
value: 6
objectReference: {fileID: 0}
m_RemovedComponents: []
m_SourcePrefab: {fileID: 100100000, guid: 38400a68c4ea54b52998e34ee238d1a7, type: 3}
--- !u!1 &1574236047
GameObject:
m_ObjectHideFlags: 0

agents: []
listArea: []
totalScore: 0
scoreText: {fileID: 1418304526}
scoreText: {fileID: 1086444496}
--- !u!4 &1574236049
Transform:
m_ObjectHideFlags: 0

m_Father: {fileID: 0}
m_RootOrder: 3
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
--- !u!1 &1956702417
--- !u!1 &1799584680
GameObject:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}

m_Component:
- component: {fileID: 1956702420}
- component: {fileID: 1956702419}
- component: {fileID: 1956702418}
m_Layer: 0
m_Name: EventSystem
- component: {fileID: 1799584681}
- component: {fileID: 1799584683}
- component: {fileID: 1799584682}
m_Layer: 5
m_Name: Panel
m_IsActive: 1
--- !u!114 &1956702418
MonoBehaviour:
m_IsActive: 0
--- !u!224 &1799584681
RectTransform:
m_GameObject: {fileID: 1956702417}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 1077351063, guid: f70555f144d8491a825f0804e09c671c, type: 3}
m_Name:
m_EditorClassIdentifier:
m_HorizontalAxis: Horizontal
m_VerticalAxis: Vertical
m_SubmitButton: Submit
m_CancelButton: Cancel
m_InputActionsPerSecond: 10
m_RepeatDelay: 0.5
m_ForceModuleActive: 0
--- !u!114 &1956702419
m_GameObject: {fileID: 1799584680}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children:
- {fileID: 273651479}
m_Father: {fileID: 378228141}
m_RootOrder: 0
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
m_AnchorMin: {x: 0, y: 0}
m_AnchorMax: {x: 1, y: 1}
m_AnchoredPosition: {x: 0, y: 0}
m_SizeDelta: {x: 0, y: 0}
m_Pivot: {x: 0.5, y: 0.5}
--- !u!114 &1799584682
m_GameObject: {fileID: 1956702417}
m_GameObject: {fileID: 1799584680}
m_Script: {fileID: -619905303, guid: f70555f144d8491a825f0804e09c671c, type: 3}
m_Script: {fileID: -765806418, guid: f70555f144d8491a825f0804e09c671c, type: 3}
m_FirstSelected: {fileID: 0}
m_sendNavigationEvents: 1
m_DragThreshold: 5
--- !u!4 &1956702420
Transform:
m_Material: {fileID: 0}
m_Color: {r: 0, g: 0, b: 0, a: 0.472}
m_RaycastTarget: 1
m_OnCullStateChanged:
m_PersistentCalls:
m_Calls: []
m_Sprite: {fileID: 10907, guid: 0000000000000000f000000000000000, type: 0}
m_Type: 1
m_PreserveAspect: 0
m_FillCenter: 1
m_FillMethod: 4
m_FillAmount: 1
m_FillClockwise: 1
m_FillOrigin: 0
m_UseSpriteMesh: 0
--- !u!222 &1799584683
CanvasRenderer:
m_GameObject: {fileID: 1956702417}
m_LocalRotation: {x: 0, y: 0, z: 0, w: 1}
m_LocalPosition: {x: 0, y: 0, z: 0}
m_LocalScale: {x: 1, y: 1, z: 1}
m_Children: []
m_Father: {fileID: 0}
m_RootOrder: 4
m_LocalEulerAnglesHint: {x: 0, y: 0, z: 0}
--- !u!1001 &1985725465
PrefabInstance:
m_ObjectHideFlags: 0
serializedVersion: 2
m_Modification:
m_TransformParent: {fileID: 0}
m_Modifications:
- target: {fileID: 1819751139121548, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_Name
value: GridFoodCollectorArea (3)
objectReference: {fileID: 0}
- target: {fileID: 1819751139121548, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_IsActive
value: 1
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalPosition.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalPosition.y
value: -150
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalPosition.z
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalRotation.x
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalRotation.y
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalRotation.z
value: -0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalRotation.w
value: 1
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_RootOrder
value: 9
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.x
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.y
value: 0
objectReference: {fileID: 0}
- target: {fileID: 4688212428263696, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
propertyPath: m_LocalEulerAnglesHint.z
value: 0
objectReference: {fileID: 0}
m_RemovedComponents: []
m_SourcePrefab: {fileID: 100100000, guid: b5339e4b990ade14f992aadf3bf8591b, type: 3}
m_GameObject: {fileID: 1799584680}
m_CullTransparentMesh: 0
--- !u!1001 &2124876351
PrefabInstance:
m_ObjectHideFlags: 0

2
Project/Assets/ML-Agents/Examples/Crawler/Demos/ExpCrawlerDynVS.demo.meta


guid: 34586a8d0f1c342a49973b36a609e73b
ScriptedImporter:
fileIDToRecycleName:
11400002: Assets/ML-Agents/Examples/Crawler/Demos/ExpertCrawler.demo
11400002: Assets/ML-Agents/Examples/Crawler/Demos/ExpCrawlerDynVS.demo
externalObjects: {}
userData: ' (Unity.MLAgents.Demonstrations.DemonstrationSummary)'
assetBundleName:

7
Project/Assets/ML-Agents/Examples/Crawler/Scenes/CrawlerDynamicVariableSpeed.unity


m_Name:
m_EditorClassIdentifier:
target: {fileID: 1018218737}
smoothingTime: 0
--- !u!1001 &1481808307
PrefabInstance:
m_ObjectHideFlags: 0

propertyPath: typeOfCrawler
value: 1
objectReference: {fileID: 0}
- target: {fileID: 3421283062001101770, guid: 0058b366f9d6d44a3ba35beb06b0174b,
type: 3}
propertyPath: TargetPrefab
value:
objectReference: {fileID: 3839136118347789758, guid: 46734abd0de454192b407379c6a4ab8d,
type: 3}
- target: {fileID: 6810587057221831324, guid: 0058b366f9d6d44a3ba35beb06b0174b,
type: 3}
propertyPath: m_LocalPosition.x

95
Project/Assets/ML-Agents/Examples/Crawler/Scripts/CrawlerAgent.cs


[RequireComponent(typeof(JointDriveController))] // Required to set joint forces
public class CrawlerAgent : Agent
{
//The type of crawler behavior we want to use.
//This setting will determine how the agent is set up during initialization.
public enum CrawlerAgentBehaviorType
{
CrawlerDynamic,
CrawlerDynamicVariableSpeed,
CrawlerStatic,
CrawlerStaticVariableSpeed,
CrawlerDynamicVariableSpeedCustomTarget
}
[Tooltip(
"VariableSpeed - The agent will sample random speed magnitudes while training.\n" +
"Dynamic - The agent will run towards a target that changes position.\n" +
"Static - The agent will run towards a static target. "
)]
public CrawlerAgentBehaviorType typeOfCrawler;
//Crawler Brains
//A different brain will be used depending on the CrawlerAgentBehaviorType selected
[Header("NN Models")] public NNModel crawlerDyModel;
public NNModel crawlerDyVSModel;
public NNModel crawlerStModel;
public NNModel crawlerStVSModel;
[Header("Walk Speed")]
[Range(0.1f, m_maxWalkingSpeed)]

set { m_TargetWalkingSpeed = Mathf.Clamp(value, .1f, m_maxWalkingSpeed); }
}
//Should the agent sample a new goal velocity each episode?
//If true, TargetWalkingSpeed will be randomly set between 0.1 and m_maxWalkingSpeed in OnEpisodeBegin()
//If false, the goal velocity will be m_maxWalkingSpeed
private bool m_RandomizeWalkSpeedEachEpisode;
[Header("Target To Walk Towards")]
public Transform TargetPrefab; //Target prefab to use in Dynamic envs
private Transform m_Target; //Target the agent will walk towards during training.
[Header("Target To Walk Towards")] public Transform dynamicTargetPrefab; //Target prefab to use in Dynamic envs
public Transform staticTargetPrefab; //Target prefab to use in Static envs
public Transform m_Target; //Target the agent will walk towards during training.
[Header("Body Parts")] [Space(10)] public Transform body;
public Transform leg0Upper;

public override void Initialize()
{
SpawnTarget(TargetPrefab, transform.position); //spawn target
SetAgentType();
m_OrientationCube = GetComponentInChildren<OrientationCubeController>();
m_DirectionIndicator = GetComponentInChildren<DirectionIndicator>();

}
/// <summary>
/// Set up the agent based on the typeOfCrawler
/// </summary>
void SetAgentType()
{
var behaviorParams = GetComponent<Unity.MLAgents.Policies.BehaviorParameters>();
switch (typeOfCrawler)
{
case CrawlerAgentBehaviorType.CrawlerDynamicVariableSpeedCustomTarget:
{
behaviorParams.BehaviorName = "CrawlerDynamicVariableSpeed"; //set behavior name
if (crawlerDyVSModel)
behaviorParams.Model = crawlerDyVSModel; //assign the model
m_RandomizeWalkSpeedEachEpisode = true; //randomize m_TargetWalkingSpeed during training
break;
}
case CrawlerAgentBehaviorType.CrawlerDynamic:
{
behaviorParams.BehaviorName = "CrawlerDynamic"; //set behavior name
if (crawlerDyModel)
behaviorParams.Model = crawlerDyModel; //assign the model
m_RandomizeWalkSpeedEachEpisode = false; //do not randomize m_TargetWalkingSpeed during training
SpawnTarget(dynamicTargetPrefab, transform.position); //spawn target
break;
}
case CrawlerAgentBehaviorType.CrawlerDynamicVariableSpeed:
{
behaviorParams.BehaviorName = "CrawlerDynamicVariableSpeed"; //set behavior name
if (crawlerDyVSModel)
behaviorParams.Model = crawlerDyVSModel; //assign the model
m_RandomizeWalkSpeedEachEpisode = true; //randomize m_TargetWalkingSpeed during training
SpawnTarget(dynamicTargetPrefab, transform.position); //spawn target
break;
}
case CrawlerAgentBehaviorType.CrawlerStatic:
{
behaviorParams.BehaviorName = "CrawlerStatic"; //set behavior name
if (crawlerStModel)
behaviorParams.Model = crawlerStModel; //assign the model
m_RandomizeWalkSpeedEachEpisode = false; //do not randomize m_TargetWalkingSpeed during training
SpawnTarget(staticTargetPrefab, transform.TransformPoint(new Vector3(0, 0, 1000))); //spawn target
break;
}
case CrawlerAgentBehaviorType.CrawlerStaticVariableSpeed:
{
behaviorParams.BehaviorName = "CrawlerStaticVariableSpeed"; //set behavior name
if (crawlerStVSModel)
behaviorParams.Model = crawlerStVSModel; //assign the model
m_RandomizeWalkSpeedEachEpisode = true; //randomize m_TargetWalkingSpeed during training
SpawnTarget(staticTargetPrefab, transform.TransformPoint(new Vector3(0, 0, 1000))); //spawn target
break;
}
}
}
/// <summary>
/// Loop over body parts and reset them to initial conditions.
/// </summary>
public override void OnEpisodeBegin()

UpdateOrientationObjects();
//Set our goal walking speed
TargetWalkingSpeed = Random.Range(0.1f, m_maxWalkingSpeed);
TargetWalkingSpeed =
m_RandomizeWalkSpeedEachEpisode ? Random.Range(0.1f, m_maxWalkingSpeed) : TargetWalkingSpeed;
}
/// <summary>

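(To make the walk-speed change above easier to follow outside the diff, here is a condensed, hedged sketch of just the pieces it touches. Names come from the hunk; the m_maxWalkingSpeed value and the elided members are assumptions:)

using Unity.MLAgents;
using UnityEngine;

// Condensed sketch only; the real CrawlerAgent has many more members.
public class CrawlerWalkSpeedSketch : Agent
{
    const float m_maxWalkingSpeed = 15f;                 // assumption: const in the real agent
    float m_TargetWalkingSpeed = m_maxWalkingSpeed;
    bool m_RandomizeWalkSpeedEachEpisode;                // set true for VariableSpeed crawler types

    public float TargetWalkingSpeed
    {
        get { return m_TargetWalkingSpeed; }
        set { m_TargetWalkingSpeed = Mathf.Clamp(value, .1f, m_maxWalkingSpeed); }
    }

    public override void OnEpisodeBegin()
    {
        // VariableSpeed variants sample a fresh goal speed each episode;
        // fixed-speed variants keep the previously clamped value.
        TargetWalkingSpeed =
            m_RandomizeWalkSpeedEachEpisode ? Random.Range(0.1f, m_maxWalkingSpeed) : TargetWalkingSpeed;
    }
}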
13
Project/Assets/ML-Agents/Examples/Crawler/Prefabs/CrawlerBase.prefab


VectorActionDescriptions: []
VectorActionSpaceType: 1
hasUpgradedBrainParametersWithActionSpec: 1
m_Model: {fileID: 11400000, guid: 0d9a992c217a44684b41c7663f3eab3d, type: 3}
m_Model: {fileID: 11400000, guid: c6509001ba679447fba27f894761c3ba, type: 3}
m_BehaviorName: Crawler
m_BehaviorName:
TeamId: 0
m_UseChildSensors: 1
m_UseChildActuators: 1

maxStep: 0
hasUpgradedFromAgentParameters: 1
MaxStep: 5000
typeOfCrawler: 0
crawlerDyModel: {fileID: 11400000, guid: 2dc51465533e7468d8bcafc17250cebf, type: 3}
crawlerDyVSModel: {fileID: 11400000, guid: 0d9a992c217a44684b41c7663f3eab3d, type: 3}
crawlerStModel: {fileID: 11400000, guid: e88b5542c96104c01b56f1ed82d8ccc8, type: 3}
crawlerStVSModel: {fileID: 11400000, guid: e0800a8eb11a34c138fa8186124af9dc, type: 3}
TargetPrefab: {fileID: 3839136118347789758, guid: 46734abd0de454192b407379c6a4ab8d,
dynamicTargetPrefab: {fileID: 3839136118347789758, guid: 46734abd0de454192b407379c6a4ab8d,
type: 3}
staticTargetPrefab: {fileID: 3839136118347789758, guid: 2173d15c0b5fc49e5870c9d1c7f7ee8e,
type: 3}
body: {fileID: 4845971001588102148}
leg0Upper: {fileID: 4845971001327157979}

2
Project/Assets/ML-Agents/Examples/Walker/Demos/ExpertWalkerDyVS.demo.meta


guid: a4b02e2c382c247919eb63ce72e90a3b
ScriptedImporter:
fileIDToRecycleName:
11400002: Assets/ML-Agents/Examples/Walker/Demos/ExpertWalker.demo
11400002: Assets/ML-Agents/Examples/Walker/Demos/ExpertWalkerDyVS.demo
externalObjects: {}
userData: ' (Unity.MLAgents.Demonstrations.DemonstrationSummary)'
assetBundleName:

2
Project/Assets/ML-Agents/Examples/Walker/Prefabs/Ragdoll/WalkerRagdollBase.prefab


- component: {fileID: 895268871377934302}
- component: {fileID: 895268871377934301}
m_Layer: 0
m_Name: WalkerRagdoll
m_Name: WalkerRagdollBase
m_TagString: Untagged
m_Icon: {fileID: 0}
m_NavMeshLayer: 0

14
Project/Assets/ML-Agents/Examples/Match3/Prefabs/Match3VectorObs.prefab


- component: {fileID: 2118285884327540687}
- component: {fileID: 2118285884327540680}
- component: {fileID: 3357012711826686276}
- component: {fileID: 2164669533582273470}
m_Layer: 0
m_Name: Match3 Agent
m_TagString: Untagged

ActuatorName: Match3 Actuator
RandomSeed: -1
ForceHeuristic: 0
--- !u!114 &2164669533582273470
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 2118285884327540673}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 3a6da8f78a394c6ab027688eab81e04d, type: 3}
m_Name:
m_EditorClassIdentifier:
debugCommandLineOverride:

14
Project/Assets/ML-Agents/Examples/Match3/Prefabs/Match3VisualObs.prefab


- component: {fileID: 3019509692332007776}
- component: {fileID: 3019509692332007783}
- component: {fileID: 8270768986451624427}
- component: {fileID: 5564406567458194538}
m_Layer: 0
m_Name: Match3 Agent
m_TagString: Untagged

ActuatorName: Match3 Actuator
RandomSeed: -1
ForceHeuristic: 0
--- !u!114 &5564406567458194538
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 3019509692332007790}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 3a6da8f78a394c6ab027688eab81e04d, type: 3}
m_Name:
m_EditorClassIdentifier:
debugCommandLineOverride:

5
Project/Assets/ML-Agents/Examples/Sorter/Scripts/SorterAgent.cs


int m_NumberOfTilesToSpawn;
int m_MaxNumberOfTiles;
PushBlockSettings m_PushBlockSettings;
Rigidbody m_AgentRb;
// The BufferSensorComponent is the Sensor that allows the Agent to observe

m_MaxNumberOfTiles = k_HighestTileValue;
m_ResetParams = Academy.Instance.EnvironmentParameters;
m_BufferSensor = GetComponent<BufferSensorComponent>();
m_PushBlockSettings = FindObjectOfType<PushBlockSettings>();
m_AgentRb = GetComponent<Rigidbody>();
m_StartingPos = transform.position;
}

}
transform.Rotate(rotateDir, Time.deltaTime * 200f);
m_AgentRb.AddForce(dirToGo * 2, ForceMode.VelocityChange);
m_AgentRb.AddForce(dirToGo * m_PushBlockSettings.agentRunSpeed,
ForceMode.VelocityChange);
}
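(The SorterAgent hunk above replaces a hard-coded force multiplier with the shared PushBlockSettings value, so all PushBlock-style agents tune from one place. A minimal sketch of the changed movement path, assuming only the members the hunk shows:)

using Unity.MLAgents;
using UnityEngine;

// Sketch of the changed movement call only; not the complete SorterAgent.
public class SorterMoveSketch : Agent
{
    PushBlockSettings m_PushBlockSettings;
    Rigidbody m_AgentRb;

    public override void Initialize()
    {
        // As in the hunk above.
        m_PushBlockSettings = FindObjectOfType<PushBlockSettings>();
        m_AgentRb = GetComponent<Rigidbody>();
    }

    void MoveAgent(Vector3 dirToGo, Vector3 rotateDir)
    {
        transform.Rotate(rotateDir, Time.deltaTime * 200f);
        // Was: m_AgentRb.AddForce(dirToGo * 2, ForceMode.VelocityChange);
        m_AgentRb.AddForce(dirToGo * m_PushBlockSettings.agentRunSpeed,
            ForceMode.VelocityChange);
    }
}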

2
Project/Assets/ML-Agents/Examples/PushBlock/Demos/ExpertPush.demo.meta


guid: 7f11f35191533404c9957443a681aaee
ScriptedImporter:
fileIDToRecycleName:
11400002: Assets/ML-Agents/Examples/PushBlock/Demos/ExpertPushBlock.demo
11400000: Assets/ML-Agents/Examples/PushBlock/Demos/ExpertPush.demo
externalObjects: {}
userData: ' (Unity.MLAgents.Demonstrations.DemonstrationSummary)'
assetBundleName:

4
com.unity.ml-agents/.gitignore


/Assets/Plugins*
/Assets/Demonstrations*
/csharp_timers.json
/Samples/
/Samples.meta
*.api.meta

4
com.unity.ml-agents/Documentation~/com.unity.ml-agents.md


[unity ML-Agents Toolkit]: https://github.com/Unity-Technologies/ml-agents
[unity inference engine]: https://docs.unity3d.com/Packages/com.unity.barracuda@latest/index.html
[package manager documentation]: https://docs.unity3d.com/Manual/upm-ui-install.html
[installation instructions]: https://github.com/Unity-Technologies/ml-agents/blob/release_14_docs/docs/Installation.md
[installation instructions]: https://github.com/Unity-Technologies/ml-agents/blob/release_13_docs/docs/Installation.md
[ML-Agents GitHub repo]: https://github.com/Unity-Technologies/ml-agents/blob/release_14_docs/com.unity.ml-agents.extensions
[ML-Agents GitHub repo]: https://github.com/Unity-Technologies/ml-agents/blob/release_13_docs/com.unity.ml-agents.extensions

48
com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Observation.cs


byte[] descriptorData = global::System.Convert.FromBase64String(
string.Concat(
"CjRtbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL29ic2VydmF0",
"aW9uLnByb3RvEhRjb21tdW5pY2F0b3Jfb2JqZWN0cyKPAwoQT2JzZXJ2YXRp",
"aW9uLnByb3RvEhRjb21tdW5pY2F0b3Jfb2JqZWN0cyKBAwoQT2JzZXJ2YXRp",
"b25Qcm90bxINCgVzaGFwZRgBIAMoBRJEChBjb21wcmVzc2lvbl90eXBlGAIg",
"ASgOMiouY29tbXVuaWNhdG9yX29iamVjdHMuQ29tcHJlc3Npb25UeXBlUHJv",
"dG8SGQoPY29tcHJlc3NlZF9kYXRhGAMgASgMSAASRgoKZmxvYXRfZGF0YRgE",

"b25fdHlwZRgHIAEoDjIqLmNvbW11bmljYXRvcl9vYmplY3RzLk9ic2VydmF0",
"aW9uVHlwZVByb3RvEgwKBG5hbWUYCCABKAkaGQoJRmxvYXREYXRhEgwKBGRh",
"dGEYASADKAJCEgoQb2JzZXJ2YXRpb25fZGF0YSopChRDb21wcmVzc2lvblR5",
"cGVQcm90bxIICgROT05FEAASBwoDUE5HEAEqRgoUT2JzZXJ2YXRpb25UeXBl",
"UHJvdG8SCwoHREVGQVVMVBAAEggKBEdPQUwQARIKCgZSRVdBUkQQAhILCgdN",
"RVNTQUdFEANCJaoCIlVuaXR5Lk1MQWdlbnRzLkNvbW11bmljYXRvck9iamVj",
"dHNiBnByb3RvMw=="));
"aW9uVHlwZVByb3RvGhkKCUZsb2F0RGF0YRIMCgRkYXRhGAEgAygCQhIKEG9i",
"c2VydmF0aW9uX2RhdGEqKQoUQ29tcHJlc3Npb25UeXBlUHJvdG8SCAoETk9O",
"RRAAEgcKA1BORxABKkYKFE9ic2VydmF0aW9uVHlwZVByb3RvEgsKB0RFRkFV",
"TFQQABIICgRHT0FMEAESCgoGUkVXQVJEEAISCwoHTUVTU0FHRRADQiWqAiJV",
"bml0eS5NTEFnZW50cy5Db21tdW5pY2F0b3JPYmplY3RzYgZwcm90bzM="));
new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.ObservationProto), global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Parser, new[]{ "Shape", "CompressionType", "CompressedData", "FloatData", "CompressedChannelMapping", "DimensionProperties", "ObservationType", "Name" }, new[]{ "ObservationData" }, null, new pbr::GeneratedClrTypeInfo[] { new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Types.FloatData), global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Types.FloatData.Parser, new[]{ "Data" }, null, null, null)})
new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.ObservationProto), global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Parser, new[]{ "Shape", "CompressionType", "CompressedData", "FloatData", "CompressedChannelMapping", "DimensionProperties", "ObservationType" }, new[]{ "ObservationData" }, null, new pbr::GeneratedClrTypeInfo[] { new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Types.FloatData), global::Unity.MLAgents.CommunicatorObjects.ObservationProto.Types.FloatData.Parser, new[]{ "Data" }, null, null, null)})
}));
}
#endregion

compressedChannelMapping_ = other.compressedChannelMapping_.Clone();
dimensionProperties_ = other.dimensionProperties_.Clone();
observationType_ = other.observationType_;
name_ = other.name_;
switch (other.ObservationDataCase) {
case ObservationDataOneofCase.CompressedData:
CompressedData = other.CompressedData;

}
}
/// <summary>Field number for the "name" field.</summary>
public const int NameFieldNumber = 8;
private string name_ = "";
/// <summary>
/// Optional name of the observation.
/// This will be set to the ISensor name when writing,
/// and read into the ObservationSpec in the low-level API
/// </summary>
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public string Name {
get { return name_; }
set {
name_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
}
}
private object observationData_;
/// <summary>Enum of possible cases for the "observation_data" oneof.</summary>
public enum ObservationDataOneofCase {

if(!compressedChannelMapping_.Equals(other.compressedChannelMapping_)) return false;
if(!dimensionProperties_.Equals(other.dimensionProperties_)) return false;
if (ObservationType != other.ObservationType) return false;
if (Name != other.Name) return false;
if (ObservationDataCase != other.ObservationDataCase) return false;
return Equals(_unknownFields, other._unknownFields);
}

hash ^= compressedChannelMapping_.GetHashCode();
hash ^= dimensionProperties_.GetHashCode();
if (ObservationType != 0) hash ^= ObservationType.GetHashCode();
if (Name.Length != 0) hash ^= Name.GetHashCode();
hash ^= (int) observationDataCase_;
if (_unknownFields != null) {
hash ^= _unknownFields.GetHashCode();

output.WriteRawTag(56);
output.WriteEnum((int) ObservationType);
}
if (Name.Length != 0) {
output.WriteRawTag(66);
output.WriteString(Name);
}
if (_unknownFields != null) {
_unknownFields.WriteTo(output);
}

if (ObservationType != 0) {
size += 1 + pb::CodedOutputStream.ComputeEnumSize((int) ObservationType);
}
if (Name.Length != 0) {
size += 1 + pb::CodedOutputStream.ComputeStringSize(Name);
}
if (_unknownFields != null) {
size += _unknownFields.CalculateSize();
}

dimensionProperties_.Add(other.dimensionProperties_);
if (other.ObservationType != 0) {
ObservationType = other.ObservationType;
}
if (other.Name.Length != 0) {
Name = other.Name;
}
switch (other.ObservationDataCase) {
case ObservationDataOneofCase.CompressedData:

}
case 56: {
observationType_ = (global::Unity.MLAgents.CommunicatorObjects.ObservationTypeProto) input.ReadEnum();
break;
}
case 66: {
Name = input.ReadString();
break;
}
}
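(The generated-code hunk above adds an optional name field, tag 8, to ObservationProto. A small hedged sketch of the round trip through the generated C# class — the class and property are shown in the hunk; the sample value is illustrative:)

using Unity.MLAgents.CommunicatorObjects;

public static class ObservationNameSketch
{
    public static ObservationProto Tag(string sensorName)
    {
        // Per the new field's doc comment: Name is set to the ISensor name when
        // writing, and read back into the ObservationSpec in the low-level API.
        return new ObservationProto
        {
            Name = sensorName, // e.g. "RayPerceptionSensor"
            CompressionType = CompressionTypeProto.None,
        };
    }
}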

2
com.unity.ml-agents/Runtime/MultiAgentGroupIdCounter.cs.meta


fileFormatVersion: 2
guid: 3744ac27d956e43e1a39c7ba2550ab82
guid: 5661ffdb6c7704e84bc785572dcd5bd1
MonoImporter:
externalObjects: {}
serializedVersion: 2

Some files could not be shown because this change set contains too many files.
