
Merge branch 'master' into develop-critic-optimizer

/develop/critic-op-lstm-currentmem
Andrew Cohen, 3 years ago
Current commit: dc8e8494
69 files changed, 5,409 insertions(+), 248 deletions(-)
  1. .github/workflows/pytest.yml (3)
  2. .yamato/com.unity.ml-agents-performance.yml (1)
  3. .yamato/com.unity.ml-agents-test.yml (4)
  4. .yamato/compressed-sensor-test.yml (4)
  5. .yamato/gym-interface-test.yml (4)
  6. .yamato/python-ll-api-test.yml (4)
  7. .yamato/test_versions.metafile (15)
  8. Project/Assets/ML-Agents/Editor/Tests/StandaloneBuildTest.cs (2)
  9. Project/Assets/ML-Agents/Examples/SharedAssets/Scripts/ModelOverrider.cs (29)
  10. Project/ProjectSettings/TagManager.asset (1)
  11. com.unity.ml-agents/CHANGELOG.md (18)
  12. com.unity.ml-agents/Editor/BehaviorParametersEditor.cs (16)
  13. com.unity.ml-agents/Runtime/Academy.cs (55)
  14. com.unity.ml-agents/Runtime/Agent.cs (4)
  15. com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs (15)
  16. com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs (5)
  17. com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs (147)
  18. com.unity.ml-agents/Runtime/Communicator/UnityRLCapabilities.cs (5)
  19. com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Capabilities.cs (40)
  20. com.unity.ml-agents/Runtime/Inference/BarracudaModelParamLoader.cs (70)
  21. com.unity.ml-agents/Runtime/Inference/ModelRunner.cs (6)
  22. com.unity.ml-agents/Runtime/Sensors/BufferSensor.cs (21)
  23. com.unity.ml-agents/Runtime/Sensors/BufferSensorComponent.cs (14)
  24. com.unity.ml-agents/Runtime/Sensors/CameraSensor.cs (17)
  25. com.unity.ml-agents/Runtime/Sensors/IDimensionPropertiesSensor.cs (2)
  26. com.unity.ml-agents/Runtime/SideChannels/SideChannel.cs (14)
  27. com.unity.ml-agents/Tests/Editor/Communicator/RpcCommunicatorTests.cs (19)
  28. com.unity.ml-agents/Tests/Editor/ParameterLoaderTest.cs (30)
  29. docs/Installation.md (2)
  30. docs/Learning-Environment-Design-Agents.md (51)
  31. docs/Learning-Environment-Examples.md (27)
  32. docs/Training-Configuration-File.md (2)
  33. gym-unity/README.md (2)
  34. ml-agents-envs/mlagents_envs/communicator_objects/capabilities_pb2.py (11)
  35. ml-agents-envs/mlagents_envs/communicator_objects/capabilities_pb2.pyi (6)
  36. ml-agents-envs/mlagents_envs/environment.py (4)
  37. ml-agents-envs/mlagents_envs/rpc_utils.py (16)
  38. ml-agents-envs/mlagents_envs/tests/test_rpc_utils.py (14)
  39. ml-agents/mlagents/trainers/ghost/trainer.py (40)
  40. ml-agents/mlagents/trainers/tests/torch/test_ghost.py (9)
  41. ml-agents/mlagents/trainers/torch/attention.py (49)
  42. ml-agents/tests/yamato/training_int_tests.py (29)
  43. protobuf-definitions/proto/mlagents_envs/communicator_objects/capabilities.proto (3)
  44. Project/Assets/ML-Agents/Examples/Sorter.meta (8)
  45. config/ppo/Sorter_curriculum.yaml (105)
  46. docs/images/sorter.png (1001)
  47. Project/Assets/ML-Agents/Examples/Sorter/Meshes.meta (8)
  48. Project/Assets/ML-Agents/Examples/Sorter/Meshes/ArenaWalls.fbx (63)
  49. Project/Assets/ML-Agents/Examples/Sorter/Meshes/ArenaWalls.fbx.meta (247)
  50. Project/Assets/ML-Agents/Examples/Sorter/Prefabs.meta (8)
  51. Project/Assets/ML-Agents/Examples/Sorter/Scenes.meta (8)
  52. Project/Assets/ML-Agents/Examples/Sorter/Scripts.meta (8)
  53. Project/Assets/ML-Agents/Examples/Sorter/TFModels.meta (8)
  54. Project/Assets/ML-Agents/Examples/Sorter/TFModels/Sorter.onnx.meta (15)
  55. Project/Assets/ML-Agents/Examples/Sorter/TFModels/Sorter.onnx (1001)
  56. Project/Assets/ML-Agents/Examples/Sorter/Prefabs/Area.prefab.meta (7)
  57. Project/Assets/ML-Agents/Examples/Sorter/Prefabs/Area.prefab (1001)
  58. Project/Assets/ML-Agents/Examples/Sorter/Scripts/SorterAgent.cs.meta (11)
  59. Project/Assets/ML-Agents/Examples/Sorter/Scripts/NumberTile.cs.meta (11)
  60. Project/Assets/ML-Agents/Examples/Sorter/Scripts/NumberTile.cs (34)
  61. Project/Assets/ML-Agents/Examples/Sorter/Scripts/SorterAgent.cs (273)
  62. Project/Assets/ML-Agents/Examples/Sorter/Scenes/Sorter.unity.meta (9)
  63. Project/Assets/ML-Agents/Examples/Sorter/Scenes/Sorter.unity (1001)

.github/workflows/pytest.yml (3)


run: python -c "import sys; print(sys.version)"
- name: Install dependencies
run: |
# pin pip to workaround https://github.com/pypa/pip/issues/9180
python -m pip install pip==20.2
python -m pip install --upgrade pip
python -m pip install --upgrade setuptools
python -m pip install --progress-bar=off -e ./ml-agents-envs
python -m pip install --progress-bar=off -e ./ml-agents

.yamato/com.unity.ml-agents-performance.yml (1)


test_editors:
- version: 2019.4
- version: 2020.1
- version: 2020.2
---
{% for editor in test_editors %}

.yamato/com.unity.ml-agents-test.yml (4)


enableCodeCoverage: !!bool true
testProject: DevProject
enableNoDefaultPackages: !!bool true
- version: 2020.1
enableCodeCoverage: !!bool true
testProject: DevProject
enableNoDefaultPackages: !!bool true
- version: 2020.2
enableCodeCoverage: !!bool true
testProject: DevProject

.yamato/compressed-sensor-test.yml (4)


- .yamato/standalone-build-test.yml#test_linux_standalone_{{ editor.version }}
triggers:
cancel_old_ci: true
{% if editor.extra_test == "sensor" %}
expression: |
(pull_request.target eq "master" OR
pull_request.target match "release.+") AND

pull_request.changes.any match "Project/**" OR
pull_request.changes.any match "ml-agents/**" OR
pull_request.changes.any match "ml-agents/tests/yamato/**" OR
{% endif %}
{% endfor %}

.yamato/gym-interface-test.yml (4)


- .yamato/standalone-build-test.yml#test_linux_standalone_{{ editor.version }}
triggers:
cancel_old_ci: true
{% if editor.extra_test == "gym" %}
expression: |
(pull_request.target eq "master" OR
pull_request.target match "release.+") AND

pull_request.changes.any match "ml-agents/**" OR
pull_request.changes.any match "ml-agents/tests/yamato/**" OR
{% endif %}
{% endfor %}

.yamato/python-ll-api-test.yml (4)


- .yamato/standalone-build-test.yml#test_linux_standalone_{{ editor.version }}
triggers:
cancel_old_ci: true
{% if editor.extra_test == "llapi" %}
expression: |
(pull_request.target eq "master" OR
pull_request.target match "release.+") AND

pull_request.changes.any match "ml-agents/**" OR
pull_request.changes.any match "ml-agents/tests/yamato/**" OR
{% endif %}
{% endfor %}

.yamato/test_versions.metafile (15)


# List of editor versions for standalone-build-test and its dependencies.
# csharp_backcompat_version is used in training-int-tests to determine the
# older package version to run the backwards compat tests against.
# We always run training-int-tests for all versions of the editor
# For each "other" test, we only run it against a single version of the
# editor to reduce the number of yamato jobs
csharp_backcompat_version: 1.0.0
extra_test: llapi
csharp_backcompat_version: 1.0.0
- version: 2020.1
csharp_backcompat_version: 1.0.0
extra_test: gym
# 2020.2 moved the AssetImporters namespace
# but we didn't handle this until 1.2.0
csharp_backcompat_version: 1.2.0
extra_test: sensor

Project/Assets/ML-Agents/Editor/Tests/StandaloneBuildTest.cs (2)


scenes,
outputPath,
buildTarget,
BuildOptions.None
BuildOptions.Development
);
var isOk = buildResult.summary.result == BuildResult.Succeeded;
var error = "";

Project/Assets/ML-Agents/Examples/SharedAssets/Scripts/ModelOverrider.cs (29)


const string k_CommandLineModelOverrideDirectoryFlag = "--mlagents-override-model-directory";
const string k_CommandLineModelOverrideExtensionFlag = "--mlagents-override-model-extension";
const string k_CommandLineQuitAfterEpisodesFlag = "--mlagents-quit-after-episodes";
const string k_CommandLineQuitAfterSeconds = "--mlagents-quit-after-seconds";
const string k_CommandLineQuitOnLoadFailure = "--mlagents-quit-on-load-failure";
// The attached Agent

// Max episodes to run. Only used if > 0
// Will default to 1 if override models are specified, otherwise 0.
int m_MaxEpisodes;
// Deadline - exit if the time exceeds this
DateTime m_Deadline = DateTime.MaxValue;
int m_NumSteps;
int m_PreviousNumSteps;

void GetAssetPathFromCommandLine()
{
var maxEpisodes = 0;
var timeoutSeconds = 0;
string[] commandLineArgsOverride = null;
if (!string.IsNullOrEmpty(debugCommandLineOverride) && Application.isEditor)
{

{
Int32.TryParse(args[i + 1], out maxEpisodes);
}
else if (args[i] == k_CommandLineQuitAfterSeconds && i < args.Length - 1)
{
Int32.TryParse(args[i + 1], out timeoutSeconds);
}
else if (args[i] == k_CommandLineQuitOnLoadFailure)
{
m_QuitOnLoadFailure = true;

m_MaxEpisodes = maxEpisodes > 0 ? maxEpisodes : 1;
Debug.Log($"setting m_MaxEpisodes to {maxEpisodes}");
}
if (timeoutSeconds > 0)
{
m_Deadline = DateTime.Now + TimeSpan.FromSeconds(timeoutSeconds);
Debug.Log($"setting deadline to {timeoutSeconds} from now.");
}
}
void OnEnable()

EditorApplication.isPlaying = false;
#endif
}
else if (DateTime.Now >= m_Deadline)
{
Debug.Log(
$"Deadline exceeded. " +
$"{TotalCompletedEpisodes}/{m_MaxEpisodes} episodes and " +
$"{TotalNumSteps}/{m_MaxEpisodes * m_Agent.MaxStep} steps completed. Exiting.");
Application.Quit(0);
#if UNITY_EDITOR
EditorApplication.isPlaying = false;
#endif
}
m_NumSteps++;
}

Project/ProjectSettings/TagManager.asset (1)


- symbol_O_Goal
- purpleAgent
- purpleGoal
- tile
layers:
- Default
- TransparentFX

com.unity.ml-agents/CHANGELOG.md (18)


and this project adheres to
[Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Major Changes
#### com.unity.ml-agents (C#)
#### ml-agents / ml-agents-envs / gym-unity (Python)
### Minor Changes
#### com.unity.ml-agents / com.unity.ml-agents.extensions (C#)
#### ml-agents / ml-agents-envs / gym-unity (Python)
## [Unreleased]
### Bug Fixes
#### com.unity.ml-agents (C#)
#### ml-agents / ml-agents-envs / gym-unity (Python)
## [1.8.0-preview] - 2021-02-17
### Major Changes
#### com.unity.ml-agents (C#)
#### ml-agents / ml-agents-envs / gym-unity (Python)

reduced the amount of memory allocated by approximately 25%. (#4887)
- Removed several memory allocations that happened during inference with discrete actions. (#4922)
- Properly catch permission errors when writing timer files. (#4921)
- Unexpected exceptions during training initialization and shutdown are now logged. If you see
"noisy" logs, please let us know! (#4930, #4935)
#### ml-agents / ml-agents-envs / gym-unity (Python)
- Fixed a bug that would cause an exception when `RunOptions` was deserialized via `pickle`. (#4842)

while waiting for a connection, and raises a better error message if it crashes. (#4880)
- Passing a `-logfile` option in the `--env-args` option to `mlagents-learn` is
no longer overwritten. (#4880)
- The `load_weights` function was being called unnecessarily often in the Ghost Trainer leading to training slowdowns. (#4934)
## [1.7.2-preview] - 2020-12-22

com.unity.ml-agents/Editor/BehaviorParametersEditor.cs (16)


// Grab the sensor components, since we need them to determine the observation sizes.
// TODO make these methods of BehaviorParameters
SensorComponent[] sensorComponents;
if (behaviorParameters.UseChildSensors)
{
sensorComponents = behaviorParameters.GetComponentsInChildren<SensorComponent>();
}
else
{
sensorComponents = behaviorParameters.GetComponents<SensorComponent>();
}
var agent = behaviorParameters.gameObject.GetComponent<Agent>();
agent.sensors = new List<ISensor>();
agent.InitializeSensors();
var sensors = agent.sensors.ToArray();
ActuatorComponent[] actuatorComponents;
if (behaviorParameters.UseChildActuators)

// Get the total size of the sensors generated by ObservableAttributes.
// If there are any errors (e.g. unsupported type, write-only properties), display them too.
int observableAttributeSensorTotalSize = 0;
var agent = behaviorParameters.GetComponent<Agent>();
if (agent != null && behaviorParameters.ObservableAttributeHandling != ObservableAttributeOptions.Ignore)
{
List<string> observableErrors = new List<string>();

if (brainParameters != null)
{
var failedChecks = Inference.BarracudaModelParamLoader.CheckModel(
barracudaModel, brainParameters, sensorComponents, actuatorComponents,
barracudaModel, brainParameters, sensors, actuatorComponents,
observableAttributeSensorTotalSize, behaviorParameters.BehaviorType
);
foreach (var check in failedChecks)

com.unity.ml-agents/Runtime/Academy.cs (55)


/// <term>1.4.0</term>
/// <description>Support training analytics sent from python trainer to the editor.</description>
/// </item>
/// <item>
/// <term>1.5.0</term>
/// <description>Support variable length observation training.</description>
/// </item>
const string k_ApiVersion = "1.4.0";
const string k_ApiVersion = "1.5.0";
/// <summary>
/// Unity package version of com.unity.ml-agents.

{
// We try to exchange the first message with Python. If this fails, it means
// no Python Process is ready to train the environment. In this case, the
//environment must use Inference.
// environment must use Inference.
bool initSuccessful = false;
var communicatorInitParams = new CommunicatorInitParameters
{
unityCommunicationVersion = k_ApiVersion,
unityPackageVersion = k_PackageVersion,
name = "AcademySingleton",
CSharpCapabilities = new UnityRLCapabilities()
};
var unityRlInitParameters = Communicator.Initialize(
new CommunicatorInitParameters
{
unityCommunicationVersion = k_ApiVersion,
unityPackageVersion = k_PackageVersion,
name = "AcademySingleton",
CSharpCapabilities = new UnityRLCapabilities()
});
UnityEngine.Random.InitState(unityRlInitParameters.seed);
// We might have inference-only Agents, so set the seed for them too.
m_InferenceSeed = unityRlInitParameters.seed;
TrainerCapabilities = unityRlInitParameters.TrainerCapabilities;
TrainerCapabilities.WarnOnPythonMissingBaseRLCapabilities();
initSuccessful = Communicator.Initialize(
communicatorInitParams,
out var unityRlInitParameters
);
if (initSuccessful)
{
UnityEngine.Random.InitState(unityRlInitParameters.seed);
// We might have inference-only Agents, so set the seed for them too.
m_InferenceSeed = unityRlInitParameters.seed;
TrainerCapabilities = unityRlInitParameters.TrainerCapabilities;
TrainerCapabilities.WarnOnPythonMissingBaseRLCapabilities();
}
else
{
Debug.Log($"Couldn't connect to trainer on port {port} using API version {k_ApiVersion}. Will perform inference instead.");
Communicator = null;
}
catch
catch (Exception ex)
Debug.Log($"" +
$"Couldn't connect to trainer on port {port} using API version {k_ApiVersion}. " +
"Will perform inference instead."
);
Debug.Log($"Unexpected exception when trying to initialize communication: {ex}\nWill perform inference instead.");
if (Communicator != null)
{
Communicator.QuitCommandReceived += OnQuitCommandReceived;

com.unity.ml-agents/Runtime/Agent.cs (4)


/// </summary>
internal void InitializeSensors()
{
if (m_PolicyFactory == null)
{
m_PolicyFactory = GetComponent<BehaviorParameters>();
}
if (m_PolicyFactory.ObservableAttributeHandling != ObservableAttributeOptions.Ignore)
{
var excludeInherited =

com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs (15)


{
observationProto.DimensionProperties.Add((int)dimensionProperties[i]);
}
// Checking trainer compatibility with variable length observations
if (dimensionProperties.Length == 2)
{
if (dimensionProperties[0] == DimensionProperty.VariableSize &&
dimensionProperties[1] == DimensionProperty.None)
{
var trainerCanHandleVarLenObs = Academy.Instance.TrainerCapabilities == null || Academy.Instance.TrainerCapabilities.VariableLengthObservation;
if (!trainerCanHandleVarLenObs)
{
throw new UnityAgentsException("Variable Length Observations are not supported by the trainer");
}
}
}
}
observationProto.Shape.AddRange(shape);

CompressedChannelMapping = proto.CompressedChannelMapping,
HybridActions = proto.HybridActions,
TrainingAnalytics = proto.TrainingAnalytics,
VariableLengthObservation = proto.VariableLengthObservation,
};
}

CompressedChannelMapping = rlCaps.CompressedChannelMapping,
HybridActions = rlCaps.HybridActions,
TrainingAnalytics = rlCaps.TrainingAnalytics,
VariableLengthObservation = rlCaps.VariableLengthObservation,
};
}

com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs (5)


/// Sends the academy parameters through the Communicator.
/// Is used by the academy to send the AcademyParameters to the communicator.
/// </summary>
/// <returns>The External Initialization Parameters received.</returns>
/// <returns>Whether the connection was successful.</returns>
UnityRLInitParameters Initialize(CommunicatorInitParameters initParameters);
/// <param name="initParametersOut">The External Initialization Parameters received</param>
bool Initialize(CommunicatorInitParameters initParameters, out UnityRLInitParameters initParametersOut);
/// <summary>
/// Registers a new Brain to the Communicator.

com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs (147)


internal static bool CheckCommunicationVersionsAreCompatible(
string unityCommunicationVersion,
string pythonApiVersion,
string pythonLibraryVersion)
string pythonApiVersion
)
{
var unityVersion = new Version(unityCommunicationVersion);
var pythonVersion = new Version(pythonApiVersion);

/// Sends the initialization parameters through the Communicator.
/// Is used by the academy to send initialization parameters to the communicator.
/// </summary>
/// <returns>The External Initialization Parameters received.</returns>
/// <returns>Whether the connection was successful.</returns>
public UnityRLInitParameters Initialize(CommunicatorInitParameters initParameters)
/// <param name="initParametersOut">The External Initialization Parameters received.</param>
public bool Initialize(CommunicatorInitParameters initParameters, out UnityRLInitParameters initParametersOut)
{
var academyParameters = new UnityRLInitializationOutputProto
{

{
RlInitializationOutput = academyParameters
},
out input);
var pythonPackageVersion = initializationInput.RlInitializationInput.PackageVersion;
var pythonCommunicationVersion = initializationInput.RlInitializationInput.CommunicationVersion;
var unityCommunicationVersion = initParameters.unityCommunicationVersion;
TrainingAnalytics.SetTrainerInformation(pythonPackageVersion, pythonCommunicationVersion);
out input
);
}
catch (Exception ex)
{
if (ex is RpcException rpcException)
{
var communicationIsCompatible = CheckCommunicationVersionsAreCompatible(unityCommunicationVersion,
pythonCommunicationVersion,
pythonPackageVersion);
// Initialization succeeded part-way. The most likely cause is a mismatch between the communicator
// API strings, so log an explicit warning if that's the case.
if (initializationInput != null && input == null)
{
if (!communicationIsCompatible)
switch (rpcException.Status.StatusCode)
Debug.LogWarningFormat(
"Communication protocol between python ({0}) and Unity ({1}) have different " +
"versions which make them incompatible. Python library version: {2}.",
pythonCommunicationVersion, initParameters.unityCommunicationVersion,
pythonPackageVersion
);
case StatusCode.Unavailable:
// This is the common case where there's no trainer to connect to.
break;
case StatusCode.DeadlineExceeded:
// We don't currently set a deadline for connection, but likely will in the future.
break;
default:
Debug.Log($"Unexpected gRPC exception when trying to initialize communication: {rpcException}");
break;
else
{
Debug.LogWarningFormat(
"Unknown communication error between Python. Python communication protocol: {0}, " +
"Python library version: {1}.",
pythonCommunicationVersion,
pythonPackageVersion
);
}
throw new UnityAgentsException("ICommunicator.Initialize() failed.");
else
{
Debug.Log($"Unexpected exception when trying to initialize communication: {ex}");
}
initParametersOut = new UnityRLInitParameters();
return false;
catch
var pythonPackageVersion = initializationInput.RlInitializationInput.PackageVersion;
var pythonCommunicationVersion = initializationInput.RlInitializationInput.CommunicationVersion;
TrainingAnalytics.SetTrainerInformation(pythonPackageVersion, pythonCommunicationVersion);
var communicationIsCompatible = CheckCommunicationVersionsAreCompatible(
initParameters.unityCommunicationVersion,
pythonCommunicationVersion
);
// Initialization succeeded part-way. The most likely cause is a mismatch between the communicator
// API strings, so log an explicit warning if that's the case.
if (initializationInput != null && input == null)
var exceptionMessage = "The Communicator was unable to connect. Please make sure the External " +
"process is ready to accept communication with Unity.";
// Check for common error condition and add details to the exception message.
var httpProxy = Environment.GetEnvironmentVariable("HTTP_PROXY");
var httpsProxy = Environment.GetEnvironmentVariable("HTTPS_PROXY");
if (httpProxy != null || httpsProxy != null)
if (!communicationIsCompatible)
{
Debug.LogWarningFormat(
"Communication protocol between python ({0}) and Unity ({1}) have different " +
"versions which make them incompatible. Python library version: {2}.",
pythonCommunicationVersion, initParameters.unityCommunicationVersion,
pythonPackageVersion
);
}
else
exceptionMessage += " Try removing HTTP_PROXY and HTTPS_PROXY from the" +
"environment variables and try again.";
Debug.LogWarningFormat(
"Unknown communication error between Python. Python communication protocol: {0}, " +
"Python library version: {1}.",
pythonCommunicationVersion,
pythonPackageVersion
);
throw new UnityAgentsException(exceptionMessage);
initParametersOut = new UnityRLInitParameters();
return false;
return initializationInput.RlInitializationInput.ToUnityRLInitParameters();
initParametersOut = initializationInput.RlInitializationInput.ToUnityRLInitParameters();
return true;
}
/// <summary>

SendCommandEvent(rlInput.Command);
}
UnityInputProto Initialize(UnityOutputProto unityOutput,
out UnityInputProto unityInput)
UnityInputProto Initialize(UnityOutputProto unityOutput, out UnityInputProto unityInput)
{
#if UNITY_EDITOR || UNITY_STANDALONE_WIN || UNITY_STANDALONE_OSX || UNITY_STANDALONE_LINUX
m_IsOpen = true;

}
return result.UnityInput;
#else
throw new UnityAgentsException(
"You cannot perform training on this platform.");
throw new UnityAgentsException("You cannot perform training on this platform.");
#endif
}

{
return null;
}
try
{
var message = m_Client.Exchange(WrapMessage(unityOutput, 200));

QuitCommandReceived?.Invoke();
return message.UnityInput;
}
catch
catch (Exception ex)
if (ex is RpcException rpcException)
{
// Log more verbose errors if they're something the user can possibly do something about.
switch (rpcException.Status.StatusCode)
{
case StatusCode.Unavailable:
// This can happen when python disconnects. Ignore it to avoid noisy logs.
break;
case StatusCode.ResourceExhausted:
// This happens if the message body is too large. There's no way to
// gracefully handle this, but at least we can show the message and the
// user can try to reduce the number of agents or observation sizes.
Debug.LogError($"GRPC Exception: {rpcException.Message}. Disconnecting from trainer.");
break;
default:
// Other unknown errors. Log at INFO level.
Debug.Log($"GRPC Exception: {rpcException.Message}. Disconnecting from trainer.");
break;
}
}
else
{
// Fall-through for other error types
Debug.LogError($"Communication Exception: {ex.Message}. Disconnecting from trainer.");
}
m_IsOpen = false;
QuitCommandReceived?.Invoke();
return null;

com.unity.ml-agents/Runtime/Communicator/UnityRLCapabilities.cs (5)


public bool CompressedChannelMapping;
public bool HybridActions;
public bool TrainingAnalytics;
public bool VariableLengthObservation;
/// <summary>
/// A class holding the capabilities flags for Reinforcement Learning across C# and the Trainer codebase. This

bool concatenatedPngObservations = true,
bool compressedChannelMapping = true,
bool hybridActions = true,
bool trainingAnalytics = true)
bool trainingAnalytics = true,
bool variableLengthObservation = true)
{
BaseRLCapabilities = baseRlCapabilities;
ConcatenatedPngObservations = concatenatedPngObservations;

VariableLengthObservation = variableLengthObservation;
}
/// <summary>

com.unity.ml-agents/Runtime/Grpc/CommunicatorObjects/Capabilities.cs (40)


byte[] descriptorData = global::System.Convert.FromBase64String(
string.Concat(
"CjVtbGFnZW50c19lbnZzL2NvbW11bmljYXRvcl9vYmplY3RzL2NhcGFiaWxp",
"dGllcy5wcm90bxIUY29tbXVuaWNhdG9yX29iamVjdHMirwEKGFVuaXR5UkxD",
"dGllcy5wcm90bxIUY29tbXVuaWNhdG9yX29iamVjdHMi0gEKGFVuaXR5UkxD",
"ASgIEhkKEXRyYWluaW5nQW5hbHl0aWNzGAUgASgIQiWqAiJVbml0eS5NTEFn",
"ZW50cy5Db21tdW5pY2F0b3JPYmplY3RzYgZwcm90bzM="));
"ASgIEhkKEXRyYWluaW5nQW5hbHl0aWNzGAUgASgIEiEKGXZhcmlhYmxlTGVu",
"Z3RoT2JzZXJ2YXRpb24YBiABKAhCJaoCIlVuaXR5Lk1MQWdlbnRzLkNvbW11",
"bmljYXRvck9iamVjdHNiBnByb3RvMw=="));
new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.UnityRLCapabilitiesProto), global::Unity.MLAgents.CommunicatorObjects.UnityRLCapabilitiesProto.Parser, new[]{ "BaseRLCapabilities", "ConcatenatedPngObservations", "CompressedChannelMapping", "HybridActions", "TrainingAnalytics" }, null, null, null)
new pbr::GeneratedClrTypeInfo(typeof(global::Unity.MLAgents.CommunicatorObjects.UnityRLCapabilitiesProto), global::Unity.MLAgents.CommunicatorObjects.UnityRLCapabilitiesProto.Parser, new[]{ "BaseRLCapabilities", "ConcatenatedPngObservations", "CompressedChannelMapping", "HybridActions", "TrainingAnalytics", "VariableLengthObservation" }, null, null, null)
}));
}
#endregion

compressedChannelMapping_ = other.compressedChannelMapping_;
hybridActions_ = other.hybridActions_;
trainingAnalytics_ = other.trainingAnalytics_;
variableLengthObservation_ = other.variableLengthObservation_;
_unknownFields = pb::UnknownFieldSet.Clone(other._unknownFields);
}

}
}
/// <summary>Field number for the "variableLengthObservation" field.</summary>
public const int VariableLengthObservationFieldNumber = 6;
private bool variableLengthObservation_;
/// <summary>
/// Support for variable length observations of rank 2
/// </summary>
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public bool VariableLengthObservation {
get { return variableLengthObservation_; }
set {
variableLengthObservation_ = value;
}
}
[global::System.Diagnostics.DebuggerNonUserCodeAttribute]
public override bool Equals(object other) {
return Equals(other as UnityRLCapabilitiesProto);

if (CompressedChannelMapping != other.CompressedChannelMapping) return false;
if (HybridActions != other.HybridActions) return false;
if (TrainingAnalytics != other.TrainingAnalytics) return false;
if (VariableLengthObservation != other.VariableLengthObservation) return false;
return Equals(_unknownFields, other._unknownFields);
}

if (CompressedChannelMapping != false) hash ^= CompressedChannelMapping.GetHashCode();
if (HybridActions != false) hash ^= HybridActions.GetHashCode();
if (TrainingAnalytics != false) hash ^= TrainingAnalytics.GetHashCode();
if (VariableLengthObservation != false) hash ^= VariableLengthObservation.GetHashCode();
if (_unknownFields != null) {
hash ^= _unknownFields.GetHashCode();
}

if (TrainingAnalytics != false) {
output.WriteRawTag(40);
output.WriteBool(TrainingAnalytics);
}
if (VariableLengthObservation != false) {
output.WriteRawTag(48);
output.WriteBool(VariableLengthObservation);
}
if (_unknownFields != null) {
_unknownFields.WriteTo(output);

if (TrainingAnalytics != false) {
size += 1 + 1;
}
if (VariableLengthObservation != false) {
size += 1 + 1;
}
if (_unknownFields != null) {
size += _unknownFields.CalculateSize();
}

if (other.TrainingAnalytics != false) {
TrainingAnalytics = other.TrainingAnalytics;
}
if (other.VariableLengthObservation != false) {
VariableLengthObservation = other.VariableLengthObservation;
}
_unknownFields = pb::UnknownFieldSet.MergeFrom(_unknownFields, other._unknownFields);
}

}
case 40: {
TrainingAnalytics = input.ReadBool();
break;
}
case 48: {
VariableLengthObservation = input.ReadBool();
break;
}
}

com.unity.ml-agents/Runtime/Inference/BarracudaModelParamLoader.cs (70)


/// <param name="brainParameters">
/// The BrainParameters that are used to verify the compatibility with the InferenceEngine
/// </param>
/// <param name="sensorComponents">Attached sensor components</param>
/// <param name="sensors">Attached sensor components</param>
SensorComponent[] sensorComponents, ActuatorComponent[] actuatorComponents,
ISensor[] sensors, ActuatorComponent[] actuatorComponents,
int observableAttributeTotalSize = 0,
BehaviorType behaviorType = BehaviorType.Default)
{

}
failedModelChecks.AddRange(
CheckInputTensorPresence(model, brainParameters, memorySize, sensorComponents)
CheckInputTensorPresence(model, brainParameters, memorySize, sensors)
CheckInputTensorShape(model, brainParameters, sensorComponents, observableAttributeTotalSize)
CheckInputTensorShape(model, brainParameters, sensors, observableAttributeTotalSize)
);
failedModelChecks.AddRange(
CheckOutputTensorShape(model, brainParameters, actuatorComponents)

/// <param name="memory">
/// The memory size that the model is expecting.
/// </param>
/// <param name="sensorComponents">Array of attached sensor components</param>
/// <param name="sensors">Array of attached sensor components</param>
/// <returns>
/// A IEnumerable of string corresponding to the failed input presence checks.
/// </returns>

int memory,
SensorComponent[] sensorComponents
ISensor[] sensors
)
{
var failedModelChecks = new List<string>();

// If there are not enough Visual Observation Input compared to what the
// sensors expect.
var visObsIndex = 0;
for (var sensorIndex = 0; sensorIndex < sensorComponents.Length; sensorIndex++)
for (var sensorIndex = 0; sensorIndex < sensors.Length; sensorIndex++)
var sensor = sensorComponents[sensorIndex];
var sensor = sensors[sensorIndex];
if (sensor.GetObservationShape().Length == 3)
{
if (!tensorsNames.Contains(

/// Checks that the shape of the visual observation input placeholder is the same as the corresponding sensor.
/// </summary>
/// <param name="tensorProxy">The tensor that is expected by the model</param>
/// <param name="sensorComponent">The sensor that produces the visual observation.</param>
/// <param name="sensor">The sensor that produces the visual observation.</param>
TensorProxy tensorProxy, SensorComponent sensorComponent)
TensorProxy tensorProxy, ISensor sensor)
var shape = sensorComponent.GetObservationShape();
var shape = sensor.GetObservationShape();
var heightBp = shape[0];
var widthBp = shape[1];
var pixelBp = shape[2];

/// Checks that the shape of the rank 2 observation input placeholder is the same as the corresponding sensor.
/// </summary>
/// <param name="tensorProxy">The tensor that is expected by the model</param>
/// <param name="sensorComponent">The sensor that produces the visual observation.</param>
/// <param name="sensor">The sensor that produces the visual observation.</param>
TensorProxy tensorProxy, SensorComponent sensorComponent)
TensorProxy tensorProxy, ISensor sensor)
var shape = sensorComponent.GetObservationShape();
var shape = sensor.GetObservationShape();
var dim1Bp = shape[0];
var dim2Bp = shape[1];
var dim1T = tensorProxy.Channels;

/// <param name="brainParameters">
/// The BrainParameters that are used to verify the compatibility with the InferenceEngine
/// </param>
/// <param name="sensorComponents">Attached sensors</param>
/// <param name="sensors">Attached sensors</param>
Model model, BrainParameters brainParameters, SensorComponent[] sensorComponents,
Model model, BrainParameters brainParameters, ISensor[] sensors,
new Dictionary<string, Func<BrainParameters, TensorProxy, SensorComponent[], int, string>>()
new Dictionary<string, Func<BrainParameters, TensorProxy, ISensor[], int, string>>()
{
{TensorNames.VectorObservationPlaceholder, CheckVectorObsShape},
{TensorNames.PreviousActionPlaceholder, CheckPreviousActionShape},

}
var visObsIndex = 0;
for (var sensorIndex = 0; sensorIndex < sensorComponents.Length; sensorIndex++)
for (var sensorIndex = 0; sensorIndex < sensors.Length; sensorIndex++)
var sensorComponent = sensorComponents[sensorIndex];
if (sensorComponent.GetObservationShape().Length == 3)
var sens = sensors[sensorIndex];
if (sens.GetObservationShape().Length == 3)
(bp, tensor, scs, i) => CheckVisualObsShape(tensor, sensorComponent);
(bp, tensor, scs, i) => CheckVisualObsShape(tensor, sens);
if (sensorComponent.GetObservationShape().Length == 2)
if (sens.GetObservationShape().Length == 2)
(bp, tensor, scs, i) => CheckRankTwoObsShape(tensor, sensorComponent);
(bp, tensor, scs, i) => CheckRankTwoObsShape(tensor, sens);
}
}

else
{
var tester = tensorTester[tensor.name];
var error = tester.Invoke(brainParameters, tensor, sensorComponents, observableAttributeTotalSize);
var error = tester.Invoke(brainParameters, tensor, sensors, observableAttributeTotalSize);
if (error != null)
{
failedModelChecks.Add(error);

/// The BrainParameters that are used to verify the compatibility with the InferenceEngine
/// </param>
/// <param name="tensorProxy">The tensor that is expected by the model</param>
/// <param name="sensorComponents">Array of attached sensor components</param>
/// <param name="sensors">Array of attached sensor components</param>
/// <param name="observableAttributeTotalSize">Sum of the sizes of all ObservableAttributes.</param>
/// <returns>
/// If the Check failed, returns a string containing information about why the

BrainParameters brainParameters, TensorProxy tensorProxy, SensorComponent[] sensorComponents,
BrainParameters brainParameters, TensorProxy tensorProxy, ISensor[] sensors,
int observableAttributeTotalSize)
{
var vecObsSizeBp = brainParameters.VectorObservationSize;

var totalVectorSensorSize = 0;
foreach (var sensorComp in sensorComponents)
foreach (var sens in sensors)
if (sensorComp.GetObservationShape().Length == 1)
if ((sens.GetObservationShape().Length == 1))
totalVectorSensorSize += sensorComp.GetObservationShape()[0];
totalVectorSensorSize += sens.GetObservationShape()[0];
totalVectorSensorSize += observableAttributeTotalSize;
if (vecObsSizeBp * numStackedVector + totalVectorSensorSize != totalVecObsSizeT)
if (totalVectorSensorSize != totalVecObsSizeT)
foreach (var sensorComp in sensorComponents)
foreach (var sensorComp in sensors)
{
if (sensorComp.GetObservationShape().Length == 1)
{

$"but received: \n" +
$"Vector observations: {vecObsSizeBp} x {numStackedVector}\n" +
$"Total [Observable] attributes: {observableAttributeTotalSize}\n" +
$"SensorComponent sizes: {sensorSizes}.";
$"Sensor sizes: {sensorSizes}.";
}
return null;
}

/// The BrainParameters that are used to verify the compatibility with the InferenceEngine
/// </param>
/// <param name="tensorProxy"> The tensor that is expected by the model</param>
/// <param name="sensorComponents">Array of attached sensor components (unused).</param>
/// <param name="sensors">Array of attached sensor components (unused).</param>
SensorComponent[] sensorComponents, int observableAttributeTotalSize)
ISensor[] sensors, int observableAttributeTotalSize)
{
var numberActionsBp = brainParameters.ActionSpec.NumDiscreteActions;
var numberActionsT = tensorProxy.shape[tensorProxy.shape.Length - 1];

com.unity.ml-agents/Runtime/Inference/ModelRunner.cs (6)


SensorShapeValidator m_SensorShapeValidator = new SensorShapeValidator();
bool m_VisualObservationsInitialized;
bool m_ObservationsInitialized;
/// <summary>
/// Initializes the Brain with the Model that it will use when selecting actions for

{
return;
}
if (!m_VisualObservationsInitialized)
if (!m_ObservationsInitialized)
m_VisualObservationsInitialized = true;
m_ObservationsInitialized = true;
}
Profiler.BeginSample("ModelRunner.DecideAction");

com.unity.ml-agents/Runtime/Sensors/BufferSensor.cs (21)


namespace Unity.MLAgents.Sensors
{
internal class BufferSensor : ISensor, IDimensionPropertiesSensor, IBuiltInSensor
/// <summary>
/// A Sensor that allows observing a variable number of entities.
/// </summary>
public class BufferSensor : ISensor, IDimensionPropertiesSensor, IBuiltInSensor
static DimensionProperty[] s_DimensionProperties = new DimensionProperty[]{
DimensionProperty.VariableSize,
DimensionProperty.None
};
public BufferSensor(int maxNumberObs, int obsSize)
{
m_MaxNumObs = maxNumberObs;

/// <inheritdoc/>
public DimensionProperty[] GetDimensionProperties()
{
return new DimensionProperty[]{
DimensionProperty.VariableSize,
DimensionProperty.None
};
return s_DimensionProperties;
}
/// <summary>

/// <param name="obs"> The float array observation</param>
public void AppendObservation(float[] obs)
{
if (obs.Length != m_ObsSize)
{
throw new UnityAgentsException(
"The BufferSensor was expecting an observation of size " +
$"{m_ObsSize} but received {obs.Length} observations instead."
);
}
if (m_CurrentNumObservables >= m_MaxNumObs)
{
return;

com.unity.ml-agents/Runtime/Sensors/BufferSensorComponent.cs (14)


{
/// <summary>
/// A component for BufferSensor.
/// A SensorComponent that creates a <see cref="BufferSensor"/>.
internal class BufferSensorComponent : SensorComponent
public class BufferSensorComponent : SensorComponent
/// <summary>
/// This is how many floats each entity will be represented with. This number
/// is fixed and all entities must have the same representation.
/// </summary>
/// <summary>
/// This is the maximum number of entities the `BufferSensor` will be able to
/// collect.
/// </summary>
private BufferSensor m_Sensor;
/// <inheritdoc/>

com.unity.ml-agents/Runtime/Sensors/CameraSensor.cs (17)


/// <summary>
/// A sensor that wraps a Camera object to generate visual observations for an agent.
/// </summary>
public class CameraSensor : ISensor, IBuiltInSensor
public class CameraSensor : ISensor, IBuiltInSensor, IDimensionPropertiesSensor
{
Camera m_Camera;
int m_Width;

int[] m_Shape;
SensorCompressionType m_CompressionType;
static DimensionProperty[] s_DimensionProperties = new DimensionProperty[] {
DimensionProperty.TranslationalEquivariance,
DimensionProperty.TranslationalEquivariance,
DimensionProperty.None };
/// <summary>
/// The Camera used for rendering the sensor observations.

public int[] GetObservationShape()
{
return m_Shape;
}
/// <summary>
/// Accessor for the dimension properties of a camera sensor. A camera sensor
/// has translational equivariance along width and height and no property along
/// the channels dimension.
/// </summary>
/// <returns></returns>
public DimensionProperty[] GetDimensionProperties()
{
return s_DimensionProperties;
}
/// <summary>

com.unity.ml-agents/Runtime/Sensors/IDimensionPropertiesSensor.cs (2)


/// The Dimension property flags of the observations
/// </summary>
[System.Flags]
internal enum DimensionProperty
public enum DimensionProperty
{
/// <summary>
/// No properties specified.

com.unity.ml-agents/Runtime/SideChannels/SideChannel.cs (14)


using System.Collections.Generic;
using System;
using UnityEngine;
namespace Unity.MLAgents.SideChannels
{

internal void ProcessMessage(byte[] msg)
{
using (var incomingMsg = new IncomingMessage(msg))
try
{
using (var incomingMsg = new IncomingMessage(msg))
{
OnMessageReceived(incomingMsg);
}
}
catch (Exception ex)
OnMessageReceived(incomingMsg);
// Catch all errors in the sidechannel processing, so that a single
// bad SideChannel implementation doesn't take everything down with it.
Debug.LogError($"Error processing SideChannel message: {ex}.\nThe message will be skipped.");
}
}
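To illustrate the effect of this change, here is a hedged sketch of a custom side channel whose handler throws; the `FragileSideChannel` class and its GUID are made up for this example. With the try/catch above, the exception is caught in `ProcessMessage`, an error is logged, and only the offending message is skipped:

```csharp
using System;
using Unity.MLAgents.SideChannels;

public class FragileSideChannel : SideChannel
{
    public FragileSideChannel()
    {
        // Arbitrary channel id chosen for this sketch.
        ChannelId = new Guid("621f0a70-4f87-11ea-a6bf-784f4387d1f7");
    }

    protected override void OnMessageReceived(IncomingMessage msg)
    {
        var text = msg.ReadString();
        if (string.IsNullOrEmpty(text))
        {
            // Previously this would have escaped ProcessMessage; now it is
            // caught and logged, and the message is skipped.
            throw new InvalidOperationException("Malformed message.");
        }
    }
}
```

Registered as usual via `SideChannelManager.RegisterSideChannel(new FragileSideChannel())`, other registered channels keep receiving messages even when this one throws.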

com.unity.ml-agents/Tests/Editor/Communicator/RpcCommunicatorTests.cs (19)


{
var unityVerStr = "1.0.0";
var pythonVerStr = "1.0.0";
var pythonPackageVerStr = "0.16.0";
pythonVerStr,
pythonPackageVerStr));
pythonVerStr));
pythonVerStr,
pythonPackageVerStr));
pythonVerStr));
pythonVerStr,
pythonPackageVerStr));
pythonVerStr));
pythonVerStr,
pythonPackageVerStr));
pythonVerStr));
pythonVerStr,
pythonPackageVerStr));
pythonVerStr));
pythonVerStr,
pythonPackageVerStr));
pythonVerStr));
}
}

com.unity.ml-agents/Tests/Editor/ParameterLoaderTest.cs (30)


var errors = BarracudaModelParamLoader.CheckModel(
model, validBrainParameters,
new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]
new ISensor[] { new VectorSensor(8), sensor_21_20_3.CreateSensor(), sensor_20_22_3.CreateSensor() }, new ActuatorComponent[0]
);
Assert.AreEqual(0, errors.Count()); // There should not be any errors
}

var errors = BarracudaModelParamLoader.CheckModel(
model, validBrainParameters,
new SensorComponent[] { sensor_21_20_3 }, new ActuatorComponent[0]
new ISensor[] { sensor_21_20_3.CreateSensor() }, new ActuatorComponent[0]
);
Assert.AreEqual(0, errors.Count()); // There should not be any errors
}

var errors = BarracudaModelParamLoader.CheckModel(
model, validBrainParameters,
new SensorComponent[] { }, new ActuatorComponent[0]
new ISensor[] { new VectorSensor(validBrainParameters.VectorObservationSize) }, new ActuatorComponent[0]
);
Assert.AreEqual(0, errors.Count()); // There should not be any errors
}

brainParameters.VectorObservationSize = 9; // Invalid observation
var errors = BarracudaModelParamLoader.CheckModel(
model, brainParameters,
new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]
new ISensor[] { sensor_21_20_3.CreateSensor(), sensor_20_22_3.CreateSensor() }, new ActuatorComponent[0]
);
Assert.Greater(errors.Count(), 0);

model, brainParameters,
new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]
new ISensor[] { sensor_21_20_3.CreateSensor(), sensor_20_22_3.CreateSensor() }, new ActuatorComponent[0]
);
Assert.Greater(errors.Count(), 0);
}

var brainParameters = GetDiscrete1vis0vec_2_3action_recurrModelBrainParameters();
brainParameters.VectorObservationSize = 1; // Invalid observation
var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3 }, new ActuatorComponent[0]);
var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new ISensor[] { sensor_21_20_3.CreateSensor() }, new ActuatorComponent[0]);
Assert.Greater(errors.Count(), 0);
}

brainParameters.VectorObservationSize = 9; // Invalid observation
var errors = BarracudaModelParamLoader.CheckModel(
model, brainParameters,
new SensorComponent[] { }, new ActuatorComponent[0]
new ISensor[] { }, new ActuatorComponent[0]
);
Assert.Greater(errors.Count(), 0);

model, brainParameters,
new SensorComponent[] { }, new ActuatorComponent[0]
new ISensor[] { }, new ActuatorComponent[0]
);
Assert.Greater(errors.Count(), 0);
}

var brainParameters = GetContinuous2vis8vec2actionBrainParameters();
brainParameters.ActionSpec = ActionSpec.MakeContinuous(3); // Invalid action
var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]);
var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new ISensor[] { sensor_21_20_3.CreateSensor(), sensor_20_22_3.CreateSensor() }, new ActuatorComponent[0]);
errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]);
errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new ISensor[] { sensor_21_20_3.CreateSensor(), sensor_20_22_3.CreateSensor() }, new ActuatorComponent[0]);
Assert.Greater(errors.Count(), 0);
}

var brainParameters = GetDiscrete1vis0vec_2_3action_recurrModelBrainParameters();
brainParameters.ActionSpec = ActionSpec.MakeDiscrete(3, 3); // Invalid action
var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3 }, new ActuatorComponent[0]);
var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new ISensor[] { sensor_21_20_3.CreateSensor() }, new ActuatorComponent[0]);
errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3 }, new ActuatorComponent[0]);
errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new ISensor[] { sensor_21_20_3.CreateSensor() }, new ActuatorComponent[0]);
Assert.Greater(errors.Count(), 0);
}

var brainParameters = GetHybridBrainParameters();
brainParameters.ActionSpec = new ActionSpec(3, new[] { 3 }); // Invalid discrete action size
var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]);
var errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new ISensor[] { sensor_21_20_3.CreateSensor(), sensor_20_22_3.CreateSensor() }, new ActuatorComponent[0]);
errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]);
errors = BarracudaModelParamLoader.CheckModel(model, brainParameters, new ISensor[] { sensor_21_20_3.CreateSensor(), sensor_20_22_3.CreateSensor() }, new ActuatorComponent[0]);
Assert.Greater(errors.Count(), 0);
}

var brainParameters = GetContinuous2vis8vec2actionBrainParameters();
var errors = BarracudaModelParamLoader.CheckModel(null, brainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 }, new ActuatorComponent[0]);
var errors = BarracudaModelParamLoader.CheckModel(null, brainParameters, new ISensor[] { sensor_21_20_3.CreateSensor(), sensor_20_22_3.CreateSensor() }, new ActuatorComponent[0]);
Assert.Greater(errors.Count(), 0);
}
}

docs/Installation.md (2)


installing ML-Agents. Activate your virtual environment and run from the command line:
```sh
pip3 install torch==1.7.0 -f https://download.pytorch.org/whl/torch_stable.html
pip3 install torch~=1.7.1 -f https://download.pytorch.org/whl/torch_stable.html
```
Note that on Windows, you may also need Microsoft's

docs/Learning-Environment-Design-Agents.md (51)


- [Visual Observation Summary & Best Practices](#visual-observation-summary--best-practices)
- [Raycast Observations](#raycast-observations)
- [RayCast Observation Summary & Best Practices](#raycast-observation-summary--best-practices)
- [Variable Length Observations](#variable-length-observations)
- [Variable Length Observation Summary & Best Practices](#variable-length-observation-summary--best-practices)
- [Actions and Actuators](#actions-and-actuators)
- [Continuous Actions](#continuous-actions)
- [Discrete Actions](#discrete-actions)

for the agent that doesn't require a fully rendered image to convey.
- Use as few rays and tags as necessary to solve the problem in order to improve
learning stability and agent performance.
### Variable Length Observations
It is possible for agents to collect observations from a varying number of
GameObjects by using a `BufferSensor`.
You can add a `BufferSensor` to your Agent by adding a `BufferSensorComponent` to
its GameObject.
The `BufferSensor` can be useful in situations in which the Agent must pay
attention to a varying number of entities (for example, a varying number of
enemies or projectiles).
On the trainer side, the `BufferSensor`
is processed using an attention module. More information about attention
mechanisms can be found [here](https://arxiv.org/abs/1706.03762). Training or
doing inference with variable length observations can be slower than using
a flat vector observation. However, attention mechanisms enable solving
problems that require comparative reasoning between entities in a scene
such as our [Sorter environment](Learning-Environment-Examples.md#sorter).
Note that even though the `BufferSensor` can process a variable number of
entities, you still need to define a maximum number of entities. This is
because our network architecture requires knowing in advance what the shape
of the observations will be. If fewer entities are observed than the maximum, the
observation will be padded with zeros and the trainer will ignore
the padded observations. Note that attention layers are invariant to
the order of the entities, so there is no need to properly "order" the
entities before feeding them into the `BufferSensor`.
The `BufferSensorComponent` Editor inspector has two arguments:
- `Observation Size` : This is how many floats each entity will be
represented with. This number is fixed and all entities must
have the same representation. For example, if the entities you want to
put into the `BufferSensor` are characterized by their position and
velocity, then the `Observation Size` should be 6 floats.
- `Maximum Number of Entities` : This is the maximum number of entities
the `BufferSensor` will be able to collect.
To add an entity's observations to a `BufferSensorComponent`, you need
to call `BufferSensorComponent.AppendObservation()`
with a float array of size `Observation Size` as an argument.
__Note__: Currently, the observations put into the `BufferSensor` are
not normalized; you will need to normalize your observations manually
between -1 and 1.
#### Variable Length Observation Summary & Best Practices
- Attach a `BufferSensorComponent` to use variable length observations
(a minimal usage sketch follows below).
- Call `BufferSensorComponent.AppendObservation()` to add the observations
of an entity to the `BufferSensor`.
- Normalize the entities' observations before feeding them into the `BufferSensor`.
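For concreteness, here is a minimal sketch of an Agent appending per-entity observations to an attached `BufferSensorComponent`. The `TrackerAgent` class, the `enemies` array, and the normalization divisors are illustrative assumptions, not part of the ML-Agents API:

```csharp
using Unity.MLAgents;
using Unity.MLAgents.Sensors;
using UnityEngine;

public class TrackerAgent : Agent
{
    // Assumes a BufferSensorComponent with Observation Size = 6 is
    // attached to the same GameObject in the Inspector.
    BufferSensorComponent m_BufferSensor;

    // Hypothetical set of entities to observe; populated in the Editor.
    public Rigidbody[] enemies;

    public override void Initialize()
    {
        m_BufferSensor = GetComponent<BufferSensorComponent>();
    }

    public override void CollectObservations(VectorSensor sensor)
    {
        foreach (var enemy in enemies)
        {
            var toEnemy = enemy.position - transform.position;
            // Observations are not normalized automatically; scale them to
            // roughly [-1, 1] before appending (divisors assume a 20-unit
            // arena and a 10-unit/s speed cap).
            m_BufferSensor.AppendObservation(new float[]
            {
                toEnemy.x / 20f, toEnemy.y / 20f, toEnemy.z / 20f,
                enemy.velocity.x / 10f, enemy.velocity.y / 10f, enemy.velocity.z / 10f
            });
        }
    }
}
```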
## Actions and Actuators

docs/Learning-Environment-Examples.md (27)


- 37.6 for vector observations
- 34.2 for simple heuristic (pick a random valid move)
- 37.0 for greedy heuristic (pick the highest-scoring valid move)
## Sorter
![Sorter](images/sorter.png)
- Set-up: The Agent is in a circular room with numbered tiles. The values of the
tiles are random between 1 and 20. The tiles present in the room are randomized
at each episode. When the Agent visits a tile, it turns green.
- Goal: Visit all the tiles in ascending order.
- Agents: The environment contains a single Agent.
- Agent Reward Function:
- -.0002 Existential penalty.
- +1 For visiting the right tile
- -1 For visiting the wrong tile
- BehaviorParameters:
- Vector Observations : 4 : 2 floats for position and 2 floats for orientation.
- Variable Length Observations : Between 1 and 20 entities (one for each tile),
each with 23 observations: the first 20 are a one-hot encoding of the value of the tile,
the 21st and 22nd represent the position of the tile relative to the Agent, and the 23rd
is `1` if the tile was visited and `0` otherwise (see the sketch after this list).
- Actions: 3 discrete branched actions corresponding to forward, backward,
sideways movement, as well as rotation.
- Float Properties: One
- num_tiles: The maximum number of tiles to sample.
- Default: 2
- Recommended Minimum: 1
- Recommended Maximum: 20
- Benchmark Mean Reward: Depends on the number of tiles.
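To make the observation layout concrete, here is a sketch of encoding one tile into the 23 floats described above. This is an illustration only, not the actual `SorterAgent` code; all names and the normalization divisor are assumptions:

```csharp
using Unity.MLAgents.Sensors;
using UnityEngine;

public static class SorterObservationSketch
{
    // Encodes a single tile into the 23-float layout and appends it.
    public static void AppendTile(
        BufferSensorComponent bufferSensor,
        Transform agent,
        Transform tile,
        int tileValue,   // 1..20
        bool visited)
    {
        var obs = new float[23];
        // Floats 0-19: one-hot encoding of the tile's value.
        obs[tileValue - 1] = 1f;
        // Floats 20-21: tile position relative to the Agent, normalized
        // by an assumed 20-unit room radius.
        var relative = agent.InverseTransformPoint(tile.position);
        obs[20] = relative.x / 20f;
        obs[21] = relative.z / 20f;
        // Float 22: 1 if the tile was already visited, 0 otherwise.
        obs[22] = visited ? 1f : 0f;
        bufferSensor.AppendObservation(obs);
    }
}
```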

docs/Training-Configuration-File.md (2)


- LSTM does not work well with continuous actions. Please use
discrete actions for better results.
- Since the memories must be sent back and forth between Python and Unity, using
too large a `memory_size` will slow down training.
- Adding a recurrent layer increases the complexity of the neural network; it is
recommended to decrease `num_layers` when using recurrent layers.
- It is required that `memory_size` be divisible by 2.

gym-unity/README.md (2)


def main():
unity_env = UnityEnvironment("./envs/GridWorld")
env = UnityToGymWrapper(unity_env, 0, uint8_visual=True)
logger.configure('./logs') # Çhange to log in a different directory
logger.configure('./logs') # Change to log in a different directory
act = deepq.learn(
env,
"cnn", # conv_only is also a good choice for GridWorld

ml-agents-envs/mlagents_envs/communicator_objects/capabilities_pb2.py (11)


name='mlagents_envs/communicator_objects/capabilities.proto',
package='communicator_objects',
syntax='proto3',
serialized_pb=_b('\n5mlagents_envs/communicator_objects/capabilities.proto\x12\x14\x63ommunicator_objects\"\xaf\x01\n\x18UnityRLCapabilitiesProto\x12\x1a\n\x12\x62\x61seRLCapabilities\x18\x01 \x01(\x08\x12#\n\x1b\x63oncatenatedPngObservations\x18\x02 \x01(\x08\x12 \n\x18\x63ompressedChannelMapping\x18\x03 \x01(\x08\x12\x15\n\rhybridActions\x18\x04 \x01(\x08\x12\x19\n\x11trainingAnalytics\x18\x05 \x01(\x08\x42%\xaa\x02\"Unity.MLAgents.CommunicatorObjectsb\x06proto3')
serialized_pb=_b('\n5mlagents_envs/communicator_objects/capabilities.proto\x12\x14\x63ommunicator_objects\"\xd2\x01\n\x18UnityRLCapabilitiesProto\x12\x1a\n\x12\x62\x61seRLCapabilities\x18\x01 \x01(\x08\x12#\n\x1b\x63oncatenatedPngObservations\x18\x02 \x01(\x08\x12 \n\x18\x63ompressedChannelMapping\x18\x03 \x01(\x08\x12\x15\n\rhybridActions\x18\x04 \x01(\x08\x12\x19\n\x11trainingAnalytics\x18\x05 \x01(\x08\x12!\n\x19variableLengthObservation\x18\x06 \x01(\x08\x42%\xaa\x02\"Unity.MLAgents.CommunicatorObjectsb\x06proto3')
)

message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='variableLengthObservation', full_name='communicator_objects.UnityRLCapabilitiesProto.variableLengthObservation', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],

oneofs=[
],
serialized_start=80,
serialized_end=255,
serialized_end=290,
)
DESCRIPTOR.message_types_by_name['UnityRLCapabilitiesProto'] = _UNITYRLCAPABILITIESPROTO

ml-agents-envs/mlagents_envs/communicator_objects/capabilities_pb2.pyi (6)


compressedChannelMapping = ... # type: builtin___bool
hybridActions = ... # type: builtin___bool
trainingAnalytics = ... # type: builtin___bool
variableLengthObservation = ... # type: builtin___bool
def __init__(self,
*,

hybridActions : typing___Optional[builtin___bool] = None,
trainingAnalytics : typing___Optional[builtin___bool] = None,
variableLengthObservation : typing___Optional[builtin___bool] = None,
) -> None: ...
@classmethod
def FromString(cls, s: builtin___bytes) -> UnityRLCapabilitiesProto: ...

def ClearField(self, field_name: typing_extensions___Literal[u"baseRLCapabilities",u"compressedChannelMapping",u"concatenatedPngObservations",u"hybridActions",u"trainingAnalytics"]) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"baseRLCapabilities",u"compressedChannelMapping",u"concatenatedPngObservations",u"hybridActions",u"trainingAnalytics",u"variableLengthObservation"]) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"baseRLCapabilities",b"baseRLCapabilities",u"compressedChannelMapping",b"compressedChannelMapping",u"concatenatedPngObservations",b"concatenatedPngObservations",u"hybridActions",b"hybridActions",u"trainingAnalytics",b"trainingAnalytics"]) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"baseRLCapabilities",b"baseRLCapabilities",u"compressedChannelMapping",b"compressedChannelMapping",u"concatenatedPngObservations",b"concatenatedPngObservations",u"hybridActions",b"hybridActions",u"trainingAnalytics",b"trainingAnalytics",u"variableLengthObservation",b"variableLengthObservation"]) -> None: ...

ml-agents-envs/mlagents_envs/environment.py (4)


# * 1.2.0 - support compression mapping for stacked compressed observations.
# * 1.3.0 - support action spaces with both continuous and discrete actions.
# * 1.4.0 - support training analytics sent from python trainer to the editor.
API_VERSION = "1.4.0"
# * 1.5.0 - support variable length observation training.
API_VERSION = "1.5.0"
# Default port that the editor listens on. If an environment executable
# isn't specified, this port will be used.

capabilities.compressedChannelMapping = True
capabilities.hybridActions = True
capabilities.trainingAnalytics = True
capabilities.variableLengthObservation = True
return capabilities
@staticmethod

ml-agents-envs/mlagents_envs/rpc_utils.py (16)


@timed
def observation_to_np_array(
def _observation_to_np_array(
obs: ObservationProto, expected_shape: Optional[Iterable[int]] = None
) -> np.ndarray:
"""

@timed
def _process_visual_observation(
def _process_maybe_compressed_observation(
obs_index: int,
shape: Tuple[int, int, int],
agent_info_list: Collection[AgentInfoProto],

batched_visual = [
observation_to_np_array(agent_obs.observations[obs_index], shape)
_observation_to_np_array(agent_obs.observations[obs_index], shape)
for agent_obs in agent_info_list
]
return np.array(batched_visual, dtype=np.float32)

@timed
def _process_vector_observation(
def _process_rank_one_or_two_observation(
obs_index: int, shape: Tuple[int, ...], agent_info_list: Collection[AgentInfoProto]
) -> np.ndarray:
if len(agent_info_list) == 0:

if is_visual:
obs_shape = cast(Tuple[int, int, int], observation_specs.shape)
decision_obs_list.append(
_process_visual_observation(
_process_maybe_compressed_observation(
_process_visual_observation(
_process_maybe_compressed_observation(
_process_vector_observation(
_process_rank_one_or_two_observation(
_process_vector_observation(
_process_rank_one_or_two_observation(
obs_index, observation_specs.shape, terminal_agent_info_list
)
)
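
The renames make the dispatch criterion explicit: rank-3 observations may arrive PNG-compressed, while everything else is a plain rank-1 or rank-2 float tensor. A hypothetical wrapper (process_observation is not in the diff) that shows the intent:

from mlagents_envs.rpc_utils import (
    _process_maybe_compressed_observation,
    _process_rank_one_or_two_observation,
)

def process_observation(obs_index, shape, agent_info_list):
    # Images (and only images) can be PNG-compressed on the wire.
    if len(shape) == 3:
        return _process_maybe_compressed_observation(obs_index, shape, agent_info_list)
    return _process_rank_one_or_two_observation(obs_index, shape, agent_info_list)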

14
ml-agents-envs/mlagents_envs/tests/test_rpc_utils.py


from mlagents_envs.rpc_utils import (
behavior_spec_from_proto,
process_pixels,
_process_visual_observation,
_process_vector_observation,
_process_maybe_compressed_observation,
_process_rank_one_or_two_observation,
steps_from_proto,
)
from PIL import Image

shapes = [(3,), (4,)]
list_proto = generate_list_agent_proto(n_agents, shapes)
for obs_index, shape in enumerate(shapes):
arr = _process_vector_observation(obs_index, shape, list_proto)
arr = _process_rank_one_or_two_observation(obs_index, shape, list_proto)
assert list(arr.shape) == ([n_agents] + list(shape))
assert np.allclose(arr, 0.1, atol=0.01)

ap2 = AgentInfoProto()
ap2.observations.extend([proto_obs_2])
ap_list = [ap1, ap2]
arr = _process_visual_observation(0, (128, 64, 3), ap_list)
arr = _process_maybe_compressed_observation(0, (128, 64, 3), ap_list)
assert list(arr.shape) == [2, 128, 64, 3]
assert np.allclose(arr[0, :, :, :], in_array_1, atol=0.01)
assert np.allclose(arr[1, :, :, :], in_array_2, atol=0.01)

ap2 = AgentInfoProto()
ap2.observations.extend([proto_obs_2])
ap_list = [ap1, ap2]
arr = _process_visual_observation(0, (128, 64, 1), ap_list)
arr = _process_maybe_compressed_observation(0, (128, 64, 1), ap_list)
assert list(arr.shape) == [2, 128, 64, 1]
assert np.allclose(arr[0, :, :, :], expected_out_array_1, atol=0.01)
assert np.allclose(arr[1, :, :, :], expected_out_array_2, atol=0.01)

ap1 = AgentInfoProto()
ap1.observations.extend([proto_obs_1])
ap_list = [ap1]
arr = _process_visual_observation(0, (128, 64, 8), ap_list)
arr = _process_maybe_compressed_observation(0, (128, 64, 8), ap_list)
assert list(arr.shape) == [1, 128, 64, 8]
assert np.allclose(arr[0, :, :, :], expected_out_array_1, atol=0.01)

ap1.observations.extend([proto_obs_1])
ap_list = [ap1]
with pytest.raises(UnityObservationException):
_process_visual_observation(0, (128, 42, 3), ap_list)
_process_maybe_compressed_observation(0, (128, 42, 3), ap_list)
def test_batched_step_result_from_proto():

40
ml-agents/mlagents/trainers/ghost/trainer.py


next_learning_team = self.controller.get_learning_team
# CASE 1: Current learning team is managed by this GhostTrainer.
# If the learning team changes, the following loop over queues will push the
# new policy into the policy queue for the new learning agent if
# that policy is managed by this GhostTrainer. Otherwise, it will save the current snapshot.
# CASE 2: Current learning team is managed by a different GhostTrainer.
# If the learning team changes to a team managed by this GhostTrainer, this loop
# will push the current_snapshot into the correct queue. Otherwise,
# it will continue skipping and swap_snapshot will continue to handle
# pushing fixed snapshots.
# Case 3: No team change. The if statement just continues to push the policy
# Case 1: No team change. The if statement just continues to push the policy
# into the correct queue (or not if not learning team).
for brain_name in self._internal_policy_queues:
internal_policy_queue = self._internal_policy_queues[brain_name]

except AgentManagerQueue.Empty:
pass
if next_learning_team in self._team_to_name_to_policy_queue:
continue
if (
self._learning_team == next_learning_team
and next_learning_team in self._team_to_name_to_policy_queue
):
name_to_policy_queue = self._team_to_name_to_policy_queue[
next_learning_team
]

policy = self.get_policy(behavior_id)
policy.load_weights(self.current_policy_snapshot[brain_name])
name_to_policy_queue[brain_name].put(policy)
# CASE 2: Current learning team is managed by this GhostTrainer.
# If the learning team changes, the following loop over queues will push the
# new policy into the policy queue for the new learning agent if
# that policy is managed by this GhostTrainer. Otherwise, it will save the current snapshot.
# CASE 3: Current learning team is managed by a different GhostTrainer.
# If the learning team changes to a team managed by this GhostTrainer, this loop
# will push the current_snapshot into the correct queue. Otherwise,
# it will continue skipping and swap_snapshot will continue to handle
# pushing fixed snapshots.
if (
self._learning_team != next_learning_team
and next_learning_team in self._team_to_name_to_policy_queue
):
name_to_policy_queue = self._team_to_name_to_policy_queue[
next_learning_team
]
for brain_name in name_to_policy_queue:
behavior_id = create_name_behavior_id(brain_name, next_learning_team)
policy = self.get_policy(behavior_id)
policy.load_weights(self.current_policy_snapshot[brain_name])
name_to_policy_queue[brain_name].put(policy)
# Note save and swap should be on different step counters.
# We don't want to save unless the policy is learning.
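
Condensed, the routing described by the CASE comments reduces to two pushes and a fall-through. A sketch with hypothetical callables standing in for the queue operations:

def route_policy(learning_team, next_team, my_teams, push_policy, push_snapshot):
    if learning_team == next_team and next_team in my_teams:
        push_policy()      # this trainer keeps feeding the learning team
    elif learning_team != next_team and next_team in my_teams:
        push_snapshot()    # team handed to this trainer: push current snapshot
    # else: the team belongs to another GhostTrainer; swap_snapshot keeps
    # serving fixed snapshots to the non-learning side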

9
ml-agents/mlagents/trainers/tests/torch/test_ghost.py


VECTOR_ACTION_SPACE = 1
VECTOR_OBS_SPACE = 8
DISCRETE_ACTION_SPACE = [3, 3, 3, 2]
BUFFER_INIT_SAMPLES = 513
BUFFER_INIT_SAMPLES = 10241
NUM_AGENTS = 12

assert policy_queue0.empty() and not policy_queue1.empty()
# clear
policy_queue1.get_nowait()
mock_specs = mb.setup_test_behavior_specs(
False,
False,
vector_action_space=VECTOR_ACTION_SPACE,
vector_obs_space=VECTOR_OBS_SPACE,
)
buffer = mb.simulate_rollout(BUFFER_INIT_SAMPLES, mock_specs)
# Mock out reward signal eval

49
ml-agents/mlagents/trainers/torch/attention.py


from mlagents.torch_utils import torch
import warnings
from typing import Tuple, Optional, List
from mlagents.trainers.torch.layers import (
LinearEncoder,

from mlagents.trainers.exception import UnityTrainerException
def get_zero_entities_mask(observations: List[torch.Tensor]) -> List[torch.Tensor]:
def get_zero_entities_mask(entities: List[torch.Tensor]) -> List[torch.Tensor]:
"""
Takes a List of Tensors and returns a List of mask Tensors, with 1 where the
input was all zeros (along dimension 2) and 0 otherwise. This is used in the Attention

if exporting_to_onnx.is_exporting():
with warnings.catch_warnings():
# We ignore a TracerWarning from PyTorch that warns that doing
# shape[n].item() will cause the trace to be incorrect (the trace might
# not generalize to other inputs)
# We ignore this warning because we know the model will always be
# run with inputs of the same shape
warnings.simplefilter("ignore")
# When exporting to ONNX, we want to transpose the entities. This is
# because ONNX only supports input in NCHW (channel-first) format.
# Barracuda also expects its data in NCHW.
entities = [
torch.transpose(obs, 2, 1).reshape(
-1, obs.shape[1].item(), obs.shape[2].item()
)
for obs in entities
]
(torch.sum(ent ** 2, axis=2) < 0.01).float() for ent in observations
(torch.sum(ent ** 2, axis=2) < 0.01).float() for ent in entities
]
return key_masks
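
For illustration, an all-zero entity row is treated as padding and masked with 1.0 (toy tensors, not from the diff):

import torch

entities = torch.tensor([[[0.0, 0.0], [1.0, 2.0]]])  # (batch=1, 2 entities, size 2)
masks = [(torch.sum(ent ** 2, dim=2) < 0.01).float() for ent in [entities]]
print(masks[0])  # tensor([[1., 0.]]) -- the first entity is padding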

)
def forward(self, x_self: torch.Tensor, entities: torch.Tensor) -> torch.Tensor:
num_entities = self.entity_num_max_elements
if num_entities < 0:
if exporting_to_onnx.is_exporting():
raise UnityTrainerException(
"Trying to export an attention mechanism that doesn't have a set max \
number of elements."
)
num_entities = entities.shape[1]
if exporting_to_onnx.is_exporting():
# When exporting to ONNX, we want to transpose the entities. This is
# because ONNX only supports input in NCHW (channel-first) format.
# Barracuda also expects its data in NCHW.
entities = torch.transpose(entities, 2, 1).reshape(
-1, num_entities, self.entity_size
)
num_entities = self.entity_num_max_elements
if num_entities < 0:
if exporting_to_onnx.is_exporting():
raise UnityTrainerException(
"Trying to export an attention mechanism that doesn't have a set max \
number of elements."
)
num_entities = entities.shape[1]
expanded_self = x_self.reshape(-1, 1, self.self_size)
expanded_self = torch.cat([expanded_self] * num_entities, dim=1)
# Concatenate all observations with self
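
A toy illustration of the transpose above: Barracuda and ONNX deliver channel-first input, so a (batch, num_entities, entity_size) tensor arrives as (batch, entity_size, num_entities) and must be swapped back (shapes chosen arbitrarily):

import torch

batch, num_entities, entity_size = 2, 5, 8
onnx_input = torch.zeros(batch, entity_size, num_entities)  # channel-first
entities = torch.transpose(onnx_input, 2, 1).reshape(-1, num_entities, entity_size)
assert entities.shape == (batch, num_entities, entity_size)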

29
ml-agents/tests/yamato/training_int_tests.py


import argparse
import json
import os
import shutil
import sys

log_output_path = f"{get_base_output_path()}/inference.{model_extension}.txt"
# 10 minutes for inference is more than enough
process_timeout = 10 * 60
# Try to gracefully exit a few seconds before that.
model_override_timeout = process_timeout - 15
exe_path = exes[0]
args = [
exe_path,

"1",
"--mlagents-override-model-extension",
model_extension,
"--mlagents-quit-after-seconds",
str(model_override_timeout),
timeout = 15 * 60 # 15 minutes for inference is more than enough
res = subprocess.run(args, timeout=timeout)
res = subprocess.run(args, timeout=process_timeout)
end_time = time.time()
if res.returncode != 0:
print("Error running inference!")

else:
print(f"Inference succeeded! Took {end_time - start_time} seconds")
print(f"Inference finished! Took {end_time - start_time} seconds")
# Check the artifacts directory for the timers, so we can get the gauges
timer_file = f"{exe_path}_Data/ML-Agents/Timers/3DBall_timers.json"
with open(timer_file) as f:
timer_data = json.load(f)
gauges = timer_data.get("gauges", {})
rewards = gauges.get("Override_3DBall.CumulativeReward", {})
max_reward = rewards.get("max")
if max_reward is None:
print(
"Unable to find rewards in timer file. This usually indicates a problem with Barracuda or inference."
)
return False
# We could check that the rewards are over a threshold, but since we train for so short a time,
# the values could be highly variable. So don't do it for now.
return True
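
The timeout arithmetic above is a graceful-shutdown race: the executable is asked to quit 15 seconds before Python would forcibly kill it. A sketch of the pattern (run_with_grace is hypothetical; the command-line flag comes from this diff):

import subprocess

def run_with_grace(args, hard_timeout=10 * 60, grace=15):
    # Ask the app to exit on its own shortly before subprocess.run's
    # TimeoutExpired would terminate it.
    args = args + ["--mlagents-quit-after-seconds", str(hard_timeout - grace)]
    return subprocess.run(args, timeout=hard_timeout)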

3
protobuf-definitions/proto/mlagents_envs/communicator_objects/capabilities.proto


// support for training analytics
bool trainingAnalytics = 5;
// Support for variable length observations of rank 2
bool variableLengthObservation = 6;
}

8
Project/Assets/ML-Agents/Examples/Sorter.meta


fileFormatVersion: 2
guid: 5921b34611f764a849f03ecb648faaed
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

105
config/ppo/Sorter_curriculum.yaml


behaviors:
Sorter:
trainer_type: ppo
hyperparameters:
batch_size: 512
buffer_size: 40960
learning_rate: 0.0003
beta: 0.005
epsilon: 0.2
lambd: 0.95
num_epoch: 3
learning_rate_schedule: constant
network_settings:
normalize: False
hidden_units: 128
num_layers: 2
vis_encode_type: simple
reward_signals:
extrinsic:
gamma: 0.99
strength: 1.0
keep_checkpoints: 5
max_steps: 5000000
time_horizon: 256
summary_freq: 10000
threaded: true
environment_parameters:
num_tiles:
curriculum:
- name: Lesson0 # The '-' is important as this is a list
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.3
value: 2.0
- name: Lesson1
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.4
value: 4.0
- name: Lesson2
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.45
value: 6.0
- name: Lesson3
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.5
value: 8.0
- name: Lesson4
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.55
value: 10.0
- name: Lesson5
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.6
value: 12.0
- name: Lesson6
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.65
value: 14.0
- name: Lesson7
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.7
value: 16.0
- name: Lesson8
completion_criteria:
measure: progress
behavior: Sorter
signal_smoothing: true
min_lesson_length: 100
threshold: 0.75
value: 18.0
- name: Lesson9
value: 20.0
env_settings:
num_envs: 8
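
The nine graded lessons follow a fixed pattern: the threshold climbs from 0.3 to 0.4 and then by 0.05 per lesson, while num_tiles grows by 2, with a final lesson at 20 tiles that has no completion criteria. A sketch that regenerates the list, e.g. to try a different schedule:

thresholds = [0.3, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75]
lessons = [
    {
        "name": f"Lesson{i}",
        "completion_criteria": {
            "measure": "progress",
            "behavior": "Sorter",
            "signal_smoothing": True,
            "min_lesson_length": 100,
            "threshold": t,
        },
        "value": 2.0 * (i + 1),
    }
    for i, t in enumerate(thresholds)
]
lessons.append({"name": "Lesson9", "value": 20.0})  # final lesson: no criteria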

1001
docs/images/sorter.png
File diff suppressed because it is too large to display.

8
Project/Assets/ML-Agents/Examples/Sorter/Meshes.meta


fileFormatVersion: 2
guid: 21f2df9a3b371479883c5f6a9c1f5314
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

63
Project/Assets/ML-Agents/Examples/Sorter/Meshes/ArenaWalls.fbx


(Binary FBX mesh: Kaydara FBX exported from Blender 2.91.0 via the stable FBX IO add-on; binary contents not shown.)

247
Project/Assets/ML-Agents/Examples/Sorter/Meshes/ArenaWalls.fbx.meta


fileFormatVersion: 2
guid: f6ecb5c7cab484e639c060714bfd6d51
ModelImporter:
serializedVersion: 23
fileIDToRecycleName:
100000: //RootNode
100002: BezierCurve
100004: Camera
100006: Circle
100008: Circle Arena
100010: Circle Arena.001
100012: Circle Arena.002
100014: Circle.001
100016: Circle.002
100018: Circle.003
100020: Circle.004
100022: Cone
100024: Cone.002
100026: Cone.003
100028: Cone.004
100030: Cone.005
100032: Icosphere
100034: Icosphere.001
100036: Icosphere.002
100038: Icosphere.003
100040: Icosphere.004
100042: Light
100044: Mball
100046: Roundcube.001
100048: Roundcube.002
100050: Roundcube.003
100052: SantaHat
100054: Shield
100056: Shield.001
100058: Text
100060: Tree
400000: //RootNode
400002: BezierCurve
400004: Camera
400006: Circle
400008: Circle Arena
400010: Circle Arena.001
400012: Circle Arena.002
400014: Circle.001
400016: Circle.002
400018: Circle.003
400020: Circle.004
400022: Cone
400024: Cone.002
400026: Cone.003
400028: Cone.004
400030: Cone.005
400032: Icosphere
400034: Icosphere.001
400036: Icosphere.002
400038: Icosphere.003
400040: Icosphere.004
400042: Light
400044: Mball
400046: Roundcube.001
400048: Roundcube.002
400050: Roundcube.003
400052: SantaHat
400054: Shield
400056: Shield.001
400058: Text
400060: Tree
2000000: Camera
2100000: No Name
2100002: Red
2100004: White
2100006: green
2100008: Brown
2300000: //RootNode
2300002: BezierCurve
2300004: Circle
2300006: Circle Arena
2300008: Circle Arena.001
2300010: Circle Arena.002
2300012: Circle.001
2300014: Circle.002
2300016: Circle.003
2300018: Circle.004
2300020: Cone
2300022: Cone.002
2300024: Cone.003
2300026: Cone.004
2300028: Cone.005
2300030: Icosphere
2300032: Icosphere.001
2300034: Icosphere.002
2300036: Icosphere.003
2300038: Icosphere.004
2300040: Mball
2300042: Roundcube.001
2300044: Roundcube.002
2300046: Roundcube.003
2300048: SantaHat
2300050: Shield
2300052: Shield.001
2300054: Text
2300056: Tree
3300000: //RootNode
3300002: BezierCurve
3300004: Circle
3300006: Circle Arena
3300008: Circle Arena.001
3300010: Circle Arena.002
3300012: Circle.001
3300014: Circle.002
3300016: Circle.003
3300018: Circle.004
3300020: Cone
3300022: Cone.002
3300024: Cone.003
3300026: Cone.004
3300028: Cone.005
3300030: Icosphere
3300032: Icosphere.001
3300034: Icosphere.002
3300036: Icosphere.003
3300038: Icosphere.004
3300040: Mball
3300042: Roundcube.001
3300044: Roundcube.002
3300046: Roundcube.003
3300048: SantaHat
3300050: Shield
3300052: Shield.001
3300054: Text
3300056: Tree
4300000: Circle Arena.002
4300002: Shield
4300004: Roundcube.001
4300006: Roundcube.002
4300008: Shield.001
4300010: Circle Arena
4300012: Text
4300014: Circle Arena.001
4300016: Cone
4300018: SantaHat
4300020: Cone.002
4300022: Cone.003
4300024: Cone.004
4300026: Cone.005
4300028: Roundcube.003
4300030: Icosphere
4300032: Mball
4300034: BezierCurve
4300036: Circle
4300038: Circle.001
4300040: Icosphere.001
4300042: Icosphere.002
4300044: Circle.002
4300046: Circle.003
4300048: Circle.004
4300050: Tree
4300052: Icosphere.003
4300054: Icosphere.004
10800000: Light
2186277476908879412: ImportLogs
externalObjects: {}
materials:
importMaterials: 1
materialName: 0
materialSearch: 1
materialLocation: 1
animations:
legacyGenerateAnimations: 4
bakeSimulation: 0
resampleCurves: 1
optimizeGameObjects: 0
motionNodeName:
rigImportErrors:
rigImportWarnings:
animationImportErrors:
animationImportWarnings:
animationRetargetingWarnings:
animationDoRetargetingWarnings: 0
importAnimatedCustomProperties: 0
importConstraints: 0
animationCompression: 1
animationRotationError: 0.5
animationPositionError: 0.5
animationScaleError: 0.5
animationWrapMode: 0
extraExposedTransformPaths: []
extraUserProperties: []
clipAnimations: []
isReadable: 1
meshes:
lODScreenPercentages: []
globalScale: 1
meshCompression: 0
addColliders: 0
useSRGBMaterialColor: 1
importVisibility: 1
importBlendShapes: 1
importCameras: 1
importLights: 1
swapUVChannels: 0
generateSecondaryUV: 0
useFileUnits: 1
optimizeMeshForGPU: 1
keepQuads: 0
weldVertices: 1
preserveHierarchy: 0
indexFormat: 0
secondaryUVAngleDistortion: 8
secondaryUVAreaDistortion: 15.000001
secondaryUVHardAngle: 88
secondaryUVPackMargin: 4
useFileScale: 1
previousCalculatedGlobalScale: 1
hasPreviousCalculatedGlobalScale: 0
tangentSpace:
normalSmoothAngle: 60
normalImportMode: 0
tangentImportMode: 3
normalCalculationMode: 4
legacyComputeAllNormalsFromSmoothingGroupsWhenMeshHasBlendShapes: 0
blendShapeNormalImportMode: 1
normalSmoothingSource: 0
importAnimation: 1
copyAvatar: 0
humanDescription:
serializedVersion: 2
human: []
skeleton: []
armTwist: 0.5
foreArmTwist: 0.5
upperLegTwist: 0.5
legTwist: 0.5
armStretch: 0.05
legStretch: 0.05
feetSpacing: 0
rootMotionBoneName:
hasTranslationDoF: 0
hasExtraRoot: 0
skeletonHasParents: 1
lastHumanDescriptionAvatarSource: {instanceID: 0}
animationType: 0
humanoidOversampling: 1
additionalBone: 0
userData:
assetBundleName:
assetBundleVariant:

8
Project/Assets/ML-Agents/Examples/Sorter/Prefabs.meta


fileFormatVersion: 2
guid: af7cee3bddc2e4ed595824b3c6d542b6
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

8
Project/Assets/ML-Agents/Examples/Sorter/Scenes.meta


fileFormatVersion: 2
guid: 4c130af3da8f146a795356f021688b89
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

8
Project/Assets/ML-Agents/Examples/Sorter/Scripts.meta


fileFormatVersion: 2
guid: 7e13230b4597f444eb241e0309a786b4
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

8
Project/Assets/ML-Agents/Examples/Sorter/TFModels.meta


fileFormatVersion: 2
guid: 03d978c974e06423baa61d796282ad7b
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

15
Project/Assets/ML-Agents/Examples/Sorter/TFModels/Sorter.onnx.meta


fileFormatVersion: 2
guid: a2b17bcb0df4d448893e800c34d87c4c
ScriptedImporter:
fileIDToRecycleName:
11400000: main obj
11400002: model data
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:
script: {fileID: 11500000, guid: 683b6cb6d0a474744822c888b46772c9, type: 3}
optimizeModel: 1
forceArbitraryBatchSize: 1
treatErrorsAsWarnings: 0
importMode: 1

1001
Project/Assets/ML-Agents/Examples/Sorter/TFModels/Sorter.onnx
File diff suppressed because it is too large to display.

7
Project/Assets/ML-Agents/Examples/Sorter/Prefabs/Area.prefab.meta


fileFormatVersion: 2
guid: 1cf3ef0a332884c299335f3fbe8a21fc
PrefabImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

1001
Project/Assets/ML-Agents/Examples/Sorter/Prefabs/Area.prefab
File diff suppressed because it is too large to display.

11
Project/Assets/ML-Agents/Examples/Sorter/Scripts/SorterAgent.cs.meta


fileFormatVersion: 2
guid: 107ccb3d53379468eb5ba1b7ac443919
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

11
Project/Assets/ML-Agents/Examples/Sorter/Scripts/NumberTile.cs.meta


fileFormatVersion: 2
guid: 03b2e6d9493cc4a92acf7f3b8b438aa4
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

34
Project/Assets/ML-Agents/Examples/Sorter/Scripts/NumberTile.cs


using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class NumberTile : MonoBehaviour
{
public int NumberValue;
public Material DefaultMaterial;
public Material SuccessMaterial;
private bool m_Visited = false;
private MeshRenderer m_Renderer;
public bool IsVisited
{
get { return m_Visited; }
}
public void VisitTile()
{
m_Renderer.sharedMaterial = SuccessMaterial;
m_Visited = true;
}
public void ResetTile()
{
if (m_Renderer is null)
{
m_Renderer = GetComponentInChildren<MeshRenderer>();
}
m_Renderer.sharedMaterial = DefaultMaterial;
m_Visited = false;
}
}

273
Project/Assets/ML-Agents/Examples/Sorter/Scripts/SorterAgent.cs


using System.Collections.Generic;
using UnityEngine;
using Unity.MLAgents;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Sensors;
using Random = UnityEngine.Random;
public class SorterAgent : Agent
{
[Range(1, 20)]
public int DefaultMaxNumTiles;
private const int k_HighestTileValue = 20;
int m_NumberOfTilesToSpawn;
int m_MaxNumberOfTiles;
PushBlockSettings m_PushBlockSettings;
Rigidbody m_AgentRb;
// The BufferSensorComponent is the Sensor that allows the Agent to observe
// a variable number of items (here, numbered tiles)
BufferSensorComponent m_BufferSensor;
public List<NumberTile> NumberTilesList = new List<NumberTile>();
private List<NumberTile> CurrentlyVisibleTilesList = new List<NumberTile>();
private List<Transform> AlreadyTouchedList = new List<Transform>();
private List<int> m_UsedPositionsList = new List<int>();
private Vector3 m_StartingPos;
GameObject m_Area;
EnvironmentParameters m_ResetParams;
private int m_NextExpectedTileIndex;
public override void Initialize()
{
m_Area = transform.parent.gameObject;
m_MaxNumberOfTiles = k_HighestTileValue;
m_ResetParams = Academy.Instance.EnvironmentParameters;
m_BufferSensor = GetComponent<BufferSensorComponent>();
m_PushBlockSettings = FindObjectOfType<PushBlockSettings>();
m_AgentRb = GetComponent<Rigidbody>();
m_StartingPos = transform.position;
}
public override void OnEpisodeBegin()
{
m_MaxNumberOfTiles = (int)m_ResetParams.GetWithDefault("num_tiles", DefaultMaxNumTiles);
m_NumberOfTilesToSpawn = Random.Range(1, m_MaxNumberOfTiles + 1);
SelectTilesToShow();
SetTilePositions();
transform.position = m_StartingPos;
m_AgentRb.velocity = Vector3.zero;
m_AgentRb.angularVelocity = Vector3.zero;
}
public override void CollectObservations(VectorSensor sensor)
{
sensor.AddObservation((transform.position.x - m_Area.transform.position.x) / 20f);
sensor.AddObservation((transform.position.z - m_Area.transform.position.z) / 20f);
sensor.AddObservation(transform.forward.x);
sensor.AddObservation(transform.forward.z);
foreach (var item in CurrentlyVisibleTilesList)
{
// Each observation / tile in the BufferSensor will have 23 values
// The first 20 are a one-hot encoding of the value of the tile
// The 21st and 22nd are the position of the tile relative to the agent
// The 23rd is a boolean: 1 if the tile was visited already and 0 otherwise
float[] listObservation = new float[k_HighestTileValue + 3];
listObservation[item.NumberValue] = 1.0f;
var tileTransform = item.transform.GetChild(1);
listObservation[k_HighestTileValue] = (tileTransform.position.x - transform.position.x) / 20f;
listObservation[k_HighestTileValue + 1] = (tileTransform.position.z - transform.position.z) / 20f;
listObservation[k_HighestTileValue + 2] = item.IsVisited ? 1.0f : 0.0f;
// Here, the observation for the tile is added to the BufferSensor
m_BufferSensor.AppendObservation(listObservation);
}
}
private void OnCollisionEnter(Collision col)
{
if (!col.gameObject.CompareTag("tile"))
{
return;
}
if (AlreadyTouchedList.Contains(col.transform))
{
return;
}
if (col.transform.parent != CurrentlyVisibleTilesList[m_NextExpectedTileIndex].transform)
{
// The Agent Failed
AddReward(-1);
EndEpisode();
}
else
{
// The Agent Succeeded
AddReward(1);
var tile = col.gameObject.GetComponentInParent<NumberTile>();
tile.VisitTile();
m_NextExpectedTileIndex++;
AlreadyTouchedList.Add(col.transform);
//We got all of them. Can reset now.
if (m_NextExpectedTileIndex == m_NumberOfTilesToSpawn)
{
EndEpisode();
}
}
}
void SetTilePositions()
{
m_UsedPositionsList.Clear();
//Disable all. We will enable the ones selected
foreach (var item in NumberTilesList)
{
item.ResetTile();
item.gameObject.SetActive(false);
}
foreach (var item in CurrentlyVisibleTilesList)
{
//Select a random spawn angle
bool posChosen = false;
int rndPosIndx = 0;
while (!posChosen)
{
rndPosIndx = Random.Range(0, k_HighestTileValue);
if (!m_UsedPositionsList.Contains(rndPosIndx))
{
m_UsedPositionsList.Add(rndPosIndx);
posChosen = true;
}
}
item.transform.localRotation = Quaternion.Euler(0, rndPosIndx * (360f / k_HighestTileValue), 0);
item.gameObject.SetActive(true);
}
}
void SelectTilesToShow()
{
CurrentlyVisibleTilesList.Clear();
AlreadyTouchedList.Clear();
int numLeft = m_NumberOfTilesToSpawn;
while (numLeft > 0)
{
int rndInt = Random.Range(0, k_HighestTileValue);
var tmp = NumberTilesList[rndInt];
if (!CurrentlyVisibleTilesList.Contains(tmp))
{
CurrentlyVisibleTilesList.Add(tmp);
numLeft--;
}
}
//Sort Ascending
CurrentlyVisibleTilesList.Sort((x, y) => x.NumberValue.CompareTo(y.NumberValue));
m_NextExpectedTileIndex = 0;
}
/// <summary>
/// Moves the agent according to the selected action.
/// </summary>
public void MoveAgent(ActionSegment<int> act)
{
var dirToGo = Vector3.zero;
var rotateDir = Vector3.zero;
var forwardAxis = act[0];
var rightAxis = act[1];
var rotateAxis = act[2];
switch (forwardAxis)
{
case 1:
dirToGo = transform.forward * 1f;
break;
case 2:
dirToGo = transform.forward * -1f;
break;
}
switch (rightAxis)
{
case 1:
dirToGo = transform.right * 1f;
break;
case 2:
dirToGo = transform.right * -1f;
break;
}
switch (rotateAxis)
{
case 1:
rotateDir = transform.up * -1f;
break;
case 2:
rotateDir = transform.up * 1f;
break;
}
transform.Rotate(rotateDir, Time.deltaTime * 200f);
m_AgentRb.AddForce(dirToGo * m_PushBlockSettings.agentRunSpeed,
ForceMode.VelocityChange);
}
/// <summary>
/// Called every step of the engine. Here the agent takes an action.
/// </summary>
public override void OnActionReceived(ActionBuffers actionBuffers)
{
// Move the agent using the action.
MoveAgent(actionBuffers.DiscreteActions);
// Penalty given each step to encourage agent to finish task quickly.
AddReward(-1f / MaxStep);
}
public override void Heuristic(in ActionBuffers actionsOut)
{
var discreteActionsOut = actionsOut.DiscreteActions;
discreteActionsOut.Clear();
//forward
if (Input.GetKey(KeyCode.W))
{
discreteActionsOut[0] = 1;
}
if (Input.GetKey(KeyCode.S))
{
discreteActionsOut[0] = 2;
}
//rotate
if (Input.GetKey(KeyCode.A))
{
discreteActionsOut[2] = 1;
}
if (Input.GetKey(KeyCode.D))
{
discreteActionsOut[2] = 2;
}
//right
if (Input.GetKey(KeyCode.E))
{
discreteActionsOut[1] = 1;
}
if (Input.GetKey(KeyCode.Q))
{
discreteActionsOut[1] = 2;
}
}
}
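
The 23-float layout that CollectObservations appends per tile is easy to misread from the C# alone, so here is a small numpy sketch of a single BufferSensor entry (values hypothetical; k_HighestTileValue = 20 as in the script):

import numpy as np

K = 20
tile_value, dx, dz, visited = 7, 3.5, -2.0, False
obs = np.zeros(K + 3, dtype=np.float32)
obs[tile_value] = 1.0                 # indices 0..19: one-hot tile number
obs[K] = dx / 20.0                    # index 20: x offset, agent -> tile
obs[K + 1] = dz / 20.0                # index 21: z offset
obs[K + 2] = 1.0 if visited else 0.0  # index 22: already visited?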

9
Project/Assets/ML-Agents/Examples/Sorter/Scenes/Sorter.unity.meta


fileFormatVersion: 2
guid: f23c5f09c95ad48768a41974a2f1523c
timeCreated: 1506808980
licenseType: Pro
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

1001
Project/Assets/ML-Agents/Examples/Sorter/Scenes/Sorter.unity
File diff suppressed because it is too large to display.
