浏览代码

Merge branch 'master' into hh/develop/loco-crawler-variable-speed

/hh-32-observation-crawler
HH 4 年前
当前提交
a9d9ea4c
共有 42 个文件被更改,包括 534 次插入和 245 次删除
  1. 10
      DevProject/Packages/manifest.json
  2. 4
      DevProject/ProjectSettings/ProjectVersion.txt
  3. 60
      Project/Assets/ML-Agents/Examples/Basic/Prefabs/Basic.prefab
  4. 53
      Project/Assets/ML-Agents/Examples/Basic/Scenes/Basic.unity
  5. 23
      Project/Assets/ML-Agents/Examples/Basic/Scripts/BasicController.cs
  6. 15
      com.unity.ml-agents/CHANGELOG.md
  7. 14
      com.unity.ml-agents/Editor/BehaviorParametersEditor.cs
  8. 7
      com.unity.ml-agents/Runtime/Academy.cs
  9. 17
      com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs
  10. 9
      com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs
  11. 3
      com.unity.ml-agents/Runtime/Actuators/ActuatorDiscreteActionMask.cs
  12. 49
      com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs
  13. 16
      com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs
  14. 2
      com.unity.ml-agents/Runtime/Actuators/IActuator.cs
  15. 10
      com.unity.ml-agents/Runtime/Actuators/VectorActuator.cs
  16. 25
      com.unity.ml-agents/Runtime/Agent.cs
  17. 11
      com.unity.ml-agents/Runtime/Agent.deprecated.cs
  18. 34
      com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs
  19. 5
      com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs
  20. 27
      com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs
  21. 16
      com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs
  22. 8
      com.unity.ml-agents/Runtime/Demonstrations/DemonstrationWriter.cs
  23. 3
      com.unity.ml-agents/Runtime/DiscreteActionMasker.cs
  24. 90
      com.unity.ml-agents/Runtime/Inference/BarracudaModelParamLoader.cs
  25. 10
      com.unity.ml-agents/Runtime/Inference/ModelRunner.cs
  26. 13
      com.unity.ml-agents/Runtime/Inference/TensorApplier.cs
  27. 7
      com.unity.ml-agents/Runtime/Policies/BarracudaPolicy.cs
  28. 31
      com.unity.ml-agents/Runtime/Policies/BehaviorParameters.cs
  29. 4
      com.unity.ml-agents/Runtime/Policies/HeuristicPolicy.cs
  30. 9
      com.unity.ml-agents/Runtime/Policies/RemotePolicy.cs
  31. 2
      com.unity.ml-agents/Tests/Editor/Actuators/ActuatorManagerTests.cs
  32. 4
      com.unity.ml-agents/Tests/Editor/Actuators/TestActuator.cs
  33. 3
      com.unity.ml-agents/Tests/Editor/BehaviorParameterTests.cs
  34. 18
      com.unity.ml-agents/Tests/Editor/Communicator/GrpcExtensionsTests.cs
  35. 5
      com.unity.ml-agents/Tests/Editor/EditModeTestInternalBrainTensorApplier.cs
  36. 31
      com.unity.ml-agents/Tests/Editor/ModelRunnerTest.cs
  37. 33
      com.unity.ml-agents/Tests/Editor/ParameterLoaderTest.cs
  38. 6
      com.unity.ml-agents/package.json
  39. 2
      config/ppo/StrikersVsGoalie.yaml
  40. 2
      test_requirements.txt
  41. 85
      Project/Assets/ML-Agents/Examples/Basic/Scripts/BasicActuatorComponent.cs
  42. 3
      Project/Assets/ML-Agents/Examples/Basic/Scripts/BasicActuatorComponent.cs.meta

10
DevProject/Packages/manifest.json


"dependencies": {
"com.unity.2d.sprite": "1.0.0",
"com.unity.2d.tilemap": "1.0.0",
"com.unity.ads": "3.4.4",
"com.unity.ads": "3.4.7",
"com.unity.ide.vscode": "1.1.4",
"com.unity.ide.vscode": "1.2.1",
"com.unity.package-validation-suite": "0.11.0-preview",
"com.unity.package-validation-suite": "0.14.0-preview",
"com.unity.test-framework": "1.1.13",
"com.unity.test-framework": "1.1.14",
"com.unity.xr.legacyinputhelpers": "1.3.11",
"com.unity.xr.legacyinputhelpers": "2.1.4",
"com.unity.modules.ai": "1.0.0",
"com.unity.modules.androidjni": "1.0.0",
"com.unity.modules.animation": "1.0.0",

4
DevProject/ProjectSettings/ProjectVersion.txt


m_EditorVersion: 2019.3.11f1
m_EditorVersionWithRevision: 2019.3.11f1 (ceef2d848e70)
m_EditorVersion: 2019.4.7f1
m_EditorVersionWithRevision: 2019.4.7f1 (e992b1a16e65)

60
Project/Assets/ML-Agents/Examples/Basic/Prefabs/Basic.prefab


- component: {fileID: 114502619508238574}
- component: {fileID: 114827551040495112}
- component: {fileID: 6790639061807687247}
- component: {fileID: 748290217173149218}
- component: {fileID: 4562193454555387360}
- component: {fileID: 2066777822704547994}
m_Layer: 0
m_Name: BasicAgent
m_TagString: Untagged

m_Name:
m_EditorClassIdentifier:
m_BrainParameters:
vectorObservationSize: 20
numStackedVectorObservations: 1
vectorActionSize: 03000000
vectorActionDescriptions: []
vectorActionSpaceType: 0
VectorObservationSize: 0
NumStackedVectorObservations: 1
VectorActionSize:
VectorActionDescriptions: []
VectorActionSpaceType: 0
m_Model: {fileID: 11400000, guid: 468c183196f1844f69e125c99dd135a1, type: 3}
m_InferenceDevice: 0
m_BehaviorType: 0

m_UseChildActuators: 1
m_ObservableAttributeHandling: 0
--- !u!114 &114827551040495112
MonoBehaviour:
m_ObjectHideFlags: 0

m_Name:
m_EditorClassIdentifier:
timeBetweenDecisionsAtInference: 0.15
m_Position: 0
position: 0
largeGoal: {fileID: 1445405232822318}
smallGoal: {fileID: 1054019269138802}
--- !u!114 &6790639061807687247

m_Script: {fileID: 11500000, guid: 3a6da8f78a394c6ab027688eab81e04d, type: 3}
m_Name:
m_EditorClassIdentifier:
debugCommandLineOverride:
--- !u!114 &748290217173149218
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1263463520136984}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 88b6042bc9a5d4aa58d931eae49442e5, type: 3}
m_Name:
m_EditorClassIdentifier:
agentParameters:
maxStep: 0
hasUpgradedFromAgentParameters: 1
MaxStep: 0
--- !u!114 &4562193454555387360
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1263463520136984}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 4ce4e199dabb494e8764b09f4c378098, type: 3}
m_Name:
m_EditorClassIdentifier:
basicController: {fileID: 114827551040495112}
--- !u!114 &2066777822704547994
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1263463520136984}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 6ee410d6d45349218d5e69bb2a347c63, type: 3}
m_Name:
m_EditorClassIdentifier:
basicController: {fileID: 114827551040495112}
--- !u!1 &1284220331185348
GameObject:
m_ObjectHideFlags: 0

53
Project/Assets/ML-Agents/Examples/Basic/Scenes/Basic.unity


objectReference: {fileID: 0}
m_RemovedComponents: []
m_SourcePrefab: {fileID: 100100000, guid: 5889392e3f05b448a8a06c5def6c2dec, type: 3}
--- !u!1 &1335907378 stripped
GameObject:
m_CorrespondingSourceObject: {fileID: 1263463520136984, guid: c5eb289873aca4f5a8cc59c7464ab7c1,
type: 3}
m_PrefabInstance: {fileID: 1783603361}
m_PrefabAsset: {fileID: 0}
--- !u!114 &1335907380
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1335907378}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 88b6042bc9a5d4aa58d931eae49442e5, type: 3}
m_Name:
m_EditorClassIdentifier:
agentParameters:
maxStep: 0
hasUpgradedFromAgentParameters: 1
maxStep: 0
--- !u!114 &1335907381 stripped
MonoBehaviour:
m_CorrespondingSourceObject: {fileID: 114827551040495112, guid: c5eb289873aca4f5a8cc59c7464ab7c1,
type: 3}
m_PrefabInstance: {fileID: 1783603361}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1335907378}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 624480a72e46148118ab2e2d89b537de, type: 3}
m_Name:
m_EditorClassIdentifier:
--- !u!114 &1335907384
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 1335907378}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 6ee410d6d45349218d5e69bb2a347c63, type: 3}
m_Name:
m_EditorClassIdentifier:
basicController: {fileID: 1335907381}
--- !u!1001 &1502457254
PrefabInstance:
m_ObjectHideFlags: 0

propertyPath: m_BrainParameters.vectorObservationSize
value: 0
objectReference: {fileID: 0}
- target: {fileID: 114502619508238574, guid: c5eb289873aca4f5a8cc59c7464ab7c1,
type: 3}
propertyPath: m_BrainParameters.VectorObservationSize
value: 0
objectReference: {fileID: 0}
m_RemovedComponents: []
m_SourcePrefab: {fileID: 100100000, guid: c5eb289873aca4f5a8cc59c7464ab7c1, type: 3}
--- !u!1 &1889211226

maximumDeltaTime: 0.33333334
solverIterations: 6
solverVelocityIterations: 1
reuseCollisionCallbacks: 1
--- !u!4 &1889211228
Transform:
m_ObjectHideFlags: 0

23
Project/Assets/ML-Agents/Examples/Basic/Scripts/BasicController.cs


/// <summary>
/// Controls the movement of the GameObject based on the actions received.
/// </summary>
/// <param name="vectorAction"></param>
public void ApplyAction(ActionSegment<int> vectorAction)
/// <param name="direction"></param>
public void MoveDirection(int direction)
var movement = vectorAction[0];
var direction = 0;
switch (movement)
{
case 1:
direction = -1;
break;
case 2:
direction = 1;
break;
}
position += direction;
if (position < k_MinPosition) { position = k_MinPosition; }
if (position > k_MaxPosition) { position = k_MaxPosition; }

}
if (Academy.Instance.IsCommunicatorOn)
{
// Apply the previous step's actions
ApplyAction(m_Agent.GetStoredActionBuffers().DiscreteActions);
m_Agent?.RequestDecision();
}
else

// Apply the previous step's actions
ApplyAction(m_Agent.GetStoredActionBuffers().DiscreteActions);
m_TimeSinceDecision = 0f;
m_Agent?.RequestDecision();
}

15
com.unity.ml-agents/CHANGELOG.md


### Minor Changes
#### com.unity.ml-agents (C#)
#### ml-agents / ml-agents-envs / gym-unity (Python)
### Bug Fixes
#### com.unity.ml-agents (C#)
#### ml-agents / ml-agents-envs / gym-unity (Python)
## [1.3.0-preview] - 2020-08-12
### Major Changes
#### com.unity.ml-agents (C#)
#### ml-agents / ml-agents-envs / gym-unity (Python)
### Minor Changes
#### com.unity.ml-agents (C#)
- Update Barracuda to 1.0.2.
- Enabled C# formatting using `dotnet-format`.
#### ml-agents / ml-agents-envs / gym-unity (Python)

- The package dependencies were updated to include the built-in packages that are used also. (#4384)
#### ml-agents / ml-agents-envs / gym-unity (Python)
## [1.3.0-preview] - 2020-08-12

14
com.unity.ml-agents/Editor/BehaviorParametersEditor.cs


using System.Collections.Generic;
using UnityEditor;
using Unity.Barracuda;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Policies;
using Unity.MLAgents.Sensors;
using Unity.MLAgents.Sensors.Reflection;

var behaviorParameters = (BehaviorParameters)target;
// Grab the sensor components, since we need them to determine the observation sizes.
// TODO make these methods of BehaviorParameters
SensorComponent[] sensorComponents;
if (behaviorParameters.UseChildSensors)
{

{
sensorComponents = behaviorParameters.GetComponents<SensorComponent>();
}
ActuatorComponent[] actuatorComponents;
if (behaviorParameters.UseChildActuators)
{
actuatorComponents = behaviorParameters.GetComponentsInChildren<ActuatorComponent>();
}
else
{
actuatorComponents = behaviorParameters.GetComponents<ActuatorComponent>();
}
// Get the total size of the sensors generated by ObservableAttributes.

if (brainParameters != null)
{
var failedChecks = Inference.BarracudaModelParamLoader.CheckModel(
barracudaModel, brainParameters, sensorComponents,
barracudaModel, brainParameters, sensorComponents, actuatorComponents,
observableAttributeSensorTotalSize, behaviorParameters.BehaviorType
);
foreach (var check in failedChecks)

7
com.unity.ml-agents/Runtime/Academy.cs


#if UNITY_EDITOR
using UnityEditor;
#endif
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Inference;
using Unity.MLAgents.Policies;
using Unity.MLAgents.SideChannels;

/// NNModel and the InferenceDevice as provided.
/// </summary>
/// <param name="model">The NNModel the ModelRunner must use.</param>
/// <param name="brainParameters">The BrainParameters used to create the ModelRunner.</param>
/// <param name="actionSpec"> Description of the action spaces for the Agent.</param>
NNModel model, BrainParameters brainParameters, InferenceDevice inferenceDevice)
NNModel model, ActionSpec actionSpec, InferenceDevice inferenceDevice)
modelRunner = new ModelRunner(model, brainParameters, inferenceDevice, m_InferenceSeed);
modelRunner = new ModelRunner(model, actionSpec, inferenceDevice, m_InferenceSeed);
m_ModelRunners.Add(modelRunner);
m_InferenceSeed++;
}

17
com.unity.ml-agents/Runtime/Actuators/ActionSpec.cs


/// <param name="branchSizes">The array of branch sizes for the discrete action space. Each index
/// contains the number of actions available for that branch.</param>
/// <returns>An Discrete ActionSpec initialized with the array of branch sizes.</returns>
public static ActionSpec MakeDiscrete(int[] branchSizes)
public static ActionSpec MakeDiscrete(params int[] branchSizes)
{
var numActions = branchSizes.Length;
var actuatorSpace = new ActionSpec(0, numActions, branchSizes);

ActionSpec(int numContinuousActions, int numDiscreteActions, int[] branchSizes = null)
internal ActionSpec(int numContinuousActions, int numDiscreteActions, int[] branchSizes = null)
}
/// <summary>
/// Temporary check that the ActionSpec uses either all continuous or all discrete actions.
/// This should be removed once the trainer supports them.
/// </summary>
/// <exception cref="UnityAgentsException"></exception>
internal void CheckNotHybrid()
{
if (NumContinuousActions > 0 && NumDiscreteActions > 0)
{
throw new UnityAgentsException("ActionSpecs must be all continuous or all discrete.");
}
}
}
}

9
com.unity.ml-agents/Runtime/Actuators/ActuatorComponent.cs


/// Editor components for creating Actuators. Generally an IActuator component should
/// have a corresponding ActuatorComponent.
/// </summary>
internal abstract class ActuatorComponent : MonoBehaviour
public abstract class ActuatorComponent : MonoBehaviour
{
/// <summary>
/// Create the IActuator. This is called by the Agent when it is initialized.

/// <summary>
/// The specification of the Action space for this ActuatorComponent.
/// This must produce the same results as the corresponding IActuator's ActionSpec.
/// </summary>
/// <seealso cref="ActionSpec"/>
public abstract ActionSpec ActionSpec { get; }
}
}

3
com.unity.ml-agents/Runtime/Actuators/ActuatorDiscreteActionMask.cs


/// </summary>
public int CurrentBranchOffset { get; set; }
internal ActuatorDiscreteActionMask(IList<IActuator> actuators, int sumOfDiscreteBranchSizes, int numBranches)
internal ActuatorDiscreteActionMask(IList<IActuator> actuators, int sumOfDiscreteBranchSizes, int numBranches, int[] branchSizes = null)
m_BranchSizes = branchSizes;
}
/// <inheritdoc cref="IDiscreteActionMask.WriteMask"/>

49
com.unity.ml-agents/Runtime/Actuators/ActuatorManager.cs


// An implementation of IDiscreteActionMask that allows for writing to it based on an offset.
ActuatorDiscreteActionMask m_DiscreteActionMask;
ActionSpec m_CombinedActionSpec;
/// <summary>
/// Flag used to check if our IActuators are ready for execution.
/// </summary>

var discreteActions = numDiscreteBranches == 0 ? ActionSegment<int>.Empty : new ActionSegment<int>(new int[numDiscreteBranches]);
StoredActions = new ActionBuffers(continuousActions, discreteActions);
m_DiscreteActionMask = new ActuatorDiscreteActionMask(actuators, sumOfDiscreteBranches, numDiscreteBranches);
m_CombinedActionSpec = CombineActionSpecs(actuators);
m_DiscreteActionMask = new ActuatorDiscreteActionMask(actuators, sumOfDiscreteBranches, numDiscreteBranches, m_CombinedActionSpec.BranchSizes);
}
internal static ActionSpec CombineActionSpecs(IList<IActuator> actuators)
{
int numContinuousActions = 0;
int numDiscreteActions = 0;
foreach (var actuator in actuators)
{
numContinuousActions += actuator.ActionSpec.NumContinuousActions;
numDiscreteActions += actuator.ActionSpec.NumDiscreteActions;
}
int[] combinedBranchSizes;
if (numDiscreteActions == 0)
{
combinedBranchSizes = Array.Empty<int>();
}
else
{
combinedBranchSizes = new int[numDiscreteActions];
var start = 0;
for (var i = 0; i < actuators.Count; i++)
{
var branchSizes = actuators[i].ActionSpec.BranchSizes;
if (branchSizes != null)
{
Array.Copy(branchSizes, 0, combinedBranchSizes, start, branchSizes.Length);
start += branchSizes.Length;
}
}
}
return new ActionSpec(numContinuousActions, numDiscreteActions, combinedBranchSizes);
}
/// <summary>
/// Returns an ActionSpec representing the concatenation of all IActuator's ActionSpecs
/// </summary>
/// <returns></returns>
public ActionSpec GetCombinedActionSpec()
{
ReadyActuatorsForExecution();
return m_CombinedActionSpec;
}
/// <summary>

16
com.unity.ml-agents/Runtime/Actuators/IActionReceiver.cs


/// <seealso cref="IActionReceiver.OnActionReceived"/>
void WriteDiscreteActionMask(IDiscreteActionMask actionMask);
}
/// <summary>
/// Helper methods to be shared by all classes that implement <see cref="IActionReceiver"/>.
/// </summary>
public static class ActionReceiverExtensions
{
/// <summary>
/// Returns the number of discrete branches + the number of continuous actions.
/// </summary>
/// <param name="actionReceiver"></param>
/// <returns></returns>
public static int TotalNumberOfActions(this IActionReceiver actionReceiver)
{
return actionReceiver.ActionSpec.NumContinuousActions + actionReceiver.ActionSpec.NumDiscreteActions;
}
}
}

2
com.unity.ml-agents/Runtime/Actuators/IActuator.cs


/// </summary>
public interface IActuator : IActionReceiver
{
int TotalNumberOfActions { get; }
/// <summary>
/// Gets the name of this IActuator which will be used to sort it.
/// </summary>

10
com.unity.ml-agents/Runtime/Actuators/VectorActuator.cs


Name = name + suffix;
}
/// <inheritdoc />
/// <inheritdoc />
public void OnActionReceived(ActionBuffers actionBuffers)
{
ActionBuffers = actionBuffers;

/// <inheritdoc />
public void WriteDiscreteActionMask(IDiscreteActionMask actionMask)
{
m_ActionReceiver.WriteDiscreteActionMask(actionMask);

/// Returns the number of discrete branches + the number of continuous actions.
/// </summary>
public int TotalNumberOfActions => ActionSpec.NumContinuousActions +
ActionSpec.NumDiscreteActions;
/// <summary>
/// <inheritdoc />
public string Name { get; }
}
}

25
com.unity.ml-agents/Runtime/Agent.cs


Academy.Instance.DecideAction += DecideAction;
Academy.Instance.AgentAct += AgentStep;
Academy.Instance.AgentForceReset += _AgentReset;
m_Brain = m_PolicyFactory.GeneratePolicy(Heuristic);
using (TimerStack.Instance.Scoped("InitializeActuators"))
{
InitializeActuators();
}
m_Brain = m_PolicyFactory.GeneratePolicy(m_ActuatorManager.GetCombinedActionSpec(), Heuristic);
ResetData();
Initialize();

}
using (TimerStack.Instance.Scoped("InitializeActuators"))
{
InitializeActuators();
}
m_Info.storedVectorActions = new float[m_ActuatorManager.TotalNumberOfActions];

return;
}
m_Brain?.Dispose();
m_Brain = m_PolicyFactory.GeneratePolicy(Heuristic);
m_Brain = m_PolicyFactory.GeneratePolicy(m_ActuatorManager.GetCombinedActionSpec(), Heuristic);
}
/// <summary>

}
// Support legacy OnActionReceived
// TODO don't set this up if the sizes are 0?
m_LegacyActionCache = new float[m_VectorActuator.TotalNumberOfActions];
m_LegacyActionCache = new float[m_VectorActuator.TotalNumberOfActions()];
m_ActuatorManager.Add(m_VectorActuator);

/// actions. When using discrete actions, the agent will not perform the masked
/// action.
/// </summary>
/// <param name="actionMasker">
/// The action masker for the agent.
/// <param name="actionMask">
/// The action mask for the agent.
/// </param>
/// <remarks>
/// When using Discrete Control, you can prevent the Agent from using a certain

public virtual void OnEpisodeBegin() { }
/// <summary>
/// Gets the last ActionBuffer for this agent.
/// Gets the most recent ActionBuffer for this agent.
/// <returns>The most recent ActionBuffer for this agent</returns>
public ActionBuffers GetStoredActionBuffers()
{
return m_ActuatorManager.StoredActions;

11
com.unity.ml-agents/Runtime/Agent.deprecated.cs


{
public partial class Agent
{
/// <summary>
/// Deprecated, use <see cref="WriteDiscreteActionMask"/> instead.
/// </summary>
/// <param name="actionMasker"></param>
/// This method passes in a float array that is to be populated with actions. The actions
/// This method passes in a float array that is to be populated with actions.
/// </summary>
/// <param name="actionsOut"></param>
public virtual void Heuristic(float[] actionsOut)

}
/// <summary>
/// Deprecated, use <see cref="OnActionReceived(ActionBuffers)"/> instead.
/// </summary>
/// <param name="vectorAction"></param>
public virtual void OnActionReceived(float[] vectorAction) { }
/// <summary>

{
return m_Info.storedVectorActions;
}
}
}

34
com.unity.ml-agents/Runtime/Communicator/GrpcExtensions.cs


using Unity.MLAgents.CommunicatorObjects;
using UnityEngine;
using System.Runtime.CompilerServices;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Sensors;
using Unity.MLAgents.Demonstrations;
using Unity.MLAgents.Policies;

#region BrainParameters
/// <summary>
/// Converts a Brain into to a Protobuf BrainInfoProto so it can be sent
/// Converts a BrainParameters into a BrainParametersProto so it can be sent.
/// </summary>
/// <returns>The BrainInfoProto generated.</returns>
/// <param name="bp">The instance of BrainParameter to extend.</param>

{
brainParametersProto.VectorActionDescriptions.AddRange(bp.VectorActionDescriptions);
}
return brainParametersProto;
}
/// <summary>
/// Converts an ActionSpec into a Protobuf BrainParametersProto so it can be sent.
/// </summary>
/// <returns>The BrainParametersProto generated.</returns>
/// <param name="actionSpec"> Description of the action spaces for the Agent.</param>
/// <param name="name">The name of the brain.</param>
/// <param name="isTraining">Whether or not the Brain is training.</param>
public static BrainParametersProto ToBrainParametersProto(this ActionSpec actionSpec, string name, bool isTraining)
{
actionSpec.CheckNotHybrid();
var brainParametersProto = new BrainParametersProto
{
BrainName = name,
IsTraining = isTraining
};
if (actionSpec.NumContinuousActions > 0)
{
brainParametersProto.VectorActionSize.Add(actionSpec.NumContinuousActions);
brainParametersProto.VectorActionSpaceType = SpaceTypeProto.Continuous;
}
else if (actionSpec.NumDiscreteActions > 0)
{
brainParametersProto.VectorActionSize.AddRange(actionSpec.BranchSizes);
brainParametersProto.VectorActionSpaceType = SpaceTypeProto.Discrete;
}
// TODO handle ActionDescriptions?
return brainParametersProto;
}

5
com.unity.ml-agents/Runtime/Communicator/ICommunicator.cs


using System;
using System.Collections.Generic;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Policies;
using Unity.MLAgents.Sensors;

/// Registers a new Brain to the Communicator.
/// </summary>
/// <param name="name">The name or key uniquely identifying the Brain.</param>
/// <param name="brainParameters">The Parameters for the Brain being registered.</param>
void SubscribeBrain(string name, BrainParameters brainParameters);
/// <param name="actionSpec"> Description of the action spaces for the Agent.</param>
void SubscribeBrain(string name, ActionSpec actionSpec);
/// <summary>
/// Sends the observations of one Agent.

27
com.unity.ml-agents/Runtime/Communicator/RpcCommunicator.cs


using System.Collections.Generic;
using System.Linq;
using UnityEngine;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.CommunicatorObjects;
using Unity.MLAgents.Sensors;
using Unity.MLAgents.Policies;

// Brains that we have sent over the communicator with agents.
HashSet<string> m_SentBrainKeys = new HashSet<string>();
Dictionary<string, BrainParameters> m_UnsentBrainKeys = new Dictionary<string, BrainParameters>();
Dictionary<string, ActionSpec> m_UnsentBrainKeys = new Dictionary<string, ActionSpec>();
#if UNITY_EDITOR || UNITY_STANDALONE_WIN || UNITY_STANDALONE_OSX || UNITY_STANDALONE_LINUX

/// Adds the brain to the list of brains which will be sending information to External.
/// </summary>
/// <param name="brainKey">Brain key.</param>
/// <param name="brainParameters">Brain parameters needed to send to the trainer.</param>
public void SubscribeBrain(string brainKey, BrainParameters brainParameters)
/// <param name="actionSpec"> Description of the action spaces for the Agent.</param>
public void SubscribeBrain(string brainKey, ActionSpec actionSpec)
{
if (m_BehaviorNames.Contains(brainKey))
{

new UnityRLOutputProto.Types.ListAgentInfoProto()
);
CacheBrainParameters(brainKey, brainParameters);
CacheActionSpec(brainKey, actionSpec);
}
void UpdateEnvironmentWithInput(UnityRLInputProto rlInput)

message.RlOutput.SideChannel = ByteString.CopyFrom(messageAggregated);
var input = Exchange(message);
UpdateSentBrainParameters(tempUnityRlInitializationOutput);
UpdateSentActionSpec(tempUnityRlInitializationOutput);
foreach (var k in m_CurrentUnityRlOutput.AgentInfos.Keys)
{

};
}
void CacheBrainParameters(string behaviorName, BrainParameters brainParameters)
void CacheActionSpec(string behaviorName, ActionSpec actionSpec)
{
if (m_SentBrainKeys.Contains(behaviorName))
{

// TODO We should check that if m_unsentBrainKeys has brainKey, it equals brainParameters
m_UnsentBrainKeys[behaviorName] = brainParameters;
// TODO We should check that if m_unsentBrainKeys has brainKey, it equals actionSpec
m_UnsentBrainKeys[behaviorName] = actionSpec;
}
UnityRLInitializationOutputProto GetTempUnityRlInitializationOutput()

{
if (m_CurrentUnityRlOutput.AgentInfos[behaviorName].CalculateSize() > 0)
{
// Only send the BrainParameters if there is a non empty list of
// Only send the actionSpec if there is a non empty list of
// observation when receiving the BrainParameters
// observation when receiving the ActionSpec
var brainParameters = m_UnsentBrainKeys[behaviorName];
output.BrainParameters.Add(brainParameters.ToProto(behaviorName, true));
var actionSpec = m_UnsentBrainKeys[behaviorName];
output.BrainParameters.Add(actionSpec.ToBrainParametersProto(behaviorName, true));
}
}
}

void UpdateSentBrainParameters(UnityRLInitializationOutputProto output)
void UpdateSentActionSpec(UnityRLInitializationOutputProto output)
{
if (output == null)
{

16
com.unity.ml-agents/Runtime/Demonstrations/DemonstrationRecorder.cs


public bool Record;
/// <summary>
/// Number of steps to record. The editor will stop playing when it reaches this threshold.
/// Set to zero to record indefinitely.
/// </summary>
[Tooltip("Number of steps to record. The editor will stop playing when it reaches this threshold. " +
"Set to zero to record indefinitely.")]
public int NumStepsToRecord = 0;
/// <summary>
/// Base demonstration file name. If multiple files are saved, the additional filenames
/// will have a sequence of unique numbers appended.
/// </summary>

if (Record)
{
LazyInitialize();
}
if (NumStepsToRecord > 0 && m_DemoWriter.NumSteps >= NumStepsToRecord)
{
Application.Quit(0);
#if UNITY_EDITOR
UnityEditor.EditorApplication.isPlaying = false;
#endif
}
}

8
com.unity.ml-agents/Runtime/Demonstrations/DemonstrationWriter.cs


}
/// <summary>
/// Number of steps written so far.
/// </summary>
internal int NumSteps
{
get { return m_MetaData.numberSteps; }
}
/// <summary>
/// Writes the initial data to the stream.
/// </summary>
/// <param name="demonstrationName">Base name of the demonstration file(s).</param>

3
com.unity.ml-agents/Runtime/DiscreteActionMasker.cs


m_Delegate.WriteMask(branch, actionIndices);
}
/// <inheritdoc />
/// <inheritdoc />
/// <inheritdoc />
public void ResetMask()
{
m_Delegate.ResetMask();

90
com.unity.ml-agents/Runtime/Inference/BarracudaModelParamLoader.cs


using System.Collections.Generic;
using System.Linq;
using Unity.Barracuda;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Sensors;
using Unity.MLAgents.Policies;

/// The BrainParameters that are used to verify the compatibility with the InferenceEngine
/// </param>
/// <param name="sensorComponents">Attached sensor components</param>
/// <param name="actuatorComponents">Attached actuator components</param>
SensorComponent[] sensorComponents, int observableAttributeTotalSize = 0,
SensorComponent[] sensorComponents, ActuatorComponent[] actuatorComponents,
int observableAttributeTotalSize = 0,
BehaviorType behaviorType = BehaviorType.Default)
{
List<string> failedModelChecks = new List<string>();

return failedModelChecks;
}
var modelDiscreteActionSize = isContinuous == ModelActionType.Discrete ? actionSize : 0;
var modelContinuousActionSize = isContinuous == ModelActionType.Continuous ? actionSize : 0;
failedModelChecks.AddRange(
CheckIntScalarPresenceHelper(new Dictionary<string, int>()
{

CheckInputTensorShape(model, brainParameters, sensorComponents, observableAttributeTotalSize)
);
failedModelChecks.AddRange(
CheckOutputTensorShape(model, brainParameters, isContinuous, actionSize)
CheckOutputTensorShape(model, brainParameters, actuatorComponents, isContinuous, modelContinuousActionSize, modelDiscreteActionSize)
);
return failedModelChecks;
}

/// <param name="brainParameters">
/// The BrainParameters that are used to verify the compatibility with the InferenceEngine
/// </param>
/// <param name="actuatorComponents">Array of attached actuator components.</param>
/// <param name="modelActionSize">
/// The size of the action output that is expected by the model.
/// <param name="modelContinuousActionSize">
/// The size of the continuous action output that is expected by the model.
/// </param>
/// <param name="modelSumDiscreteBranchSizes">
/// The size of the discrete action output that is expected by the model.
/// </param>
/// <returns>
/// A IEnumerable of string corresponding to the incompatible shapes between model

Model model,
BrainParameters brainParameters,
ActuatorComponent[] actuatorComponents,
int modelActionSize)
int modelContinuousActionSize, int modelSumDiscreteBranchSizes)
{
var failedModelChecks = new List<string>();
if (isContinuous == ModelActionType.Unknown)

"suggest Continuous Control.");
return failedModelChecks;
}
var tensorTester = new Dictionary<string, Func<BrainParameters, TensorShape?, int, string>>();
if (brainParameters.VectorActionSpaceType == SpaceType.Continuous)
var tensorTester = new Dictionary<string, Func<BrainParameters, ActuatorComponent[], TensorShape?, int, int, string>>();
// This will need to change a bit for hybrid action spaces.
if (isContinuous == ModelActionType.Continuous)
{
tensorTester[TensorNames.ActionOutput] = CheckContinuousActionOutputShape;
}

}
Func<BrainParameters, TensorShape?, int, string> tester = tensorTester[name];
var error = tester.Invoke(brainParameters, model.GetShapeByName(name), modelActionSize);
var tester = tensorTester[name];
var error = tester.Invoke(brainParameters, actuatorComponents, model.GetShapeByName(name), modelContinuousActionSize, modelSumDiscreteBranchSizes);
if (error != null)
{
failedModelChecks.Add(error);

/// <param name="brainParameters">
/// The BrainParameters that are used to verify the compatibility with the InferenceEngine
/// </param>
/// <param name="actuatorComponents">Array of attached actuator components.</param>
/// <param name="modelActionSize">
/// The size of the action output that is expected by the model.
/// <param name="modelContinuousActionSize">
/// The size of the continuous action output that is expected by the model.
/// </param>
/// <param name="modelSumDiscreteBranchSizes">
/// The size of the discrete action output that is expected by the model.
/// </param>
/// <returns>
/// If the Check failed, returns a string containing information about why the

BrainParameters brainParameters, TensorShape? shape, int modelActionSize)
BrainParameters brainParameters, ActuatorComponent[] actuatorComponents, TensorShape? shape, int modelContinuousActionSize, int modelSumDiscreteBranchSizes)
var bpActionSize = brainParameters.VectorActionSize.Sum();
if (modelActionSize != bpActionSize)
var sumOfDiscreteBranchSizes = 0;
if (brainParameters.VectorActionSpaceType == SpaceType.Discrete)
return "Action Size of the model does not match. The BrainParameters expect " +
$"{bpActionSize} but the model contains {modelActionSize}.";
sumOfDiscreteBranchSizes += brainParameters.VectorActionSize.Sum();
}
foreach (var actuatorComponent in actuatorComponents)
{
var actionSpec = actuatorComponent.ActionSpec;
sumOfDiscreteBranchSizes += actionSpec.SumOfDiscreteBranchSizes;
}
if (modelSumDiscreteBranchSizes != sumOfDiscreteBranchSizes)
{
return "Discrete Action Size of the model does not match. The BrainParameters expect " +
$"{sumOfDiscreteBranchSizes} but the model contains {modelSumDiscreteBranchSizes}.";
}
return null;
}

/// <param name="brainParameters">
/// The BrainParameters that are used verify the compatibility with the InferenceEngine
/// </param>
/// <param name="actuatorComponents">Array of attached actuator components.</param>
/// <param name="modelActionSize">
/// The size of the action output that is expected by the model.
/// <param name="modelContinuousActionSize">
/// The size of the continuous action output that is expected by the model.
/// </param>
/// <param name="modelSumDiscreteBranchSizes">
/// The size of the discrete action output that is expected by the model.
BrainParameters brainParameters, TensorShape? shape, int modelActionSize)
BrainParameters brainParameters, ActuatorComponent[] actuatorComponents, TensorShape? shape, int modelContinuousActionSize, int modelSumDiscreteBranchSizes)
var bpActionSize = brainParameters.VectorActionSize[0];
if (modelActionSize != bpActionSize)
var numContinuousActions = 0;
if (brainParameters.VectorActionSpaceType == SpaceType.Continuous)
return "Action Size of the model does not match. The BrainParameters expect " +
$"{bpActionSize} but the model contains {modelActionSize}.";
numContinuousActions += brainParameters.NumActions;
}
foreach (var actuatorComponent in actuatorComponents)
{
var actionSpec = actuatorComponent.ActionSpec;
numContinuousActions += actionSpec.NumContinuousActions;
}
if (modelContinuousActionSize != numContinuousActions)
{
return "Continuous Action Size of the model does not match. The BrainParameters and ActuatorComponents expect " +
$"{numContinuousActions} but the model contains {modelContinuousActionSize}.";
}
return null;
}

10
com.unity.ml-agents/Runtime/Inference/ModelRunner.cs


using System.Collections.Generic;
using Unity.Barracuda;
using UnityEngine.Profiling;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Policies;
using Unity.MLAgents.Policies;
namespace Unity.MLAgents.Inference
{

/// the agents
/// </summary>
/// <param name="model"> The Barracuda model to load </param>
/// <param name="brainParameters"> The parameters of the Brain used to generate the
/// placeholder tensors </param>
/// <param name="actionSpec"> Description of the action spaces for the Agent.</param>
/// <param name="inferenceDevice"> Inference execution device. CPU is the fastest
/// option for most of ML Agents models. </param>
/// <param name="seed"> The seed that will be used to initialize the RandomNormal

public ModelRunner(
NNModel model,
BrainParameters brainParameters,
ActionSpec actionSpec,
InferenceDevice inferenceDevice = InferenceDevice.CPU,
int seed = 0)
{

m_TensorGenerator = new TensorGenerator(
seed, m_TensorAllocator, m_Memories, barracudaModel);
m_TensorApplier = new TensorApplier(
brainParameters, seed, m_TensorAllocator, m_Memories, barracudaModel);
actionSpec, seed, m_TensorAllocator, m_Memories, barracudaModel);
}
static Dictionary<string, Tensor> PrepareBarracudaInputs(IEnumerable<TensorProxy> infInputs)

13
com.unity.ml-agents/Runtime/Inference/TensorApplier.cs


using System.Collections.Generic;
using Unity.Barracuda;
using Unity.MLAgents.Actuators;
namespace Unity.MLAgents.Inference
{

/// <summary>
/// Returns a new TensorAppliers object.
/// </summary>
/// <param name="bp"> The BrainParameters used to determine what Appliers will be
/// used</param>
/// <param name="actionSpec"> Description of the action spaces for the Agent.</param>
BrainParameters bp,
ActionSpec actionSpec,
if (bp.VectorActionSpaceType == SpaceType.Continuous)
actionSpec.CheckNotHybrid();
if (actionSpec.NumContinuousActions > 0)
{
m_Dict[TensorNames.ActionOutput] = new ContinuousActionOutputApplier();
}

new DiscreteActionOutputApplier(bp.VectorActionSize, seed, allocator);
new DiscreteActionOutputApplier(actionSpec.BranchSizes, seed, allocator);
}
m_Dict[TensorNames.RecurrentOutput] = new MemoryOutputApplier(memories);

7
com.unity.ml-agents/Runtime/Policies/BarracudaPolicy.cs


/// <inheritdoc />
public BarracudaPolicy(
BrainParameters brainParameters,
ActionSpec actionSpec,
var modelRunner = Academy.Instance.GetOrCreateModelRunner(model, brainParameters, inferenceDevice);
var modelRunner = Academy.Instance.GetOrCreateModelRunner(model, actionSpec, inferenceDevice);
m_SpaceType = brainParameters.VectorActionSpaceType;
actionSpec.CheckNotHybrid();
m_SpaceType = actionSpec.NumContinuousActions > 0 ? SpaceType.Continuous : SpaceType.Discrete;
}
/// <inheritdoc />

31
com.unity.ml-agents/Runtime/Policies/BehaviorParameters.cs


using System;
using UnityEngine;
using UnityEngine.Serialization;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Sensors.Reflection;
namespace Unity.MLAgents.Policies

get { return m_BehaviorName + "?team=" + TeamId; }
}
internal IPolicy GeneratePolicy(HeuristicPolicy.ActionGenerator heuristic)
internal IPolicy GeneratePolicy(ActionSpec actionSpec, HeuristicPolicy.ActionGenerator heuristic)
return GenerateHeuristicPolicy(heuristic);
return new HeuristicPolicy(heuristic, actionSpec);
case BehaviorType.InferenceOnly:
{
if (m_Model == null)

"Either assign a model, or change to a different Behavior Type."
);
}
return new BarracudaPolicy(m_BrainParameters, m_Model, m_InferenceDevice);
return new BarracudaPolicy(actionSpec, m_Model, m_InferenceDevice);
return new RemotePolicy(m_BrainParameters, FullyQualifiedBehaviorName);
return new RemotePolicy(actionSpec, FullyQualifiedBehaviorName);
return new BarracudaPolicy(m_BrainParameters, m_Model, m_InferenceDevice);
return new BarracudaPolicy(actionSpec, m_Model, m_InferenceDevice);
return GenerateHeuristicPolicy(heuristic);
return new HeuristicPolicy(heuristic, actionSpec);
return GenerateHeuristicPolicy(heuristic);
}
}
internal IPolicy GenerateHeuristicPolicy(HeuristicPolicy.ActionGenerator heuristic)
{
var numContinuousActions = 0;
var numDiscreteActions = 0;
if (m_BrainParameters.VectorActionSpaceType == SpaceType.Continuous)
{
numContinuousActions = m_BrainParameters.NumActions;
return new HeuristicPolicy(heuristic, actionSpec);
else if (m_BrainParameters.VectorActionSpaceType == SpaceType.Discrete)
{
numDiscreteActions = m_BrainParameters.NumActions;
}
return new HeuristicPolicy(heuristic, numContinuousActions, numDiscreteActions);
}
internal void UpdateAgentPolicy()

4
com.unity.ml-agents/Runtime/Policies/HeuristicPolicy.cs


/// <inheritdoc />
public HeuristicPolicy(ActionGenerator heuristic, int numContinuousActions, int numDiscreteActions)
public HeuristicPolicy(ActionGenerator heuristic, ActionSpec actionSpec)
var numContinuousActions = actionSpec.NumContinuousActions;
var numDiscreteActions = actionSpec.NumDiscreteActions;
var continuousDecision = new ActionSegment<float>(new float[numContinuousActions], 0, numContinuousActions);
var discreteDecision = new ActionSegment<int>(new int[numDiscreteActions], 0, numDiscreteActions);
m_ActionBuffers = new ActionBuffers(continuousDecision, discreteDecision);

9
com.unity.ml-agents/Runtime/Policies/RemotePolicy.cs


/// <inheritdoc />
public RemotePolicy(
BrainParameters brainParameters,
ActionSpec actionSpec,
m_SpaceType = brainParameters.VectorActionSpaceType;
m_Communicator.SubscribeBrain(m_FullyQualifiedBehaviorName, brainParameters);
m_Communicator.SubscribeBrain(m_FullyQualifiedBehaviorName, actionSpec);
actionSpec.CheckNotHybrid();
m_SpaceType = actionSpec.NumContinuousActions > 0 ? SpaceType.Continuous : SpaceType.Discrete;
}
/// <inheritdoc />

{
m_Communicator?.DecideBatch();
var actions = m_Communicator?.GetActions(m_FullyQualifiedBehaviorName, m_AgentId);
// TODO figure out how to handle this with multiple space types.
if (m_SpaceType == SpaceType.Continuous)
{
m_LastActionBuffer = new ActionBuffers(actions, Array.Empty<int>());

2
com.unity.ml-agents/Tests/Editor/Actuators/ActuatorManagerTests.cs


var actuator2 = new TestActuator(ActionSpec.MakeContinuous(3), "actuator2");
manager.Add(actuator1);
manager.Add(actuator2);
manager.ReadyActuatorsForExecution(new[] { actuator1, actuator2 }, 3, 10, 4);
manager.ReadyActuatorsForExecution(new[] { actuator1, actuator2 }, 3, 10, 4);
}
[Test]

4
com.unity.ml-agents/Tests/Editor/Actuators/TestActuator.cs


public TestActuator(ActionSpec actuatorSpace, string name)
{
ActionSpec = actuatorSpace;
TotalNumberOfActions = actuatorSpace.NumContinuousActions +
actuatorSpace.NumDiscreteActions;
Name = name;
}

}
}
public int TotalNumberOfActions { get; }
public ActionSpec ActionSpec { get; }
public string Name { get; }

3
com.unity.ml-agents/Tests/Editor/BehaviorParameterTests.cs


var gameObj = new GameObject();
var bp = gameObj.AddComponent<BehaviorParameters>();
bp.BehaviorType = BehaviorType.InferenceOnly;
var actionSpec = new ActionSpec();
bp.GeneratePolicy(DummyHeuristic);
bp.GeneratePolicy(actionSpec, DummyHeuristic);
});
}
}

18
com.unity.ml-agents/Tests/Editor/Communicator/GrpcExtensionsTests.cs


using UnityEngine;
using Unity.MLAgents.Policies;
using Unity.MLAgents.Demonstrations;
using Unity.MLAgents.Sensors;
using Unity.MLAgents.Actuators;
namespace Unity.MLAgents.Tests
{

// Should be able to convert a default instance to proto.
var brain = new BrainParameters();
brain.ToProto("foo", false);
}
[Test]
public void TestDefaultActionSpecToProto()
{
// Should be able to convert a default instance to proto.
var actionSpec = new ActionSpec();
actionSpec.ToBrainParametersProto("foo", false);
// Continuous
actionSpec = ActionSpec.MakeContinuous(3);
actionSpec.ToBrainParametersProto("foo", false);
// Discrete
actionSpec = ActionSpec.MakeDiscrete(1, 2, 3);
actionSpec.ToBrainParametersProto("foo", false);
}
[Test]

5
com.unity.ml-agents/Tests/Editor/EditModeTestInternalBrainTensorApplier.cs


using System.Collections.Generic;
using NUnit.Framework;
using Unity.Barracuda;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Inference;
using Unity.MLAgents.Policies;

[Test]
public void Construction()
{
var bp = new BrainParameters();
var actionSpec = new ActionSpec();
var tensorGenerator = new TensorApplier(bp, 0, alloc, mem);
var tensorGenerator = new TensorApplier(actionSpec, 0, alloc, mem);
Assert.IsNotNull(tensorGenerator);
alloc.Dispose();
}

31
com.unity.ml-agents/Tests/Editor/ModelRunnerTest.cs


using UnityEngine;
using UnityEditor;
using Unity.Barracuda;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Inference;
using Unity.MLAgents.Sensors;
using Unity.MLAgents.Policies;

Test3DSensorComponent sensor_21_20_3;
Test3DSensorComponent sensor_20_22_3;
BrainParameters GetContinuous2vis8vec2actionBrainParameters()
ActionSpec GetContinuous2vis8vec2actionActionSpec()
var validBrainParameters = new BrainParameters();
validBrainParameters.VectorObservationSize = 8;
validBrainParameters.VectorActionSize = new[] { 2 };
validBrainParameters.NumStackedVectorObservations = 1;
validBrainParameters.VectorActionSpaceType = SpaceType.Continuous;
return validBrainParameters;
return ActionSpec.MakeContinuous(2);
BrainParameters GetDiscrete1vis0vec_2_3action_recurrModelBrainParameters()
ActionSpec GetDiscrete1vis0vec_2_3action_recurrModelActionSpec()
var validBrainParameters = new BrainParameters();
validBrainParameters.VectorObservationSize = 0;
validBrainParameters.VectorActionSize = new[] { 2, 3 };
validBrainParameters.NumStackedVectorObservations = 1;
validBrainParameters.VectorActionSpaceType = SpaceType.Discrete;
return validBrainParameters;
return ActionSpec.MakeDiscrete(2, 3);
}
[SetUp]

[Test]
public void TestCreation()
{
var modelRunner = new ModelRunner(continuous2vis8vec2actionModel, GetContinuous2vis8vec2actionBrainParameters());
var modelRunner = new ModelRunner(continuous2vis8vec2actionModel, GetContinuous2vis8vec2actionActionSpec());
modelRunner = new ModelRunner(discrete1vis0vec_2_3action_recurrModel, GetDiscrete1vis0vec_2_3action_recurrModelBrainParameters());
modelRunner = new ModelRunner(discrete1vis0vec_2_3action_recurrModel, GetDiscrete1vis0vec_2_3action_recurrModelActionSpec());
modelRunner.Dispose();
}

var modelRunner = new ModelRunner(continuous2vis8vec2actionModel, GetContinuous2vis8vec2actionBrainParameters(), InferenceDevice.CPU);
var modelRunner = new ModelRunner(continuous2vis8vec2actionModel, GetContinuous2vis8vec2actionActionSpec(), InferenceDevice.CPU);
Assert.True(modelRunner.HasModel(continuous2vis8vec2actionModel, InferenceDevice.CPU));
Assert.False(modelRunner.HasModel(continuous2vis8vec2actionModel, InferenceDevice.GPU));
Assert.False(modelRunner.HasModel(discrete1vis0vec_2_3action_recurrModel, InferenceDevice.CPU));

[Test]
public void TestRunModel()
{
var brainParameters = GetDiscrete1vis0vec_2_3action_recurrModelBrainParameters();
var modelRunner = new ModelRunner(discrete1vis0vec_2_3action_recurrModel, brainParameters);
var actionSpec = GetDiscrete1vis0vec_2_3action_recurrModelActionSpec();
var modelRunner = new ModelRunner(discrete1vis0vec_2_3action_recurrModel, actionSpec);
var info1 = new AgentInfo();
info1.episodeId = 1;
modelRunner.PutObservations(info1, new[] { sensor_21_20_3.CreateSensor() }.ToList());

Assert.IsNotNull(modelRunner.GetAction(1));
Assert.IsNotNull(modelRunner.GetAction(2));
Assert.IsNull(modelRunner.GetAction(3));
Assert.AreEqual(brainParameters.VectorActionSize.Count(), modelRunner.GetAction(1).Count());
Assert.AreEqual(actionSpec.NumDiscreteActions, modelRunner.GetAction(1).Count());
modelRunner.Dispose();
}
}

33
com.unity.ml-agents/Tests/Editor/ParameterLoaderTest.cs


using UnityEngine;
using UnityEditor;
using Unity.Barracuda;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Inference;
using Unity.MLAgents.Sensors;
using Unity.MLAgents.Policies;

var model = ModelLoader.Load(continuous2vis8vec2actionModel);
var validBrainParameters = GetContinuous2vis8vec2actionBrainParameters();
var errors = BarracudaModelParamLoader.CheckModel(model, validBrainParameters, new SensorComponent[] { sensor_21_20_3, sensor_20_22_3 });
var errors = BarracudaModelParamLoader.CheckModel(
model, validBrainParameters,