浏览代码

Making some fields and properties internal (#3342)

* Making some fields and properties internal

* Fixing the formatting

* Making more things internal

* Addressing the comments

* reverting the changes made to the recorder

* WriteAdapter public

* Have to make AgentInfo and TensorProxy public because of changes to WriteAdapter and the DemoRecorder
/asymm-envs
GitHub 5 年前
当前提交
9b72aab2
共有 26 个文件被更改,包括 84 次插入77 次删除
  1. 20
      com.unity.ml-agents/Runtime/Academy.cs
  2. 2
      com.unity.ml-agents/Runtime/ActionMasker.cs
  3. 19
      com.unity.ml-agents/Runtime/Agent.cs
  4. 2
      com.unity.ml-agents/Runtime/Constants.cs
  5. 2
      com.unity.ml-agents/Runtime/EpisodeIdCounter.cs
  6. 24
      com.unity.ml-agents/Runtime/Grpc/GrpcExtensions.cs
  7. 2
      com.unity.ml-agents/Runtime/Grpc/RpcCommunicator.cs
  8. 2
      com.unity.ml-agents/Runtime/ICommunicator.cs
  9. 8
      com.unity.ml-agents/Runtime/InferenceBrain/ApplierImpl.cs
  10. 2
      com.unity.ml-agents/Runtime/InferenceBrain/BarracudaModelParamLoader.cs
  11. 20
      com.unity.ml-agents/Runtime/InferenceBrain/GeneratorImpl.cs
  12. 4
      com.unity.ml-agents/Runtime/InferenceBrain/ModelRunner.cs
  13. 2
      com.unity.ml-agents/Runtime/InferenceBrain/TensorApplier.cs
  14. 2
      com.unity.ml-agents/Runtime/InferenceBrain/TensorGenerator.cs
  15. 2
      com.unity.ml-agents/Runtime/InferenceBrain/TensorNames.cs
  16. 6
      com.unity.ml-agents/Runtime/InferenceBrain/TensorProxy.cs
  17. 2
      com.unity.ml-agents/Runtime/Policy/BarracudaPolicy.cs
  18. 9
      com.unity.ml-agents/Runtime/Policy/BehaviorParameters.cs
  19. 2
      com.unity.ml-agents/Runtime/Policy/HeuristicPolicy.cs
  20. 2
      com.unity.ml-agents/Runtime/Policy/IPolicy.cs
  21. 2
      com.unity.ml-agents/Runtime/Policy/RemotePolicy.cs
  22. 2
      com.unity.ml-agents/Runtime/Startup.cs
  23. 2
      com.unity.ml-agents/Runtime/Utilities.cs
  24. 6
      com.unity.ml-agents/Tests/Editor/MLAgentsEditModeTest.cs
  25. 4
      com.unity.ml-agents/Runtime/AssemblyInfo.cs
  26. 11
      com.unity.ml-agents/Runtime/AssemblyInfo.cs.meta

20
com.unity.ml-agents/Runtime/Academy.cs


/// <summary>
/// Helper class to step the Academy during FixedUpdate phase.
/// </summary>
public class AcademyFixedUpdateStepper : MonoBehaviour
internal class AcademyFixedUpdateStepper : MonoBehaviour
{
void FixedUpdate()
{

int m_TotalStepCount;
/// Pointer to the communicator currently in use by the Academy.
public ICommunicator Communicator;
internal ICommunicator Communicator;
bool m_Initialized;
List<ModelRunner> m_ModelRunners = new List<ModelRunner>();

// Signals to all the Agents at each environment step so they can use
// their Policy to decide on their next action.
public event System.Action DecideAction;
internal event System.Action DecideAction;
public event System.Action DestroyAction;
internal event System.Action DestroyAction;
public event System.Action<int> AgentSetStatus;
internal event System.Action<int> AgentSetStatus;
public event System.Action AgentSendState;
internal event System.Action AgentSendState;
public event System.Action AgentAct;
internal event System.Action AgentAct;
public event System.Action AgentForceReset;
internal event System.Action AgentForceReset;
// Signals that the Academy has been reset by the training process
public event System.Action OnEnvironmentReset;

/// Initialize the Academy if it hasn't already been initialized.
/// This method is always safe to call; it will have no effect if the Academy is already initialized.
/// </summary>
public void LazyInitialization()
internal void LazyInitialization()
{
if (!m_Initialized)
{

/// <param name="inferenceDevice"> The inference device (CPU or GPU)
/// the ModelRunner will use </param>
/// <returns> The ModelRunner compatible with the input settings</returns>
public ModelRunner GetOrCreateModelRunner(
internal ModelRunner GetOrCreateModelRunner(
NNModel model, BrainParameters brainParameters, InferenceDevice inferenceDevice)
{
var modelRunner = m_ModelRunners.Find(x => x.HasModel(model, inferenceDevice));

2
com.unity.ml-agents/Runtime/ActionMasker.cs


namespace MLAgents
{
public class ActionMasker
internal class ActionMasker
{
/// When using discrete control, is the starting indices of the actions
/// when all the branches are concatenated with each other.

19
com.unity.ml-agents/Runtime/Agent.cs


/// Struct that contains the action information sent from the Brain to the
/// Agent.
/// </summary>
public struct AgentAction
internal struct AgentAction
{
public float[] vectorActions;
}

/// Currently generated from attached SensorComponents, and a legacy VectorSensor
/// </summary>
[FormerlySerializedAs("m_Sensors")]
public List<ISensor> sensors;
internal List<ISensor> sensors;
public VectorSensor collectObservationsSensor;
internal VectorSensor collectObservationsSensor;
/// MonoBehaviour function that is called when the attached GameObject
/// becomes enabled or active.

/// Set up the list of ISensors on the Agent. By default, this will select any
/// SensorBase's attached to the Agent.
/// </summary>
public void InitializeSensors()
internal void InitializeSensors()
{
// Get all attached sensor components
SensorComponent[] attachedSensorComponents;

AgentReset();
}
public void UpdateAgentAction(AgentAction action)
internal void UpdateAgentAction(AgentAction action)
}
/// <summary>
/// Updates the vector action.
/// </summary>
/// <param name="vectorActions">Vector actions.</param>
public void UpdateVectorAction(float[] vectorActions)
{
m_Action.vectorActions = vectorActions;
}
/// <summary>

2
com.unity.ml-agents/Runtime/Constants.cs


/// <summary>
/// Grouping for use in AddComponentMenu (instead of nesting the menus).
/// </summary>
public enum MenuGroup
internal enum MenuGroup
{
Default = 0,
Sensors = 50

2
com.unity.ml-agents/Runtime/EpisodeIdCounter.cs


namespace MLAgents
{
public static class EpisodeIdCounter
internal static class EpisodeIdCounter
{
private static int Counter;
public static int GetEpisodeId()

24
com.unity.ml-agents/Runtime/Grpc/GrpcExtensions.cs


namespace MLAgents
{
public static class GrpcExtensions
internal static class GrpcExtensions
internal static AgentInfoActionPairProto ToInfoActionPairProto(this AgentInfo ai)
public static AgentInfoActionPairProto ToInfoActionPairProto(this AgentInfo ai)
{
var agentInfoProto = ai.ToAgentInfoProto();

/// Converts a AgentInfo to a protobuf generated AgentInfoProto
/// </summary>
/// <returns>The protobuf version of the AgentInfo.</returns>
internal static AgentInfoProto ToAgentInfoProto(this AgentInfo ai)
public static AgentInfoProto ToAgentInfoProto(this AgentInfo ai)
{
var agentInfoProto = new AgentInfoProto
{

/// <param name="bp">The instance of BrainParameter to extend.</param>
/// <param name="name">The name of the brain.</param>
/// <param name="isTraining">Whether or not the Brain is training.</param>
internal static BrainParametersProto ToProto(this BrainParameters bp, string name, bool isTraining)
public static BrainParametersProto ToProto(this BrainParameters bp, string name, bool isTraining)
{
var brainParametersProto = new BrainParametersProto
{

/// <summary>
/// Convert metadata object to proto object.
/// </summary>
internal static DemonstrationMetaProto ToProto(this DemonstrationMetaData dm)
public static DemonstrationMetaProto ToProto(this DemonstrationMetaData dm)
{
var demoProto = new DemonstrationMetaProto
{

/// <summary>
/// Initialize metadata values based on proto object.
/// </summary>
internal static DemonstrationMetaData ToDemonstrationMetaData(this DemonstrationMetaProto demoProto)
public static DemonstrationMetaData ToDemonstrationMetaData(this DemonstrationMetaProto demoProto)
{
var dm = new DemonstrationMetaData
{

/// </summary>
/// <param name="bpp">An instance of a brain parameters protobuf object.</param>
/// <returns>A BrainParameters struct.</returns>
internal static BrainParameters ToBrainParameters(this BrainParametersProto bpp)
public static BrainParameters ToBrainParameters(this BrainParametersProto bpp)
{
var bp = new BrainParameters
{

return bp;
}
internal static UnityRLInitParameters ToUnityRLInitParameters(this UnityRLInitializationInputProto inputProto)
public static UnityRLInitParameters ToUnityRLInitParameters(this UnityRLInitializationInputProto inputProto)
{
return new UnityRLInitParameters
{

internal static AgentAction ToAgentAction(this AgentActionProto aap)
public static AgentAction ToAgentAction(this AgentActionProto aap)
{
return new AgentAction
{

internal static List<AgentAction> ToAgentActionList(this UnityRLInputProto.Types.ListAgentActionProto proto)
public static List<AgentAction> ToAgentActionList(this UnityRLInputProto.Types.ListAgentActionProto proto)
{
var agentActions = new List<AgentAction>(proto.Value.Count);
foreach (var ap in proto.Value)

return agentActions;
}
internal static ObservationProto ToProto(this Observation obs)
public static ObservationProto ToProto(this Observation obs)
{
ObservationProto obsProto = null;

/// <param name="sensor"></param>
/// <param name="writeAdapter"></param>
/// <returns></returns>
internal static ObservationProto GetObservationProto(this ISensor sensor, WriteAdapter writeAdapter)
public static ObservationProto GetObservationProto(this ISensor sensor, WriteAdapter writeAdapter)
{
var shape = sensor.GetObservationShape();
ObservationProto observationProto = null;

2
com.unity.ml-agents/Runtime/Grpc/RpcCommunicator.cs


namespace MLAgents
{
/// Responsible for communication with External using gRPC.
public class RpcCommunicator : ICommunicator
internal class RpcCommunicator : ICommunicator
{
public event QuitCommandHandler QuitCommandReceived;
public event ResetCommandHandler ResetCommandReceived;

2
com.unity.ml-agents/Runtime/ICommunicator.cs


UnityOutput and UnityInput can be extended to provide functionalities beyond RL
UnityRLOutput and UnityRLInput can be extended to provide new RL functionalities
*/
public interface ICommunicator : IDisposable
internal interface ICommunicator : IDisposable
{
/// <summary>
/// Quit was received by the communicator.

8
com.unity.ml-agents/Runtime/InferenceBrain/ApplierImpl.cs


/// The Applier for the Continuous Action output tensor. Tensor is assumed to contain the
/// continuous action data of the agents in the batch.
/// </summary>
public class ContinuousActionOutputApplier : TensorApplier.IApplier
internal class ContinuousActionOutputApplier : TensorApplier.IApplier
{
public void Apply(TensorProxy tensorProxy, IEnumerable<int> actionIds, Dictionary<int, float[]> lastActions)
{

/// The Applier for the Discrete Action output tensor. Uses multinomial to sample discrete
/// actions from the logits contained in the tensor.
/// </summary>
public class DiscreteActionOutputApplier : TensorApplier.IApplier
internal class DiscreteActionOutputApplier : TensorApplier.IApplier
{
readonly int[] m_ActionSize;
readonly Multinomial m_Multinomial;

/// The Applier for the Memory output tensor. Tensor is assumed to contain the new
/// memory data of the agents in the batch.
/// </summary>
public class MemoryOutputApplier : TensorApplier.IApplier
internal class MemoryOutputApplier : TensorApplier.IApplier
{
Dictionary<int, List<float>> m_Memories;

}
}
public class BarracudaMemoryOutputApplier : TensorApplier.IApplier
internal class BarracudaMemoryOutputApplier : TensorApplier.IApplier
{
readonly int m_MemoriesCount;
readonly int m_MemoryIndex;

2
com.unity.ml-agents/Runtime/InferenceBrain/BarracudaModelParamLoader.cs


/// Prepares the Tensors for the Learning Brain and exposes a list of failed checks if Model
/// and BrainParameters are incompatible.
/// </summary>
public class BarracudaModelParamLoader
internal class BarracudaModelParamLoader
{
enum ModelActionType
{

20
com.unity.ml-agents/Runtime/InferenceBrain/GeneratorImpl.cs


/// and initializes its content to be zeros. Will only work on 2-dimensional tensors.
/// The second dimension of the Tensor will not be modified.
/// </summary>
public class BiDimensionalOutputGenerator : TensorGenerator.IGenerator
internal class BiDimensionalOutputGenerator : TensorGenerator.IGenerator
{
readonly ITensorAllocator m_Allocator;

/// Generates the Tensor corresponding to the BatchSize input : Will be a one dimensional
/// integer array of size 1 containing the batch size.
/// </summary>
public class BatchSizeGenerator : TensorGenerator.IGenerator
internal class BatchSizeGenerator : TensorGenerator.IGenerator
{
readonly ITensorAllocator m_Allocator;

/// Note : the sequence length is always one since recurrent networks only predict for
/// one step at the time.
/// </summary>
public class SequenceLengthGenerator : TensorGenerator.IGenerator
internal class SequenceLengthGenerator : TensorGenerator.IGenerator
{
readonly ITensorAllocator m_Allocator;

/// It will use the Vector Observation data contained in the agentInfo to fill the data
/// of the tensor.
/// </summary>
public class VectorObservationGenerator : TensorGenerator.IGenerator
internal class VectorObservationGenerator : TensorGenerator.IGenerator
{
readonly ITensorAllocator m_Allocator;
List<int> m_SensorIndices = new List<int>();

/// It will use the Memory data contained in the agentInfo to fill the data
/// of the tensor.
/// </summary>
public class RecurrentInputGenerator : TensorGenerator.IGenerator
internal class RecurrentInputGenerator : TensorGenerator.IGenerator
{
private readonly ITensorAllocator m_Allocator;
Dictionary<int, List<float>> m_Memories;

}
}
public class BarracudaRecurrentInputGenerator : TensorGenerator.IGenerator
internal class BarracudaRecurrentInputGenerator : TensorGenerator.IGenerator
{
int m_MemoriesCount;
readonly int m_MemoryIndex;

/// It will use the previous action data contained in the agentInfo to fill the data
/// of the tensor.
/// </summary>
public class PreviousActionInputGenerator : TensorGenerator.IGenerator
internal class PreviousActionInputGenerator : TensorGenerator.IGenerator
{
readonly ITensorAllocator m_Allocator;

/// It will use the Action Mask data contained in the agentInfo to fill the data
/// of the tensor.
/// </summary>
public class ActionMaskInputGenerator : TensorGenerator.IGenerator
internal class ActionMaskInputGenerator : TensorGenerator.IGenerator
{
readonly ITensorAllocator m_Allocator;

/// dimensional float array of dimension [batchSize x actionSize].
/// It will use the generate random input data from a normal Distribution.
/// </summary>
public class RandomNormalInputGenerator : TensorGenerator.IGenerator
internal class RandomNormalInputGenerator : TensorGenerator.IGenerator
{
readonly RandomNormal m_RandomNormal;
readonly ITensorAllocator m_Allocator;

/// It will use the Texture input data contained in the agentInfo to fill the data
/// of the tensor.
/// </summary>
public class VisualObservationInputGenerator : TensorGenerator.IGenerator
internal class VisualObservationInputGenerator : TensorGenerator.IGenerator
{
readonly int m_SensorIndex;
readonly ITensorAllocator m_Allocator;

4
com.unity.ml-agents/Runtime/InferenceBrain/ModelRunner.cs


namespace MLAgents.InferenceBrain
{
public struct AgentInfoSensorsPair
internal struct AgentInfoSensorsPair
public class ModelRunner
internal class ModelRunner
{
List<AgentInfoSensorsPair> m_Infos = new List<AgentInfoSensorsPair>();
Dictionary<int, float[]> m_LastActionsReceived = new Dictionary<int, float[]>();

2
com.unity.ml-agents/Runtime/InferenceBrain/TensorApplier.cs


/// This action takes as input the tensor and the Dictionary of Agent to AgentInfo for
/// the current batch.
/// </summary>
public class TensorApplier
internal class TensorApplier
{
/// <summary>
/// A tensor Applier's Execute method takes a tensor and a Dictionary of Agent to AgentInfo.

2
com.unity.ml-agents/Runtime/InferenceBrain/TensorGenerator.cs


/// When the TensorProxy is an Output of the model, only the shape of the Tensor will be
/// modified using the current batch size. The data will be pre-filled with zeros.
/// </summary>
public class TensorGenerator
internal class TensorGenerator
{
public interface IGenerator
{

2
com.unity.ml-agents/Runtime/InferenceBrain/TensorNames.cs


/// <summary>
/// Contains the names of the input and output tensors for the Inference Brain.
/// </summary>
public static class TensorNames
internal static class TensorNames
{
public const string BatchSizePlaceholder = "batch_size";
public const string SequenceLengthPlaceholder = "sequence_length";

6
com.unity.ml-agents/Runtime/InferenceBrain/TensorProxy.cs


public Tensor data;
}
public static class TensorUtils
internal static class TensorUtils
{
public static void ResizeTensor(TensorProxy tensor, int batch, ITensorAllocator allocator)
{

{
if (src.height == 1 && src.width == 1)
{
return new long[] {src.batch, src.channels};
return new long[] { src.batch, src.channels };
return new long[] {src.batch, src.height, src.width, src.channels};
return new long[] { src.batch, src.height, src.width, src.channels };
}
public static TensorProxy TensorProxyFromBarracuda(Tensor src, string nameOverride = null)

2
com.unity.ml-agents/Runtime/Policy/BarracudaPolicy.cs


/// every step. It uses a ModelRunner that is shared accross all
/// Barracuda Policies that use the same model and inference devices.
/// </summary>
public class BarracudaPolicy : IPolicy
internal class BarracudaPolicy : IPolicy
{
protected ModelRunner m_ModelRunner;

9
com.unity.ml-agents/Runtime/Policy/BehaviorParameters.cs


[HideInInspector]
[SerializeField]
string m_BehaviorName = "My Behavior";
[HideInInspector][SerializeField]
[HideInInspector]
[SerializeField]
int m_TeamID = 0;
[HideInInspector]
[SerializeField]

public string behaviorName
{
get { return m_BehaviorName;}
get { return m_BehaviorName; }
}
/// <summary>

{
get { return m_BehaviorName + "?team=" + m_TeamID;}
get { return m_BehaviorName + "?team=" + m_TeamID; }
public IPolicy GeneratePolicy(Func<float[]> heuristic)
internal IPolicy GeneratePolicy(Func<float[]> heuristic)
{
switch (m_BehaviorType)
{

2
com.unity.ml-agents/Runtime/Policy/HeuristicPolicy.cs


/// to take decisions each time the RequestDecision method is
/// called.
/// </summary>
public class HeuristicPolicy : IPolicy
internal class HeuristicPolicy : IPolicy
{
Func<float[]> m_Heuristic;
float[] m_LastDecision;

2
com.unity.ml-agents/Runtime/Policy/IPolicy.cs


/// will not be taken immediately but will be taken before or when
/// DecideAction is called.
/// </summary>
public interface IPolicy : IDisposable
internal interface IPolicy : IDisposable
{
/// <summary>
/// Signals the Brain that the Agent needs a Decision. The Policy

2
com.unity.ml-agents/Runtime/Policy/RemotePolicy.cs


/// The Remote Policy only works when training.
/// When training your Agents, the RemotePolicy will be controlled by Python.
/// </summary>
public class RemotePolicy : IPolicy
internal class RemotePolicy : IPolicy
{
int m_AgentId;

2
com.unity.ml-agents/Runtime/Startup.cs


namespace MLAgents
{
public class Startup : MonoBehaviour
internal class Startup : MonoBehaviour
{
const string k_SceneVariableName = "SCENE_NAME";

2
com.unity.ml-agents/Runtime/Utilities.cs


namespace MLAgents
{
public static class Utilities
internal static class Utilities
{
/// <summary>
/// Puts a Texture2D into a WriteAdapter.

6
com.unity.ml-agents/Tests/Editor/MLAgentsEditModeTest.cs


namespace MLAgents.Tests
{
public class TestPolicy : IPolicy
internal class TestPolicy : IPolicy
{
public void RequestDecision(AgentInfo info, List<ISensor> sensors) { }

public class TestAgent : Agent
{
public AgentInfo _Info
internal AgentInfo _Info
{
get
{

}
}
public void SetPolicy(IPolicy policy)
internal void SetPolicy(IPolicy policy)
{
typeof(Agent).GetField("m_Brain", BindingFlags.Instance | BindingFlags.NonPublic).SetValue(this, policy);
}

4
com.unity.ml-agents/Runtime/AssemblyInfo.cs


using System.Runtime.CompilerServices;
[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor.Tests")]
[assembly: InternalsVisibleTo("Unity.ML-Agents.Editor")]

11
com.unity.ml-agents/Runtime/AssemblyInfo.cs.meta


fileFormatVersion: 2
guid: b433ecadea36c4af9a3dc65e359a3ca0
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:
正在加载...
取消
保存