浏览代码

Moving the tensor applier around

/ai-hw-2021/tensor-applier
vincentpierre 4 年前
当前提交
a9ca4a7d
共有 5 个文件被更改,包括 153 次插入121 次删除
  1. 93
      com.unity.ml-agents/Runtime/Inference/ApplierImpl.cs
  2. 18
      com.unity.ml-agents/Runtime/Inference/BarracudaModelExtensions.cs
  3. 22
      com.unity.ml-agents/Runtime/Policies/TrainingModelRunner.cs
  4. 130
      com.unity.ml-agents/Runtime/Inference/TrainingForwardTensorApplier.cs
  5. 11
      com.unity.ml-agents/Runtime/Inference/TrainingForwardTensorApplier.cs.meta

93
com.unity.ml-agents/Runtime/Inference/ApplierImpl.cs


}
}
internal class MaxActionOutputApplier : TensorApplier.IApplier
{
    readonly ActionSpec m_ActionSpec;

    public MaxActionOutputApplier(ActionSpec actionSpec, int seed, ITensorAllocator allocator)
    {
        // seed and allocator are unused: argmax is deterministic and allocates nothing.
        m_ActionSpec = actionSpec;
    }

    /// <summary>
    /// Writes, for each requesting agent, the index of the largest logit in the
    /// tensor's last dimension into slot 0 of that agent's discrete action buffer.
    /// </summary>
    /// <param name="tensorProxy">Output tensor; row i holds the logits for the i-th agent in <paramref name="actionIds"/>.</param>
    /// <param name="actionIds">Agent ids, in the same order as the tensor's batch rows.</param>
    /// <param name="lastActions">Map of agent id to the action buffer to update.</param>
    public void Apply(TensorProxy tensorProxy, IList<int> actionIds, Dictionary<int, ActionBuffers> lastActions)
    {
        var agentIndex = 0;
        var actionSpaceSize = tensorProxy.shape[tensorProxy.shape.Length - 1];
        for (var i = 0; i < actionIds.Count; i++)
        {
            var agentId = actionIds[i];
            if (lastActions.TryGetValue(agentId, out var actionBuffer))
            {
                if (actionBuffer.IsEmpty())
                {
                    actionBuffer = new ActionBuffers(m_ActionSpec);
                    lastActions[agentId] = actionBuffer;
                }
                var discreteBuffer = actionBuffer.DiscreteActions;
                var maxIndex = 0;
                // BUG FIX: the original never updated maxValue (so maxIndex ended up
                // being the LAST index with a positive value, not the argmax) and
                // truncated the logits to int. Compare as floats and start from the
                // first element so all-negative logits are handled too.
                var maxValue = float.NegativeInfinity;
                for (var j = 0; j < actionSpaceSize; j++)
                {
                    var value = tensorProxy.data[agentIndex, j];
                    if (value > maxValue)
                    {
                        maxValue = value;
                        maxIndex = j;
                    }
                }
                discreteBuffer[0] = maxIndex;
            }
            agentIndex++;
        }
    }
}
internal class ContinuousFromDiscreteOutputApplier : TensorApplier.IApplier
{
    readonly ActionSpec m_ActionSpec;
    readonly int m_NumDiscretization;

    public ContinuousFromDiscreteOutputApplier(ActionSpec actionSpec, int seed, ITensorAllocator allocator, int numDiscretization)
    {
        // seed and allocator are unused: argmax is deterministic and allocates nothing.
        m_ActionSpec = actionSpec;
        m_NumDiscretization = numDiscretization;
    }

    /// <summary>
    /// Picks the argmax over a flattened numDiscretization x numDiscretization grid of
    /// discretized 2-D continuous actions and writes the corresponding grid cell,
    /// mapped to [-1, 1] per axis, into the agent's continuous action buffer.
    /// </summary>
    /// <param name="tensorProxy">Output tensor; row i holds the logits for the i-th agent in <paramref name="actionIds"/>.</param>
    /// <param name="actionIds">Agent ids, in the same order as the tensor's batch rows.</param>
    /// <param name="lastActions">Map of agent id to the action buffer to update.</param>
    public void Apply(TensorProxy tensorProxy, IList<int> actionIds, Dictionary<int, ActionBuffers> lastActions)
    {
        var agentIndex = 0;
        var actionSpaceSize = tensorProxy.shape[tensorProxy.shape.Length - 1];
        for (var i = 0; i < actionIds.Count; i++)
        {
            var agentId = actionIds[i];
            if (lastActions.TryGetValue(agentId, out var actionBuffer))
            {
                if (actionBuffer.IsEmpty())
                {
                    actionBuffer = new ActionBuffers(m_ActionSpec);
                    lastActions[agentId] = actionBuffer;
                }
                var continuousBuffer = actionBuffer.ContinuousActions;
                var maxIndex = 0;
                // BUG FIX: the original never updated maxValue and truncated the
                // logits to int; track the running float maximum instead.
                var maxValue = float.NegativeInfinity;
                for (var j = 0; j < actionSpaceSize; j++)
                {
                    var value = tensorProxy.data[agentIndex, j];
                    if (value > maxValue)
                    {
                        maxValue = value;
                        maxIndex = j;
                    }
                }
                // BUG FIX: the original expression used pure integer division
                // (row / (n-1) / 2 - 1), which evaluates to -1 for every cell.
                // Map grid coordinates (row, col) in [0, n-1] linearly onto [-1, 1].
                // NOTE(review): assumes exactly 2 continuous dimensions encoded
                // row-major over an n x n grid — confirm against the trainer side.
                var row = maxIndex / m_NumDiscretization;
                var col = maxIndex % m_NumDiscretization;
                continuousBuffer[0] = 2f * row / (m_NumDiscretization - 1) - 1f;
                continuousBuffer[1] = 2f * col / (m_NumDiscretization - 1) - 1f;
            }
            agentIndex++;
        }
    }
}
/// <summary>
/// The Applier for the Discrete Action output tensor. Uses multinomial to sample discrete
/// actions from the logits contained in the tensor.

18
com.unity.ml-agents/Runtime/Inference/BarracudaModelExtensions.cs


return names.ToArray();
}
public static string[] GetTrainingOutputNames(this Model model)
{
    // A null model produces no training outputs.
    if (model == null)
    {
        return new string[0];
    }
    // Fixed set of training output tensor names, returned in sorted order.
    var names = new List<string>
    {
        TensorNames.TrainingStateOut,
        TensorNames.OuputLoss,
        TensorNames.TrainingOutput,
    };
    names.Sort();
    return names.ToArray();
}
/// <summary>
/// Check if the model has continuous action outputs.
/// </summary>

22
com.unity.ml-agents/Runtime/Policies/TrainingModelRunner.cs


ITensorAllocator m_TensorAllocator;
TensorGenerator m_TensorGenerator;
TrainingTensorGenerator m_TrainingTensorGenerator;
TensorApplier m_TensorApplier;
TrainingForwardTensorApplier m_TensorApplier;
string[] m_TrainingOutputNames;
IReadOnlyList<TensorProxy> m_TrainingInputs;
List<TensorProxy> m_TrainingOutputs;
Dictionary<string, Tensor> m_InputsByName;

m_TrainingInputs = barracudaModel.GetTrainingInputTensors();
m_OutputNames = barracudaModel.GetOutputNames();
m_TrainingOutputNames = barracudaModel.GetTrainingOutputNames();
InitializeTrainingState(barracudaModel);
m_TensorApplier = new TensorApplier(
actionSpec, seed, m_TensorAllocator, m_Memories, barracudaModel);
m_TensorApplier = new TrainingForwardTensorApplier(
actionSpec, seed, m_TensorAllocator, barracudaModel);
void InitializeTrainingState()
void InitializeTrainingState(Model barracudaModel)
// TODO: initialize m_TrainingState
m_TrainingState = new TensorProxy
{
data = barracudaModel.GetTensorByName(TensorNames.InitialTrainingState)
};
}
void PrepareBarracudaInputs(IReadOnlyList<TensorProxy> infInputs)

// Execute the Model
m_Engine.Execute(m_InputsByName);
FetchBarracudaOutputs(m_TrainingOutputNames);
// m_TensorApplier.UpdateModel(m_TrainingOutputs, m_OrderedAgentsRequestingDecisions, m_LastActionsReceived);
FetchBarracudaOutputs(new string[] { TensorNames.TrainingStateOut });
m_TrainingState = m_TrainingOutputs[0];
}
public ActionBuffers GetAction(int agentId)

130
com.unity.ml-agents/Runtime/Inference/TrainingForwardTensorApplier.cs


using System.Collections.Generic;
using Unity.Barracuda;
using Unity.MLAgents.Actuators;
using System.Linq;
using Unity.MLAgents.Inference.Utils;
using UnityEngine;
namespace Unity.MLAgents.Inference
{
/// <summary>
/// Mapping between the output tensor names and the method that will use the
/// output tensors and the Agents present in the batch to update their action, memories and
/// value estimates.
/// A TensorApplier implements a Dictionary of strings (node names) to an Action.
/// This action takes as input the tensor and the Dictionary of Agent to AgentInfo for
/// the current batch.
/// </summary>
internal class TrainingForwardTensorApplier
{
    readonly Dictionary<string, TensorApplier.IApplier> m_Dict = new Dictionary<string, TensorApplier.IApplier>();

    /// <summary>
    /// Returns a new TensorAppliers object.
    /// </summary>
    /// <param name="actionSpec"> Description of the actions for the Agent.</param>
    /// <param name="seed"> The seed the Appliers will be initialized with.</param>
    /// <param name="allocator"> Tensor allocator</param>
    /// <param name="barracudaModel">The Barracuda model; must be a <see cref="Model"/> instance.</param>
    /// <exception cref="System.Exception">Thrown when the action spec contains continuous
    /// actions or more than one discrete action branch.</exception>
    public TrainingForwardTensorApplier(
        ActionSpec actionSpec,
        int seed,
        ITensorAllocator allocator,
        object barracudaModel = null)
    {
        // If model is null, no inference to run and exception is thrown before reaching here.
        if (barracudaModel == null)
        {
            return;
        }
        if (actionSpec.NumContinuousActions > 0)
        {
            throw new System.Exception("Cannot do continuous actions");
        }
        if (actionSpec.NumDiscreteActions != 1)
        {
            throw new System.Exception("Cannot do multi discrete actions, only single discrete");
        }
        // Validate early that the caller really passed a Barracuda Model;
        // an invalid argument fails here instead of at first use.
        var model = (Model)barracudaModel;
        m_Dict[TensorNames.TrainingOutput] = new MaxActionOutputApplier(actionSpec, seed, allocator);
    }

    /// <summary>
    /// Updates the state of the agents based on the data present in the tensor.
    /// </summary>
    /// <param name="tensors"> Enumerable of tensors containing the data.</param>
    /// <param name="actionIds"> List of Agents Ids that will be updated using the tensor's data</param>
    /// <param name="lastActions"> Dictionary of AgentId to Actions to be updated</param>
    /// <exception cref="UnityAgentsException"> One of the tensor does not have an
    /// associated applier.</exception>
    public void ApplyTensors(
        IReadOnlyList<TensorProxy> tensors, IList<int> actionIds, Dictionary<int, ActionBuffers> lastActions)
    {
        for (var tensorIndex = 0; tensorIndex < tensors.Count; tensorIndex++)
        {
            var tensor = tensors[tensorIndex];
            // Single lookup via TryGetValue instead of ContainsKey + indexer.
            if (!m_Dict.TryGetValue(tensor.name, out var applier))
            {
                throw new UnityAgentsException(
                    $"Unknown tensorProxy expected as output : {tensor.name}");
            }
            applier.Apply(tensor, actionIds, lastActions);
        }
    }
}
internal class MaxActionOutputApplier : TensorApplier.IApplier
{
    readonly ActionSpec m_ActionSpec;

    public MaxActionOutputApplier(ActionSpec actionSpec, int seed, ITensorAllocator allocator)
    {
        // seed and allocator are unused: argmax is deterministic and allocates nothing.
        m_ActionSpec = actionSpec;
    }

    /// <summary>
    /// Writes, for each requesting agent, the index of the largest logit in the
    /// tensor's last dimension into slot 0 of that agent's discrete action buffer.
    /// </summary>
    /// <param name="tensorProxy">Output tensor; row i holds the logits for the i-th agent in <paramref name="actionIds"/>.</param>
    /// <param name="actionIds">Agent ids, in the same order as the tensor's batch rows.</param>
    /// <param name="lastActions">Map of agent id to the action buffer to update.</param>
    public void Apply(TensorProxy tensorProxy, IList<int> actionIds, Dictionary<int, ActionBuffers> lastActions)
    {
        var agentIndex = 0;
        var actionSpaceSize = tensorProxy.shape[tensorProxy.shape.Length - 1];
        for (var i = 0; i < actionIds.Count; i++)
        {
            var agentId = actionIds[i];
            if (lastActions.TryGetValue(agentId, out var actionBuffer))
            {
                if (actionBuffer.IsEmpty())
                {
                    actionBuffer = new ActionBuffers(m_ActionSpec);
                    lastActions[agentId] = actionBuffer;
                }
                var discreteBuffer = actionBuffer.DiscreteActions;
                var maxIndex = 0;
                // BUG FIX: the original never updated maxValue (so maxIndex ended up
                // being the LAST index with a positive value, not the argmax) and
                // truncated the logits to int. Compare as floats and start from the
                // first element so all-negative logits are handled too.
                var maxValue = float.NegativeInfinity;
                for (var j = 0; j < actionSpaceSize; j++)
                {
                    var value = tensorProxy.data[agentIndex, j];
                    if (value > maxValue)
                    {
                        maxValue = value;
                        maxIndex = j;
                    }
                }
                discreteBuffer[0] = maxIndex;
            }
            agentIndex++;
        }
    }
}
}

11
com.unity.ml-agents/Runtime/Inference/TrainingForwardTensorApplier.cs.meta


fileFormatVersion: 2
guid: eaafcce9c7c794667bc726e40e420824
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:
正在加载...
取消
保存