using System.Collections.Generic;
using UnityEngine;
using Barracuda;
using UnityEngine.Profiling;

namespace MLAgents.InferenceBrain
{
    public class ModelRunner : IBatchedDecisionMaker
    {
        private ITensorAllocator m_TensorAllocator;
        private TensorGenerator m_TensorGenerator;
        private TensorApplier m_TensorApplier;

        private NNModel m_Model;
        private InferenceDevice m_InferenceDevice;
        private IWorker m_Engine;
        private bool m_Verbose = false;
        private string[] m_OutputNames;
        private IReadOnlyList<TensorProxy> m_InferenceInputs;
        private IReadOnlyList<TensorProxy> m_InferenceOutputs;

        /// <summary>
        /// Initializes the Brain with the Model that it will use when selecting actions for
        /// the agents.
        /// </summary>
        /// <param name="model">The Barracuda model to load</param>
        /// <param name="brainParameters">The parameters of the Brain used to generate the
        /// placeholder tensors</param>
        /// <param name="inferenceDevice">Inference execution device. CPU is the fastest
        /// option for most ML-Agents models.</param>
        /// <param name="seed">The seed that will be used to initialize the RandomNormal
        /// and Multinomial objects used when running inference.</param>
        /// <exception cref="UnityAgentsException">Throws an error when the model is null
        /// </exception>
        public ModelRunner(
            NNModel model,
            BrainParameters brainParameters,
            InferenceDevice inferenceDevice = InferenceDevice.CPU,
            int seed = 0)
        {
            Model barracudaModel;
            m_Model = model;
            m_InferenceDevice = inferenceDevice;
            m_TensorAllocator = new TensorCachingAllocator();

            if (model != null)
            {
#if BARRACUDA_VERBOSE
                m_Verbose = true;
#endif
                D.logEnabled = m_Verbose;

                barracudaModel = ModelLoader.Load(model.Value);
                var executionDevice = inferenceDevice == InferenceDevice.GPU
                    ? BarracudaWorkerFactory.Type.ComputePrecompiled
                    : BarracudaWorkerFactory.Type.CSharp;
                m_Engine = BarracudaWorkerFactory.CreateWorker(executionDevice, barracudaModel, m_Verbose);
            }
            else
            {
                barracudaModel = null;
                m_Engine = null;
            }

            m_InferenceInputs = BarracudaModelParamLoader.GetInputTensors(barracudaModel);
            m_OutputNames = BarracudaModelParamLoader.GetOutputNames(barracudaModel);
            m_TensorGenerator = new TensorGenerator(brainParameters, seed, m_TensorAllocator, barracudaModel);
            m_TensorApplier = new TensorApplier(brainParameters, seed, m_TensorAllocator, barracudaModel);
        }

        private static Dictionary<string, Tensor> PrepareBarracudaInputs(IEnumerable<TensorProxy> infInputs)
        {
            var inputs = new Dictionary<string, Tensor>();
            foreach (var inp in infInputs)
            {
                inputs[inp.name] = inp.data;
            }
            return inputs;
        }

        public void Dispose()
        {
            if (m_Engine != null)
                m_Engine.Dispose();
            m_TensorAllocator?.Reset(false);
        }

        private List<TensorProxy> FetchBarracudaOutputs(string[] names)
        {
            var outputs = new List<TensorProxy>();
            foreach (var n in names)
            {
                var output = m_Engine.Peek(n);
                outputs.Add(TensorUtils.TensorProxyFromBarracuda(output, n));
            }
            return outputs;
        }

        /// <summary>
        /// Runs one batched inference pass for the given agents: generates the input
        /// tensors, executes the model, and applies the output tensors back onto them.
        /// </summary>
        public void PutObservations(string key, ICollection<Agent> agents)
        {
            var currentBatchSize = agents.Count;
            if (currentBatchSize == 0)
            {
                return;
            }

            Profiler.BeginSample("LearningBrain.DecideAction");

            if (m_Engine == null)
            {
                Debug.LogError($"No model was present for the Brain {m_Model?.name}.");
                // Close the open profiler sample before the early exit.
                Profiler.EndSample();
                return;
            }

            Profiler.BeginSample($"MLAgents.{m_Model.name}.GenerateTensors");
            // Prepare the input tensors to be fed into the engine
            m_TensorGenerator.GenerateTensors(m_InferenceInputs, currentBatchSize, agents);
            Profiler.EndSample();

            Profiler.BeginSample($"MLAgents.{m_Model.name}.PrepareBarracudaInputs");
            var inputs = PrepareBarracudaInputs(m_InferenceInputs);
            Profiler.EndSample();

            // Execute the Model
            Profiler.BeginSample($"MLAgents.{m_Model.name}.ExecuteGraph");
            m_Engine.Execute(inputs);
            Profiler.EndSample();

            Profiler.BeginSample($"MLAgents.{m_Model.name}.FetchBarracudaOutputs");
            m_InferenceOutputs = FetchBarracudaOutputs(m_OutputNames);
            Profiler.EndSample();
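
            // m_InferenceOutputs now holds one TensorProxy per model output
            // (actions, recurrent memories, etc.); TensorApplier maps each of
            // them back onto the agents in the batch below.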
Profiler.BeginSample($"MLAgents.{m_Model.name}.ApplyTensors"); // Update the outputs m_TensorApplier.ApplyTensors(m_InferenceOutputs, agents); Profiler.EndSample(); Profiler.EndSample(); } public bool HasModel(NNModel other, InferenceDevice otherInferenceDevice) { return m_Model == other && m_InferenceDevice == otherInferenceDevice; } } }