
TensorFlowSharp is no more (#2590)

* TensorFlowSharp is no more

* Removed old documents
Branch: /develop-gpu-test
GitHub committed 5 years ago
Current commit: 82bf38ef
8 files changed, 4 insertions(+), 839 deletions(-)
  1. UnitySDK/Assets/ML-Agents/Scripts/LearningBrain.cs (59 lines changed)
  2. docs/FAQ.md (13 lines changed)
  3. docs/dox-ml-agents.conf (2 lines changed)
  4. UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/ModelParamLoader.cs.meta (3 lines changed)
  5. UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/ModelParamLoader.cs (533 lines changed)
  6. UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/TFSharpInferenceEngine.cs (217 lines changed)
  7. UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/TFSharpInferenceEngine.cs.meta (12 lines changed)
  8. docs/Using-TensorFlow-Sharp-in-Unity.md (4 lines changed)

UnitySDK/Assets/ML-Agents/Scripts/LearningBrain.cs (59 lines changed)


private ITensorAllocator m_TensorAllocator;
private TensorGenerator m_TensorGenerator;
private TensorApplier m_TensorApplier;
#if ENABLE_TENSORFLOW
public TextAsset model;
private ModelParamLoader _modelParamLoader;
private TFSharpInferenceEngine _engine;
#else
public NNModel model;
private Model m_BarracudaModel;
private IWorker m_Engine;

private string[] m_OutputNames;
#endif
[Tooltip("Inference execution device. CPU is the fastest option for most ML-Agents models. " +
"(This field is not applicable for training.)")]

if (m_TensorAllocator == null)
m_TensorAllocator = new TensorCachingAllocator();
#if ENABLE_TENSORFLOW
if (model != null)
{
_engine = new TFSharpInferenceEngine();
_engine.PrepareModel(model.bytes);
}
else
{
_engine = null;
}
_modelParamLoader = ModelParamLoader.GetLoaderAndCheck(_engine, brainParameters);
_inferenceInputs = _modelParamLoader.GetInputTensors();
_inferenceOutputs = _modelParamLoader.GetOutputTensors();
_tensorGenerator = new TensorGenerator(brainParameters, seed, _tensorAllocator);
_tensorApplier = new TensorApplier(brainParameters, seed, _tensorAllocator);
#else
if (model != null)
{
#if BARRACUDA_VERBOSE
m_Verbose = true;
#endif
D.logEnabled = m_Verbose;

m_OutputNames = m_ModelParamLoader.GetOutputNames();
m_TensorGenerator = new TensorGenerator(brainParameters, seed, m_TensorAllocator, m_BarracudaModel);
m_TensorApplier = new TensorApplier(brainParameters, seed, m_TensorAllocator, m_BarracudaModel);
#endif
}
/// <summary>
/// Checks the compatibility of the loaded model with the Brain Parameters.
/// </summary>
/// <returns>The list of failed checks between the model and the
/// Brain Parameters</returns>
public IEnumerable<string> GetModelFailedChecks()
{
#if ENABLE_TENSORFLOW
return (_modelParamLoader != null) ? _modelParamLoader.GetChecks() : new List<string>();
#else
return (m_ModelParamLoader != null) ? m_ModelParamLoader.GetChecks() : new List<string>();
#endif
}
/// <inheritdoc />

}
Profiler.BeginSample("LearningBrain.DecideAction");
#if ENABLE_TENSORFLOW
if (_engine == null)
{
Debug.LogError($"No model was present for the Brain {name}.");
return;
}
// Prepare the input tensors to be fed into the engine
_tensorGenerator.GenerateTensors(_inferenceInputs, currentBatchSize, agentInfos);
// Prepare the output tensors to be fed into the engine
_tensorGenerator.GenerateTensors(_inferenceOutputs, currentBatchSize, agentInfos);
// Execute the Model
Profiler.BeginSample($"MLAgents.{name}.ExecuteGraph");
_engine.ExecuteGraph(_inferenceInputs, _inferenceOutputs);
Profiler.EndSample();
// Update the outputs
_tensorApplier.ApplyTensors(_inferenceOutputs, agentInfos);
#else
if (m_Engine == null)
{
Debug.LogError($"No model was present for the Brain {name}.");

// Update the outputs
m_TensorApplier.ApplyTensors(m_InferenceOutputs, m_AgentInfos);
Profiler.EndSample();
#endif
#if !ENABLE_TENSORFLOW
protected Dictionary<string, Tensor> PrepareBarracudaInputs(IEnumerable<TensorProxy> infInputs)
{
var inputs = new Dictionary<string, Tensor>();
foreach (var inp in infInputs)
{
inputs[inp.Name] = inp.Data;
}
return inputs;
}
#endif
#if !ENABLE_TENSORFLOW
#endif
m_TensorAllocator?.Reset(false);
}
}
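With the ENABLE_TENSORFLOW branch gone, LearningBrain relies on Barracuda alone. Below is a minimal standalone sketch of that path, assuming the Barracuda package is installed and a version where `ModelLoader.Load` accepts an `NNModel` asset (in some Barracuda versions it takes the raw bytes instead); the class and method names here are illustrative, not part of ML-Agents:

```csharp
using Barracuda;
using UnityEngine;

// Standalone sketch of the Barracuda inference path LearningBrain now uses.
public class BarracudaInferenceSketch : MonoBehaviour
{
    public NNModel model;      // serialized model asset, as on LearningBrain
    private IWorker m_Engine;

    void Start()
    {
        // Load the model and create a CPU worker (CPU is the recommended device).
        var barracudaModel = ModelLoader.Load(model);
        m_Engine = WorkerFactory.CreateWorker(WorkerFactory.Type.CSharp, barracudaModel);
    }

    public float ReadFirstAction(float[] vectorObservation)
    {
        // Batch of one agent; the flat observation fills the channel dimension.
        using (var input = new Tensor(1, vectorObservation.Length, vectorObservation))
        {
            m_Engine.Execute(input);
            // PeekOutput returns a tensor owned by the worker; no dispose needed.
            var output = m_Engine.PeekOutput();
            return output[0];
        }
    }

    void OnDestroy()
    {
        m_Engine?.Dispose();
    }
}
```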

docs/FAQ.md (13 lines changed)


to [Setting Up The ML-Agents Toolkit Within
Unity](Installation.md#setting-up-ml-agent-within-unity) for a solution.
## TensorFlowSharp flag not turned on
If you have already imported the TensorFlowSharp plugin but haven't set the
ENABLE_TENSORFLOW flag in your Scripting Define Symbols, you will see the
following error message:
```console
UnityAgentsException: The brain 3DBallLearning was set to inference mode but the TensorFlow library is not present in the Unity project.
```
This error message occurs because the TensorFlowSharp plugin won't be used
without the ENABLE_TENSORFLOW flag. Refer to [Setting Up The ML-Agents Toolkit
Within Unity](Installation.md#setting-up-ml-agent-within-unity) for a solution.
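If you prefer to script the change instead of editing Player Settings by hand, the flag can be added with Unity's editor API; a minimal sketch (the menu path and class name are illustrative, not part of ML-Agents):

```csharp
using UnityEditor;

// Editor-only sketch: appends ENABLE_TENSORFLOW to the Scripting Define Symbols
// of the selected build target group (equivalent to editing
// Player Settings > Other Settings > Scripting Define Symbols manually).
public static class EnableTensorFlowFlag
{
    [MenuItem("ML-Agents/Enable TensorFlow")]  // hypothetical menu entry
    public static void AddDefine()
    {
        var group = EditorUserBuildSettings.selectedBuildTargetGroup;
        var defines = PlayerSettings.GetScriptingDefineSymbolsForGroup(group);
        if (!defines.Contains("ENABLE_TENSORFLOW"))
        {
            defines = string.IsNullOrEmpty(defines)
                ? "ENABLE_TENSORFLOW"
                : defines + ";ENABLE_TENSORFLOW";
            PlayerSettings.SetScriptingDefineSymbolsForGroup(group, defines);
        }
    }
}
```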
## Environment Permission Error
If you directly import your Unity environment without building it in the

docs/dox-ml-agents.conf (2 lines changed)


# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED = ENABLE_TENSORFLOW
PREDEFINED =
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The

UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/ModelParamLoader.cs.meta (3 lines changed)


fileFormatVersion: 2
guid: 259e3a0e37204794a885219327bd4c02
timeCreated: 1539197357

UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/ModelParamLoader.cs (533 lines changed)


#if ENABLE_TENSORFLOW
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using Barracuda;
namespace MLAgents.InferenceBrain
{
/// <summary>
/// Prepares the Tensors for the Learning Brain and exposes a list of failed checks if Model
/// and BrainParameters are incompatible.
/// </summary>
public class ModelParamLoader
{
private enum ModelActionType
{
Unknown,
Discrete,
Continuous
}
private const long ApiVersion = 2;
private TFSharpInferenceEngine _engine;
private BrainParameters _brainParameters;
private List<string> _failedModelChecks = new List<string>();
/// <summary>
/// Factory for the ModelParamLoader : Creates a ModelParamLoader and runs the checks
/// on it.
/// </summary>
/// <param name="engine"> The InferenceEngine we get the parameters and the checks from
/// </param>
/// <param name="brainParameters"> The BrainParamters that are used verify the
/// compatibility with the InferenceEngine</param>
/// <returns></returns>
public static ModelParamLoader GetLoaderAndCheck(TFSharpInferenceEngine engine,
BrainParameters brainParameters)
{
ModelParamLoader modelParamLoader = new ModelParamLoader(engine, brainParameters);
modelParamLoader.GenerateChecks();
return modelParamLoader;
}
private ModelParamLoader(TFSharpInferenceEngine engine, BrainParameters brainParameters)
{
_engine = engine;
_brainParameters = brainParameters;
}
/// <summary>
/// Generates the Tensor inputs that are expected to be present in the Model.
/// </summary>
/// <returns>TensorProxy IEnumerable with the expected Tensor inputs</returns>
public IReadOnlyList<TensorProxy> GetInputTensors()
{
return _engine?.InputFeatures();
}
/// <summary>
/// Generates the Tensor outputs that are expected to be present in the Model.
/// </summary>
/// <returns>TensorProxy IEnumerable with the expected Tensor outputs</returns>
public IReadOnlyList<TensorProxy> GetOutputTensors()
{
var tensorList = new List<TensorProxy>();
if (_brainParameters.vectorActionSpaceType == SpaceType.continuous)
{
tensorList.Add(new TensorProxy()
{
Name = TensorNames.ActionOutput,
Shape = new long[]
{
-1, _brainParameters.vectorActionSize[0]
},
ValueType = TensorProxy.TensorType.FloatingPoint,
Data = null
});
}
else
{
tensorList.Add(
new TensorProxy()
{
Name = TensorNames.ActionOutput,
Shape = new long[]
{
-1, _brainParameters.vectorActionSize.Sum()
},
ValueType = TensorProxy.TensorType.FloatingPoint,
Data = null
});
}
var memory = GetIntScalar(TensorNames.MemorySize);
if (memory > 0)
{
tensorList.Add(new TensorProxy()
{
Name = TensorNames.RecurrentOutput,
Shape = new long[2]
{
-1, memory
},
ValueType = TensorProxy.TensorType.FloatingPoint,
Data = null
});
}
return tensorList;
}
/// <summary>
/// Queries the InferenceEngine for the value of a variable in the graph given its name.
/// Only works with int32 Tensors with zero dimensions containing a single element.
/// If the node was not found or could not be retrieved, the value -1 will be returned.
/// </summary>
/// <param name="name">The name of the Tensor variable</param>
/// <returns>The value of the scalar variable in the model. (-1 if not found)</returns>
private int GetIntScalar(string name)
{
var outputs = new TensorProxy[]
{
new TensorProxy()
{
Name = name,
ValueType = TensorProxy.TensorType.Integer,
Shape = new long[] {},
Data = new Tensor(1, 1)
},
};
try
{
_engine.ExecuteGraph(new TensorProxy[0], outputs);
}
catch (Exception ex)
{
UnityEngine.Debug.LogError($"Failed to execute GetIntScalar()\n{ex}");
return -1;
}
return (int)outputs[0].Data[0];
}
/// <summary>
/// Retrieves an IEnumerable of string corresponding to the failed compatibility checks
/// between the InferenceEngine and the BrainParameters.
/// </summary>
public IEnumerable<string> GetChecks()
{
return _failedModelChecks;
}
/// <summary>
/// Generates the list of failed checks that failed when comparing the data from the Model
/// and from the BrainParameters
/// </summary>
private void GenerateChecks()
{
_failedModelChecks.Clear();
if (_engine == null)
{
_failedModelChecks.Add(
"There is no model for this Brain, cannot run inference. " +
"(But can still train)");
return;
}
var modelApiVersion = GetIntScalar(TensorNames.VersionNumber);
var memorySize = GetIntScalar(TensorNames.MemorySize);
var isContinuousInt = GetIntScalar(TensorNames.IsContinuousControl);
var isContinuous = GetActionType(isContinuousInt);
var actionSize = GetIntScalar(TensorNames.ActionOutputShape);
if (modelApiVersion == -1)
{
_failedModelChecks.Add(
"Model was not trained using the right version of ML-Agents. Cannot use this " +
"model.");
return;
}
if (modelApiVersion != ApiVersion)
{
_failedModelChecks.Add(
$"Version of the trainer the model was trained with ({modelApiVersion}) " +
$"is not compatible with the Brain's version ({ApiVersion}).");
return;
}
CheckIntScalarPresenceHelper(new Dictionary<string, int>()
{
{TensorNames.MemorySize, memorySize},
{TensorNames.IsContinuousControl, isContinuousInt},
{TensorNames.ActionOutputShape, actionSize}
});
CheckInputTensorPresence(memorySize, isContinuous);
CheckOutputTensorPresence(memorySize);
CheckInputTensorShape();
CheckOutputTensorShape(isContinuous, actionSize);
}
/// <summary>
/// Converts the integer value in the model corresponding to the type of control to a
/// ModelActionType.
/// </summary>
/// <param name="isContinuousInt"> The integer value in the model indicating the
/// type of control</param>
/// <returns>The equivalent ModelActionType</returns>
private static ModelActionType GetActionType(int isContinuousInt)
{
ModelActionType isContinuous;
switch (isContinuousInt)
{
case 0:
isContinuous = ModelActionType.Discrete;
break;
case 1:
isContinuous = ModelActionType.Continuous;
break;
default:
isContinuous = ModelActionType.Unknown;
break;
}
return isContinuous;
}
/// <summary>
/// Given a Dictionary of node names to int values, adds a failed check for every
/// entry with the invalid value of -1.
/// </summary>
/// <param name="requiredScalarFields"> Mapping from node names to int values</param>
private void CheckIntScalarPresenceHelper(Dictionary<string, int> requiredScalarFields)
{
foreach (var field in requiredScalarFields)
if (field.Value == -1)
{
_failedModelChecks.Add(
$"Missing node in the model provided : {field.Key}");
}
}
/// <summary>
/// Generates failed checks that correspond to inputs expected by the model that are not
/// present in the BrainParameters.
/// </summary>
/// <param name="memory"> The memory size that the model is expecting/</param>
/// <param name="isContinuous"> Whether the model is expecting continuous or
/// discrete control.</param>
/// <returns>An IEnumerable of string corresponding to the failed input presence
/// checks.</returns>
private void CheckInputTensorPresence(int memory, ModelActionType isContinuous)
{
var tensorsNames = GetInputTensors().Select(x => x.Name).ToList();
// If there is no Vector Observation Input but the Brain Parameters expect one.
if ((_brainParameters.vectorObservationSize != 0) &&
(!tensorsNames.Contains(TensorNames.VectorObservationPlacholder)))
{
_failedModelChecks.Add(
"The model does not contain a Vector Observation Placeholder Input. " +
"You must set the Vector Observation Space Size to 0.");
}
// If there are fewer Visual Observation Inputs than the
// Brain Parameters expect.
for (var visObsIndex = 0;
visObsIndex < _brainParameters.cameraResolutions.Length;
visObsIndex++)
{
if (!tensorsNames.Contains(
TensorNames.VisualObservationPlaceholderPrefix + visObsIndex))
{
_failedModelChecks.Add(
"The model does not contain a Visual Observation Placeholder Input " +
"for visual observation " + visObsIndex + ".");
}
}
// If the model has a positive memory size but is missing the recurrent input
if (memory > 0)
{
if (!tensorsNames.Contains(TensorNames.RecurrentInPlaceholder))
{
_failedModelChecks.Add(
"The model does not contain a Recurrent Input Node but has memory_size.");
}
}
// If the model uses discrete control but does not have an input for action masks
if (isContinuous == ModelActionType.Discrete)
{
if (!tensorsNames.Contains(TensorNames.ActionMaskPlaceholder))
{
_failedModelChecks.Add(
"The model does not contain an Action Mask but is using Discrete Control.");
}
}
}
/// <summary>
/// Generates failed checks that correspond to outputs expected by the model that are not
/// present in the BrainParameters.
/// </summary>
/// <param name="memory"> The memory size that the model is expecting/</param>
/// <returns>A IEnumerable of string corresponding to the failed output presence
/// checks.</returns>
private void CheckOutputTensorPresence(int memory)
{
var tensorsNames = GetOutputTensors().Select(x => x.Name).ToList();
// If there is no Action Output.
if (!tensorsNames.Contains(TensorNames.ActionOutput))
{
_failedModelChecks.Add("The model does not contain an Action Output Node.");
}
// If there is no Recurrent Output but the model is Recurrent.
if (memory > 0)
{
if (!tensorsNames.Contains(TensorNames.RecurrentOutput))
{
_failedModelChecks.Add(
"The model does not contain a Recurrent Output Node but has memory_size.");
}
}
}
/// <summary>
/// Generates failed checks that correspond to inputs shapes incompatibilities between
/// the model and the BrainParameters.
/// </summary>
private void CheckInputTensorShape()
{
var tensorTester =
new Dictionary<string, Func<TensorProxy, string>>()
{
{TensorNames.VectorObservationPlacholder, CheckVectorObsShape},
{TensorNames.PreviousActionPlaceholder, CheckPreviousActionShape},
{TensorNames.RandomNormalEpsilonPlaceholder, ((tensor) => null)},
{TensorNames.ActionMaskPlaceholder, ((tensor) => null)},
{TensorNames.SequenceLengthPlaceholder, ((tensor) => null)},
{TensorNames.RecurrentInPlaceholder, ((tensor) => null)},
};
for (var obsIndex = 0; obsIndex < _brainParameters.cameraResolutions.Length; obsIndex++)
{
var index = obsIndex;
tensorTester[TensorNames.VisualObservationPlaceholderPrefix + obsIndex] =
(tensor) => CheckVisualObsShape(tensor, index);
}
// If the model expects an input but it is not in this list
foreach (var tensor in GetInputTensors())
{
if (!tensorTester.ContainsKey(tensor.Name))
{
_failedModelChecks.Add(
"Model requires an unknown input named : " + tensor.Name);
}
else
{
var tester = tensorTester[tensor.Name];
var error = tester.Invoke(tensor);
if (error != null)
{
_failedModelChecks.Add(error);
}
}
}
}
/// <summary>
/// Checks that the shape of the Vector Observation input placeholder is the same in the
/// model and in the Brain Parameters.
/// </summary>
/// <param name="tensor"> The tensor that is expected by the model</param>
/// <returns>If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.</returns>
private string CheckVectorObsShape(TensorProxy tensor)
{
var vecObsSizeBp = _brainParameters.vectorObservationSize;
var numStackedVector = _brainParameters.numStackedVectorObservations;
var totalVecObsSizeT = tensor.Shape[1];
if (vecObsSizeBp * numStackedVector != totalVecObsSizeT)
{
return string.Format(
"Vector Observation Size of the model does not match. " +
"Received {0} x {1} but was expecting {2}.",
vecObsSizeBp, numStackedVector, totalVecObsSizeT);
}
return null;
}
/// <summary>
/// Checks that the shape of the Previous Vector Action input placeholder is the same in the
/// model and in the Brain Parameters.
/// </summary>
/// <param name="tensor"> The tensor that is expected by the model</param>
/// <returns>If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.</returns>
private string CheckPreviousActionShape(TensorProxy tensor)
{
var numberActionsBp = _brainParameters.vectorActionSize.Length;
var numberActionsT = tensor.Shape[1];
if (numberActionsBp != numberActionsT)
{
return string.Format(
"Previous Action Size of the model does not match. " +
"Received {0} but was expecting {1}.",
numberActionsBp, numberActionsT);
}
return null;
}
/// <summary>
/// Checks that the shape of the visual observation input placeholder is the same in the
/// model and in the Brain Parameters.
/// </summary>
/// <param name="tensor"> The tensor that is expected by the model</param>
/// <param name="visObsIndex"> The index of the visual observation.</param>
/// <returns>If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.</returns>
private string CheckVisualObsShape(TensorProxy tensor, int visObsIndex)
{
var resolutionBp = _brainParameters.cameraResolutions[visObsIndex];
var widthBp = resolutionBp.width;
var heightBp = resolutionBp.height;
var pixelBp = resolutionBp.blackAndWhite ? 1 : 3;
var heightT = tensor.Shape[1];
var widthT = tensor.Shape[2];
var pixelT = tensor.Shape[3];
if ((widthBp != widthT) || (heightBp != heightT) || (pixelBp != pixelT))
{
return string.Format(
"The visual Observation {0} of the model does not match. " +
"Received TensorProxy of shape [?x{1}x{2}x{3}] but was expecting [?x{4}x{5}x{6}].",
visObsIndex, widthBp, heightBp, pixelBp, widthT, heightT, pixelT);
}
return null;
}
/// <summary>
/// Generates failed checks that correspond to output shapes incompatibilities between
/// the model and the BrainParameters.
/// </summary>
/// <param name="isContinuous"> Whether the model is expecting continuous or
/// discrete control.</param>
/// <param name="modelActionSize"> The size of the action output that is expected
/// by the model.</param>
/// <returns>An IEnumerable of string corresponding to the incompatible shapes between
/// model and BrainParameters.</returns>
private void CheckOutputTensorShape(ModelActionType isContinuous, int modelActionSize)
{
if (isContinuous == ModelActionType.Unknown)
{
_failedModelChecks.Add(
"Cannot infer type of Control from the provided model.");
return;
}
if (isContinuous == ModelActionType.Continuous &&
_brainParameters.vectorActionSpaceType != SpaceType.continuous)
{
_failedModelChecks.Add(
"Model has been trained using Continuous Control but the Brain Parameters " +
"suggest Discrete Control.");
return;
}
if (isContinuous == ModelActionType.Discrete &&
_brainParameters.vectorActionSpaceType != SpaceType.discrete)
{
_failedModelChecks.Add(
"Model has been trained using Discrete Control but the Brain Parameters " +
"suggest Continuous Control.");
return;
}
var tensorTester = new Dictionary<string, Func<TensorProxy, int, string>>();
if (_brainParameters.vectorActionSpaceType == SpaceType.continuous)
{
tensorTester[TensorNames.ActionOutput] = CheckContinuousActionOutputShape;
}
else
{
tensorTester[TensorNames.ActionOutput] = CheckDiscreteActionOutputShape;
}
// Check the shape of each model output for which a check is registered
foreach (var tensor in GetOutputTensors())
{
if (tensorTester.ContainsKey(tensor.Name))
{
var tester = tensorTester[tensor.Name];
var error = tester.Invoke(tensor, modelActionSize);
if (error != null)
{
_failedModelChecks.Add(error);
}
}
}
}
/// <summary>
/// Checks that the shape of the discrete action output is the same in the
/// model and in the Brain Parameters.
/// </summary>
/// <param name="tensor"> The tensor that is expected by the model</param>
/// <param name="modelActionSize"> The size of the action output that is expected
/// by the model.</param>
/// <returns>If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.</returns>
private string CheckDiscreteActionOutputShape(TensorProxy tensor, int modelActionSize)
{
var bpActionSize = _brainParameters.vectorActionSize.Sum();
if (modelActionSize != bpActionSize)
{
return string.Format(
"Action Size of the model does not match. " +
"The BrainParameters expect {0} but the model contains {1}.",
bpActionSize, modelActionSize);
}
return null;
}
/// <summary>
/// Checks that the shape of the continuous action output is the same in the
/// model and in the Brain Parameters.
/// </summary>
/// <param name="tensor"> The tensor that is expected by the model</param>
/// <param name="modelActionSize"> The size of the action output that is expected
/// by the model.</param>
/// <returns>If the Check failed, returns a string containing information about why the
/// check failed. If the check passed, returns null.</returns>
private string CheckContinuousActionOutputShape(TensorProxy tensor, int modelActionSize)
{
var bpActionSize = _brainParameters.vectorActionSize[0];
if (modelActionSize != bpActionSize)
{
return string.Format(
"Action Size of the model does not match. " +
"The BrainParameters expect {0} but the model contains {1}.",
bpActionSize, modelActionSize);
}
return null;
}
}
}
#endif
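For reference, this is roughly how the loader was driven from LearningBrain before removal; a hedged sketch where `modelBytes` and `brainParameters` stand in for the Brain's own fields:

```csharp
// Hypothetical call site mirroring the removed ENABLE_TENSORFLOW path.
var engine = new TFSharpInferenceEngine();
engine.PrepareModel(modelBytes);  // modelBytes: frozen-graph bytes (placeholder)

// The factory runs all compatibility checks between the model and BrainParameters.
var loader = ModelParamLoader.GetLoaderAndCheck(engine, brainParameters);
foreach (var failure in loader.GetChecks())
{
    UnityEngine.Debug.LogWarning(failure);
}

// Tensor descriptions the engine will read from / write to on each decision.
var inferenceInputs = loader.GetInputTensors();
var inferenceOutputs = loader.GetOutputTensors();
```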

UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/TFSharpInferenceEngine.cs (217 lines changed)


#if ENABLE_TENSORFLOW
using System.Collections.Generic;
using TensorFlow;
using System.Linq;
using System;
using UnityEngine.Profiling;
using System.Runtime.InteropServices;
using Barracuda;
using UnityEngine;
namespace MLAgents.InferenceBrain
{
/// <summary>
/// TFSharpInferenceEngine - Inference engine utilizing the TensorFlowSharp package to run
/// inference on frozen TensorFlow models.
/// </summary>
public class TFSharpInferenceEngine
{
private TFGraph m_graph;
private TFSession m_session;
public void PrepareModel(byte[] model)
{
Profiler.BeginSample("TFSharpInferenceComponent.PrepareModel");
#if UNITY_ANDROID && !UNITY_EDITOR
// This needs to be called only once and will raise an exception if called multiple times
try
{
TensorFlowSharp.Android.NativeBinding.Init();
}
catch
{
}
#endif
m_graph = new TFGraph();
m_graph.Import(model);
m_session = new TFSession(m_graph);
Profiler.EndSample();
}
public int ExecuteGraph(IEnumerable<TensorProxy> inputs_it, IEnumerable<TensorProxy> outputs_it)
{
Profiler.BeginSample("TFSharpInferenceComponent.ExecuteGraph");
TensorProxy[] inputs = inputs_it.ToArray();
TensorProxy[] outputs = outputs_it.ToArray();
// TODO: Can/should we pre-allocate that?
TFSession.Runner runner = m_session.GetRunner();
inputs.ToList().ForEach((TensorProxy input) =>
{
if (input.Shape.Length == 0)
{
var data = input.Data[0];
if (input.DataType == typeof(int))
{
runner.AddInput(m_graph[input.Name][0], (int)data);
}
else
{
runner.AddInput(m_graph[input.Name][0], (float)data);
}
}
else
{
runner.AddInput(m_graph[input.Name][0], input.DataType == typeof(int) ?
TensorUtils.BarracudaToIntArray(input.Data) :
TensorUtils.BarracudaToFloatArray(input.Data));
}
});
// TODO: better way to pre-allocate this?
outputs.ToList().ForEach(s => runner.Fetch(s.Name));
TFStatus status = new TFStatus();
Profiler.BeginSample("TFSharpInferenceComponent.ExecuteGraph.RunnerRun");
var out_tensors = runner.Run(status);
Profiler.EndSample();
if (!status.Ok)
{
Debug.LogError(status.StatusMessage);
return -1;
}
Debug.Assert(outputs.Length == out_tensors.Length);
for (var i = 0; i < outputs.Length; ++i)
{
if (outputs[i].Shape.Length == 0)
{
// Handle scalars
outputs[i].Data = new Tensor(1, 1);
outputs[i].Data[0] = (float)(int)out_tensors[i].GetValue();
}
else
{
outputs[i].Data = TensorUtils.ArrayToBarracuda(out_tensors[i].GetValue() as Array);
}
}
Profiler.EndSample();
// TODO: create error codes
return 0;
}
[DllImport("libtensorflow")]
private static extern unsafe void TF_OperationGetAttrType(IntPtr oper, string attr_name,
TFDataType* value, IntPtr status);
[DllImport("libtensorflow")]
private static extern unsafe void TF_OperationGetAttrShape(IntPtr oper, string attr_name, long[] value,
int num_dims, IntPtr status);
private TensorProxy GetOpMetadata(TFOperation op)
{
TFStatus status = new TFStatus();
// Query the shape
long[] shape = null;
var shape_attr = op.GetAttributeMetadata("shape", status);
if (!status.Ok || shape_attr.TotalSize <= 0)
{
Debug.LogWarning($"Operation {op.Name} does not contain shape attribute or it" +
$" doesn't contain valid shape data! Status: {status.StatusMessage}");
}
else
{
if (shape_attr.IsList)
{
throw new NotImplementedException("Querying lists is not implemented yet!");
}
else
{
TFStatus s = new TFStatus();
long[] dims = new long[shape_attr.TotalSize];
TF_OperationGetAttrShape(op.Handle, "shape", dims, (int)shape_attr.TotalSize,
s.Handle);
if (!s.Ok)
{
throw new FormatException("Could not query model for op shape (" + op.Name + ")");
}
else
{
shape = new long[dims.Length];
for (int i = 0; i < shape_attr.TotalSize; ++i)
{
if (dims[i] == -1)
{
// we have to use batch size 1
shape[i] = 1;
}
else
{
shape[i] = dims[i];
}
}
}
}
}
// Query the data type
TFDataType type_value = new TFDataType();
unsafe
{
TFStatus s = new TFStatus();
TF_OperationGetAttrType(op.Handle, "dtype", &type_value, s.Handle);
if (!s.Ok)
{
Debug.LogWarning("Operation " + op.Name +
": error retrieving dtype, assuming float!");
type_value = TFDataType.Float;
}
}
TensorProxy.TensorType placeholder_type = TensorProxy.TensorType.FloatingPoint;
switch (type_value)
{
case TFDataType.Float:
placeholder_type = TensorProxy.TensorType.FloatingPoint;
break;
case TFDataType.Int32:
placeholder_type = TensorProxy.TensorType.Integer;
break;
default:
Debug.LogWarning("Operation " + op.Name +
" is not a float/integer. Proceed at your own risk!");
break;
}
TensorProxy t = new TensorProxy
{
Data = null,
Name = op.Name,
Shape = shape,
ValueType = placeholder_type
};
return t;
}
public IReadOnlyList<TensorProxy> InputFeatures()
{
List<TensorProxy> inputs = new List<TensorProxy>();
foreach (var op in m_graph.GetEnumerator())
{
if (op.OpType == "Placeholder")
{
inputs.Add(GetOpMetadata(op));
}
}
return inputs;
}
}
}
#endif
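Taken on its own, ExecuteGraph fills each output TensorProxy's Data in place and returns 0 on success or -1 on failure. A hedged sketch of a direct call (the node names and shapes are placeholders, not guaranteed to exist in any given graph):

```csharp
// Hypothetical direct use of the removed engine.
var engine = new TFSharpInferenceEngine();
engine.PrepareModel(modelAsset.bytes);  // modelAsset: a TextAsset with the frozen graph

var input = new TensorProxy
{
    Name = "vector_observation",        // placeholder node name
    ValueType = TensorProxy.TensorType.FloatingPoint,
    Shape = new long[] { 1, 8 },
    Data = new Tensor(1, 8)             // Barracuda tensor backing the values
};
var output = new TensorProxy
{
    Name = "action",                    // placeholder node name
    ValueType = TensorProxy.TensorType.FloatingPoint,
    Shape = new long[] { 1, 2 },
    Data = null                         // ExecuteGraph allocates the output data
};

if (engine.ExecuteGraph(new[] { input }, new[] { output }) == 0)
{
    UnityEngine.Debug.Log($"First action value: {output.Data[0]}");
}
```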

UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/TFSharpInferenceEngine.cs.meta (12 lines changed)


fileFormatVersion: 2
guid: 120cbe3fa702f4e428f57ae1d893a0a7
timeCreated: 1535148728
licenseType: Free
MonoImporter:
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

docs/Using-TensorFlow-Sharp-in-Unity.md (4 lines changed)


# Using TensorFlowSharp in Unity
As of version 0.7.0, we have included our own Inference Engine as a replacement for TensorFlowSharp (TFS). Please refer to the [release notes](https://github.com/Unity-Technologies/ml-agents/releases/tag/0.7.0) and the [Unity Inference Engine documentation](Unity-Inference-Engine.md).