浏览代码
Reducing complexity on a number of classes. (#2480)
Only cosmetic and readability improvements. No functional changes were intended. Utilities.cs - Fixed comments across file - Made class static - Removed unnecessary imports - Removed unused method arguments - Renamed variables as appropriate to make usage clearer - In AddRangeNoAlloc, disabled (by comment) Rider’s suggestion to revert to use of built-in Range field (Fixed) - In TextureToTensorProxy, swapped order of first two arguments to be more in-line with convention of input, output UtilitiesTests.cs - Removed unnecessary imports - Simplified array creation commands GeneratorImp.cs - Rider automatically deleted spaces on empty lines - Changed call to TextureToTensorProxy to mirror new argument ordering * Clean-up to UnityAgentsException.cs - Removed unnecessary imports - Fixed comment warning - Fixed method header * Improvements to Startup.cs - Created const for SCENE_NAME field - Fixed strin.../develop-gpu-test
GitHub
5 年前
当前提交
69613a01
共有 22 个文件被更改,包括 1066 次插入 和 925 次删除
-
63UnitySDK/Assets/ML-Agents/Editor/Tests/EditModeTestInternalBrainTensorApplier.cs
-
111UnitySDK/Assets/ML-Agents/Editor/Tests/EditModeTestInternalBrainTensorGenerator.cs
-
184UnitySDK/Assets/ML-Agents/Editor/Tests/MultinomialTest.cs
-
130UnitySDK/Assets/ML-Agents/Editor/Tests/RandomNormalTest.cs
-
19UnitySDK/Assets/ML-Agents/Editor/Tests/UtilitiesTests.cs
-
143UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/ApplierImpl.cs
-
255UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/BarracudaModelParamLoader.cs
-
144UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/GeneratorImpl.cs
-
34UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/TensorApplier.cs
-
66UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/TensorGenerator.cs
-
228UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/TensorProxy.cs
-
98UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/Utils/Multinomial.cs
-
62UnitySDK/Assets/ML-Agents/Scripts/InferenceBrain/Utils/RandomNormal.cs
-
2UnitySDK/Assets/ML-Agents/Scripts/LearningBrain.cs
-
19UnitySDK/Assets/ML-Agents/Scripts/Startup.cs
-
19UnitySDK/Assets/ML-Agents/Scripts/UnityAgentsException.cs
-
109UnitySDK/Assets/ML-Agents/Scripts/Utilities.cs
-
194UnitySDK/Assets/ML-Agents/Editor/Tests/DiscreteActionOutputApplierTest.cs
-
11UnitySDK/Assets/ML-Agents/Editor/Tests/DiscreteActionOutputApplierTest.cs.meta
-
83UnitySDK/Assets/ML-Agents/Editor/Tests/TensorUtilsTest.cs
-
11UnitySDK/Assets/ML-Agents/Editor/Tests/TensorUtilsTest.cs.meta
-
6UnitySDK/UnitySDK.sln.DotSettings
|
|||
using System; |
|||
using System.Collections.Generic; |
|||
using System.Runtime.InteropServices; |
|||
using UnityEngine; |
|||
using MLAgents.InferenceBrain.Utils; |
|||
|
|||
/// <summary>
|
|||
/// Tensor - A class to encapsulate a Tensor used for inference.
|
|||
///
|
|||
/// This class contains the Array that holds the data array, the shapes, type and the placeholder in the
|
|||
/// execution graph. All the fields are editable in the inspector, allowing the user to specify everything
|
|||
/// but the data in a graphical way.
|
|||
/// </summary>
|
|||
/// <summary>
/// Tensor - A class to encapsulate a Tensor used for inference.
///
/// This class contains the Array that holds the data array, the shapes, type and the
/// placeholder in the execution graph. All the fields are editable in the inspector,
/// allowing the user to specify everything but the data in a graphical way.
/// </summary>
[System.Serializable]
public class TensorProxy
{
    public enum TensorType
    {
        Integer,
        FloatingPoint
    }

    // Maps the serializable TensorType onto the runtime System.Type it stands for.
    private static readonly Dictionary<TensorType, Type> k_TypeLookup =
        new Dictionary<TensorType, Type>
        {
            {TensorType.FloatingPoint, typeof(float)},
            {TensorType.Integer, typeof(int)}
        };

    public string Name;
    public TensorType ValueType;

    // Since Type is not serializable, ValueType is what the Inspector edits;
    // the actual System.Type is resolved on demand from the lookup table.
    public Type DataType => k_TypeLookup[ValueType];

    public long[] Shape;

    public Tensor Data;
}
|||
|
|||
public class TensorUtils |
|||
{ |
|||
/// <summary>
/// Ensures the tensor proxy's data buffer matches the requested batch size,
/// disposing the old buffer and allocating a new one when it does not.
/// </summary>
/// <param name="tensor">Tensor proxy whose data buffer may be resized.</param>
/// <param name="batch">Desired batch size (first dimension of the shape).</param>
/// <param name="allocator">Allocator used to create the replacement buffer.</param>
public static void ResizeTensor(TensorProxy tensor, int batch, ITensorAllocator allocator)
{
    var alreadySized = tensor.Shape[0] == batch
        && tensor.Data != null
        && tensor.Data.batch == batch;
    if (alreadySized)
    {
        return;
    }

    tensor.Data?.Dispose();
    tensor.Shape[0] = batch;

    if (tensor.Shape.Length == 4)
    {
        tensor.Data = allocator.Alloc(
            new TensorShape(
                batch,
                (int)tensor.Shape[1],
                (int)tensor.Shape[2],
                (int)tensor.Shape[3]));
    }
    else
    {
        tensor.Data = allocator.Alloc(
            new TensorShape(batch, (int)tensor.Shape[tensor.Shape.Length - 1]));
    }
}
|||
/// <summary>
|
|||
/// Tensor - A class to encapsulate a Tensor used for inference.
|
|||
///
|
|||
/// This class contains the Array that holds the data array, the shapes, type and the
|
|||
/// placeholder in the execution graph. All the fields are editable in the inspector,
|
|||
/// allowing the user to specify everything but the data in a graphical way.
|
|||
/// </summary>
|
|||
[Serializable] |
|||
public class TensorProxy |
|||
{ |
|||
public enum TensorType |
|||
{ |
|||
Integer, |
|||
FloatingPoint |
|||
}; |
|||
public static Array BarracudaToFloatArray(Tensor tensor) |
|||
{ |
|||
Array res; |
|||
|
|||
if (tensor.height == 1 && tensor.width == 1) |
|||
res = new float[tensor.batch, tensor.channels]; |
|||
else |
|||
res = new float[tensor.batch, tensor.height, tensor.width, tensor.channels]; |
|||
|
|||
Buffer.BlockCopy(tensor.readonlyArray, 0, res, 0, tensor.length * Marshal.SizeOf<float>()); |
|||
private static readonly Dictionary<TensorType, Type> _typeMap = |
|||
new Dictionary<TensorType, Type>() |
|||
{ |
|||
{TensorType.FloatingPoint, typeof(float)}, |
|||
{TensorType.Integer, typeof(int)} |
|||
}; |
|||
return res; |
|||
} |
|||
|
|||
public static Array BarracudaToIntArray(Tensor tensor) |
|||
{ |
|||
public string name; |
|||
public TensorType valueType; |
|||
if (tensor.height == 1 && tensor.width == 1) |
|||
{ |
|||
var res = new int[tensor.batch, tensor.channels]; |
|||
|
|||
for (int b = 0; b < tensor.batch; b++) |
|||
for (int c = 0; c < tensor.channels; c++) |
|||
{ |
|||
res[b, c] = (int)tensor[b, c]; |
|||
} |
|||
// Since Type is not serializable, we use the DisplayType for the Inspector
|
|||
public Type DataType => _typeMap[valueType]; |
|||
public long[] shape; |
|||
public Tensor data; |
|||
} |
|||
return res; |
|||
} |
|||
else |
|||
{ |
|||
var res = new int[tensor.batch, tensor.height, tensor.width, tensor.channels]; |
|||
for (int b = 0; b < tensor.batch; b++) |
|||
for (int y = 0; y < tensor.height; y++) |
|||
for (int x = 0; x < tensor.width; x++) |
|||
for (int c = 0; c < tensor.channels; c++) |
|||
{ |
|||
res[b, y, x, c] = (int)tensor[b, y, x, c]; |
|||
} |
|||
public static class TensorUtils |
|||
{ |
|||
public static void ResizeTensor(TensorProxy tensor, int batch, ITensorAllocator allocator) |
|||
{ |
|||
if (tensor.shape[0] == batch && |
|||
tensor.data != null && tensor.data.batch == batch) |
|||
{ |
|||
return; |
|||
} |
|||
return res; |
|||
} |
|||
} |
|||
tensor.data?.Dispose(); |
|||
tensor.shape[0] = batch; |
|||
public static Tensor ArrayToBarracuda(Array array) |
|||
{ |
|||
Tensor res; |
|||
|
|||
if (array.Rank == 2) |
|||
res = new Tensor(array.GetLength(0), array.GetLength(1)); |
|||
else |
|||
res = new Tensor(array.GetLength(0), array.GetLength(1), array.GetLength(2), array.GetLength(3)); |
|||
if (tensor.shape.Length == 4) |
|||
{ |
|||
tensor.data = allocator.Alloc( |
|||
new TensorShape( |
|||
batch, |
|||
(int) tensor.shape[1], |
|||
(int) tensor.shape[2], |
|||
(int) tensor.shape[3])); |
|||
} |
|||
else |
|||
{ |
|||
tensor.data = allocator.Alloc( |
|||
new TensorShape( |
|||
batch, |
|||
(int) tensor.shape[tensor.shape.Length - 1])); |
|||
} |
|||
} |
|||
int offset = 0; |
|||
var barracudaArray = res.data != null ? res.tensorOnDevice.SharedAccess(out offset) : null; |
|||
internal static long[] TensorShapeFromBarracuda(TensorShape src) |
|||
{ |
|||
if (src.height == 1 && src.width == 1) |
|||
{ |
|||
return new long[] {src.batch, src.channels}; |
|||
} |
|||
Buffer.BlockCopy(array, 0, barracudaArray, offset, res.length * Marshal.SizeOf<float>()); |
|||
|
|||
return res; |
|||
} |
|||
return new long[] {src.batch, src.height, src.width, src.channels}; |
|||
} |
|||
internal static long[] TensorShapeFromBarracuda(TensorShape src) |
|||
{ |
|||
if (src.height == 1 && src.width == 1) |
|||
return new long[2] {src.batch, src.channels}; |
|||
/// <summary>
/// Wraps a Barracuda tensor in a TensorProxy, deriving the proxy shape from the
/// source tensor's shape and marking the contents as floating point.
/// </summary>
/// <param name="src">Barracuda tensor to wrap.</param>
/// <param name="nameOverride">Optional name used instead of the source tensor's name.</param>
/// <returns>A TensorProxy referencing the source tensor's data.</returns>
public static TensorProxy TensorProxyFromBarracuda(Tensor src, string nameOverride = null)
{
    return new TensorProxy
    {
        name = nameOverride ?? src.name,
        valueType = TensorProxy.TensorType.FloatingPoint,
        shape = TensorShapeFromBarracuda(src.shape),
        data = src
    };
}
|||
return new long[4] {src.batch, src.height, src.width, src.channels}; |
|||
} |
|||
/// <summary>
|
|||
/// Fill a pre-allocated Tensor with random numbers
|
|||
/// </summary>
|
|||
/// <param name="tensorProxy">The pre-allocated Tensor to fill</param>
|
|||
/// <param name="randomNormal">RandomNormal object used to populate tensor</param>
|
|||
/// <exception cref="NotImplementedException">
|
|||
/// Throws when trying to fill a Tensor of type other than float
|
|||
/// </exception>
|
|||
/// <exception cref="ArgumentNullException">
|
|||
/// Throws when the Tensor is not allocated
|
|||
/// </exception>
|
|||
public static void FillTensorWithRandomNormal( |
|||
TensorProxy tensorProxy, RandomNormal randomNormal) |
|||
{ |
|||
if (tensorProxy.DataType != typeof(float)) |
|||
{ |
|||
throw new NotImplementedException("Only float data types are currently supported"); |
|||
} |
|||
/// <summary>
/// Wraps a Barracuda tensor in a TensorProxy, deriving the proxy shape from the
/// source tensor's shape and marking the contents as floating point.
/// </summary>
/// <param name="src">Barracuda tensor to wrap.</param>
/// <param name="nameOverride">Optional name used instead of the source tensor's name.</param>
/// <returns>A TensorProxy referencing the source tensor's data.</returns>
public static TensorProxy TensorProxyFromBarracuda(Tensor src, string nameOverride = null)
{
    var proxyShape = TensorShapeFromBarracuda(src.shape);
    var proxy = new TensorProxy();
    proxy.Name = nameOverride ?? src.name;
    proxy.ValueType = TensorProxy.TensorType.FloatingPoint;
    proxy.Shape = proxyShape;
    proxy.Data = src;
    return proxy;
}
|||
} |
|||
if (tensorProxy.data == null) |
|||
{ |
|||
throw new ArgumentNullException(); |
|||
} |
|||
for (var i = 0; i < tensorProxy.data.length; i++) |
|||
{ |
|||
tensorProxy.data[i] = (float) randomNormal.NextDouble(); |
|||
} |
|||
} |
|||
} |
|||
} |
|
|||
using System; |
|||
using Assert = UnityEngine.Assertions.Assert; |
|||
using UnityEngine; |
|||
|
|||
namespace MLAgents.InferenceBrain.Utils |
|||
namespace MLAgents.InferenceBrain.Utils |
|||
/// Multinomial - Draws samples from a multinomial distribution in log space
|
|||
/// Reference: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/multinomial_op.cc
|
|||
/// Multinomial - Draws samples from a multinomial distribution given a (potentially unscaled)
|
|||
/// cumulative mass function (CMF). This means that the CMF need not "end" with probability
|
|||
/// mass of 1.0. For instance: [0.1, 0.2, 0.5] is a valid (unscaled). What is important is
|
|||
/// that it is a cumulative function, not a probability function. In other words,
|
|||
/// entry[i] = P(x \le i), NOT P(i - 1 \le x \lt i).
|
|||
/// (\le stands for less than or equal to while \lt is strictly less than).
|
|||
private readonly System.Random m_random; |
|||
private readonly System.Random _random; |
|||
/// <summary>
|
|||
/// Constructor.
|
|||
/// </summary>
|
|||
/// <param name="seed">
|
|||
/// Seed for the random number generator used in the sampling process.
|
|||
/// </param>
|
|||
m_random = new System.Random(seed); |
|||
_random = new System.Random(seed); |
|||
/// Draw samples from a multinomial distribution based on log-probabilities specified in tensor src. The samples
|
|||
/// will be saved in the dst tensor.
|
|||
/// Samples from the Multinomial distribution defined by the provided cumulative
|
|||
/// mass function.
|
|||
/// <param name="src">2-D tensor with shape batch_size x num_classes</param>
|
|||
/// <param name="dst">Allocated tensor with size batch_size x num_samples</param>
|
|||
/// <exception cref="NotImplementedException">Multinomial doesn't support integer tensors</exception>
|
|||
/// <exception cref="ArgumentException">Issue with tensor shape or type</exception>
|
|||
/// <exception cref="ArgumentNullException">At least one of the tensors is not allocated</exception>
|
|||
public void Eval(TensorProxy src, TensorProxy dst) |
|||
/// <param name="cmf">
|
|||
/// Cumulative mass function, which may be unscaled. The entries in this array need
|
|||
/// to be monotonic (always increasing). If the CMF is scaled, then the last entry in
|
|||
/// the array will be 1.0.
|
|||
/// </param>
|
|||
/// <returns>A sampled index from the CMF ranging from 0 to cmf.Length-1.</returns>
|
|||
public int Sample(float[] cmf) |
|||
if (src.DataType != typeof(float)) |
|||
{ |
|||
throw new NotImplementedException("Multinomial does not support integer tensors yet!"); |
|||
} |
|||
|
|||
if (src.ValueType != dst.ValueType) |
|||
{ |
|||
throw new ArgumentException("Source and destination tensors have different types!"); |
|||
} |
|||
|
|||
if (src.Data == null || dst.Data == null) |
|||
var p = (float) _random.NextDouble() * cmf[cmf.Length - 1]; |
|||
var cls = 0; |
|||
while (cmf[cls] < p) |
|||
throw new ArgumentNullException(); |
|||
++cls; |
|||
|
|||
if (src.Data.batch != dst.Data.batch) |
|||
{ |
|||
throw new ArgumentException("Batch size for input and output data is different!"); |
|||
} |
|||
|
|||
float[] cdf = new float[src.Data.channels]; |
|||
|
|||
for (int batch = 0; batch < src.Data.batch; ++batch) |
|||
{ |
|||
// Find the class maximum
|
|||
float maxProb = float.NegativeInfinity; |
|||
for (int cls = 0; cls < src.Data.channels; ++cls) |
|||
{ |
|||
maxProb = Mathf.Max(src.Data[batch, cls], maxProb); |
|||
} |
|||
|
|||
// Sum the log probabilities and compute CDF
|
|||
float sumProb = 0.0f; |
|||
for (int cls = 0; cls < src.Data.channels; ++cls) |
|||
{ |
|||
sumProb += Mathf.Exp(src.Data[batch, cls] - maxProb); |
|||
cdf[cls] = sumProb; |
|||
} |
|||
|
|||
// Generate the samples
|
|||
for (int sample = 0; sample < dst.Data.channels; ++sample) |
|||
{ |
|||
float p = (float)m_random.NextDouble() * sumProb; |
|||
int cls = 0; |
|||
while (cdf[cls] < p) |
|||
{ |
|||
++cls; |
|||
} |
|||
|
|||
dst.Data[batch, sample] = cls; |
|||
} |
|||
|
|||
} |
|||
|
|||
return cls; |
|||
} |
|||
} |
|||
} |
|
|||
using System; |
|||
using Barracuda; |
|||
using NUnit.Framework; |
|||
using UnityEngine; |
|||
using MLAgents.InferenceBrain; |
|||
using MLAgents.InferenceBrain.Utils; |
|||
|
|||
namespace MLAgents.Tests
{
    /// <summary>
    /// Tests for DiscreteActionOutputApplier.Eval: sampling from probability and
    /// log-probability inputs, batched inputs, and validation of bad arguments.
    /// </summary>
    public class DiscreteActionOutputApplierTest
    {
        [Test]
        public void TestEvalP()
        {
            var m = new Multinomial(2018);

            var src = new TensorProxy
            {
                data = new Tensor(1, 3, new[] {0.1f, 0.2f, 0.7f}),
                valueType = TensorProxy.TensorType.FloatingPoint
            };

            var dst = new TensorProxy
            {
                data = new Tensor(1, 3),
                valueType = TensorProxy.TensorType.FloatingPoint
            };

            DiscreteActionOutputApplier.Eval(src, dst, m);

            float[] reference = {2, 2, 1};
            // Fixed: `i` was previously incremented both in the loop header (`i++`)
            // and in the body (`++i`), so only every other element was verified.
            for (var i = 0; i < dst.data.length; i++)
            {
                Assert.AreEqual(reference[i], dst.data[i]);
            }
        }

        [Test]
        public void TestEvalLogits()
        {
            var m = new Multinomial(2018);

            var src = new TensorProxy
            {
                data = new Tensor(
                    1,
                    3,
                    new[] { Mathf.Log(0.1f) - 50, Mathf.Log(0.2f) - 50, Mathf.Log(0.7f) - 50 }),
                valueType = TensorProxy.TensorType.FloatingPoint
            };

            var dst = new TensorProxy
            {
                data = new Tensor(1, 3),
                valueType = TensorProxy.TensorType.FloatingPoint
            };

            DiscreteActionOutputApplier.Eval(src, dst, m);

            float[] reference = {2, 2, 2};
            // Fixed: removed the extra `++i` so every element is verified.
            for (var i = 0; i < dst.data.length; i++)
            {
                Assert.AreEqual(reference[i], dst.data[i]);
            }
        }

        [Test]
        public void TestEvalBatching()
        {
            var m = new Multinomial(2018);

            var src = new TensorProxy
            {
                data = new Tensor(2, 3, new []
                {
                    Mathf.Log(0.1f) - 50, Mathf.Log(0.2f) - 50, Mathf.Log(0.7f) - 50,
                    Mathf.Log(0.3f) - 25, Mathf.Log(0.4f) - 25, Mathf.Log(0.3f) - 25
                }),
                valueType = TensorProxy.TensorType.FloatingPoint
            };

            var dst = new TensorProxy
            {
                data = new Tensor(2, 3),
                valueType = TensorProxy.TensorType.FloatingPoint
            };

            DiscreteActionOutputApplier.Eval(src, dst, m);

            float[] reference = {2, 2, 2, 0, 1, 0};
            // Fixed: removed the extra `++i` so every element is verified.
            for (var i = 0; i < dst.data.length; i++)
            {
                Assert.AreEqual(reference[i], dst.data[i]);
            }
        }

        [Test]
        public void TestSrcInt()
        {
            var m = new Multinomial(2018);

            var src = new TensorProxy
            {
                valueType = TensorProxy.TensorType.Integer
            };

            // Integer source tensors are rejected by Eval.
            Assert.Throws<NotImplementedException>(
                () => DiscreteActionOutputApplier.Eval(src, null, m));
        }

        [Test]
        public void TestDstInt()
        {
            var m = new Multinomial(2018);

            var src = new TensorProxy
            {
                valueType = TensorProxy.TensorType.FloatingPoint
            };

            var dst = new TensorProxy
            {
                valueType = TensorProxy.TensorType.Integer
            };

            // Source and destination value types must match.
            Assert.Throws<ArgumentException>(
                () => DiscreteActionOutputApplier.Eval(src, dst, m));
        }

        [Test]
        public void TestSrcDataNull()
        {
            var m = new Multinomial(2018);

            var src = new TensorProxy
            {
                valueType = TensorProxy.TensorType.FloatingPoint
            };

            var dst = new TensorProxy
            {
                valueType = TensorProxy.TensorType.FloatingPoint
            };

            // Neither tensor has allocated data.
            Assert.Throws<ArgumentNullException>(
                () => DiscreteActionOutputApplier.Eval(src, dst, m));
        }

        [Test]
        public void TestDstDataNull()
        {
            var m = new Multinomial(2018);

            var src = new TensorProxy
            {
                valueType = TensorProxy.TensorType.FloatingPoint,
                data = new Tensor(0,1)
            };

            var dst = new TensorProxy
            {
                valueType = TensorProxy.TensorType.FloatingPoint
            };

            // Destination data is unallocated.
            Assert.Throws<ArgumentNullException>(
                () => DiscreteActionOutputApplier.Eval(src, dst, m));
        }

        [Test]
        public void TestUnequalBatchSize()
        {
            var m = new Multinomial(2018);

            var src = new TensorProxy
            {
                valueType = TensorProxy.TensorType.FloatingPoint,
                data = new Tensor(1, 1)
            };

            var dst = new TensorProxy
            {
                valueType = TensorProxy.TensorType.FloatingPoint,
                data = new Tensor(2, 1)
            };

            // Source and destination batch sizes must agree.
            Assert.Throws<ArgumentException>(
                () => DiscreteActionOutputApplier.Eval(src, dst, m));
        }
    }
}
|
|||
fileFormatVersion: 2 |
|||
guid: aa4c4ceac5f246a0b341958724ecd752 |
|||
MonoImporter: |
|||
externalObjects: {} |
|||
serializedVersion: 2 |
|||
defaultReferences: [] |
|||
executionOrder: 0 |
|||
icon: {instanceID: 0} |
|||
userData: |
|||
assetBundleName: |
|||
assetBundleVariant: |
|
|||
using System; |
|||
using Barracuda; |
|||
using MLAgents.InferenceBrain; |
|||
using MLAgents.InferenceBrain.Utils; |
|||
using NUnit.Framework; |
|||
|
|||
namespace MLAgents.Tests
{
    /// <summary>
    /// Tests for TensorUtils.FillTensorWithRandomNormal: type and allocation
    /// validation, and deterministic output for a fixed random seed.
    /// </summary>
    public class TensorUtilsTest
    {
        [Test]
        public void RandomNormalTestTensorInt()
        {
            var rn = new RandomNormal(1982);
            var t = new TensorProxy
            {
                valueType = TensorProxy.TensorType.Integer
            };

            // Only float tensors can be filled with normal samples.
            Assert.Throws<NotImplementedException>(
                () => TensorUtils.FillTensorWithRandomNormal(t, rn));
        }

        [Test]
        public void RandomNormalTestDataNull()
        {
            var rn = new RandomNormal(1982);
            var t = new TensorProxy
            {
                valueType = TensorProxy.TensorType.FloatingPoint
            };

            // The tensor's data buffer must be allocated before filling.
            Assert.Throws<ArgumentNullException>(
                () => TensorUtils.FillTensorWithRandomNormal(t, rn));
        }

        [Test]
        public void RandomNormalTestTensor()
        {
            var rn = new RandomNormal(1982);
            var t = new TensorProxy
            {
                valueType = TensorProxy.TensorType.FloatingPoint,
                data = new Tensor(1, 3, 4, 2)
            };

            TensorUtils.FillTensorWithRandomNormal(t, rn);

            // Expected samples for seed 1982, in flat iteration order.
            var reference = new []
            {
                -0.4315872f,
                -1.11074f,
                0.3414804f,
                -1.130287f,
                0.1413168f,
                -0.5105762f,
                -0.3027347f,
                -0.2645015f,
                1.225356f,
                -0.02921959f,
                0.3716498f,
                -1.092338f,
                0.9561074f,
                -0.5018106f,
                1.167787f,
                -0.7763879f,
                -0.07491868f,
                0.5396146f,
                -0.1377991f,
                0.3331701f,
                0.06144788f,
                0.9520947f,
                1.088157f,
                -1.177194f,
            };

            for (var i = 0; i < t.data.length; i++)
            {
                // Fixed: NUnit's Assert.AreEqual takes the expected value first;
                // the arguments were previously passed as (actual, expected).
                Assert.AreEqual(reference[i], t.data[i], 0.0001);
            }
        }
    }
}
|
|||
fileFormatVersion: 2 |
|||
guid: 0a700a7c6187a433ca44d60d243bb0cd |
|||
MonoImporter: |
|||
externalObjects: {} |
|||
serializedVersion: 2 |
|||
defaultReferences: [] |
|||
executionOrder: 0 |
|||
icon: {instanceID: 0} |
|||
userData: |
|||
assetBundleName: |
|||
assetBundleVariant: |
|
|||
<wpf:ResourceDictionary xml:space="preserve" xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml" xmlns:s="clr-namespace:System;assembly=mscorlib" xmlns:ss="urn:shemas-jetbrains-com:settings-storage-xaml" xmlns:wpf="http://schemas.microsoft.com/winfx/2006/xaml/presentation"> |
|||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Logits/@EntryIndexedValue">True</s:Boolean> |
|||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Marsaglia/@EntryIndexedValue">True</s:Boolean> |
|||
<s:Boolean x:Key="/Default/UserDictionary/Words/=multinomial/@EntryIndexedValue">True</s:Boolean> |
|||
<s:Boolean x:Key="/Default/UserDictionary/Words/=Probs/@EntryIndexedValue">True</s:Boolean> |
|||
<s:Boolean x:Key="/Default/UserDictionary/Words/=stddev/@EntryIndexedValue">True</s:Boolean></wpf:ResourceDictionary> |
撰写
预览
正在加载...
取消
保存
Reference in new issue