The Unity Machine Learning Agents Toolkit (ML-Agents) is an open-source project that enables games and simulations to serve as environments for training intelligent agents.
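As a rough illustration of what "games and simulations as training environments" means on the C# side, here is a minimal, hypothetical Agent subclass. The class name, observation, movement logic, and reward values are made up for illustration, and older ML-Agents releases pass actions as a float[] rather than an ActionBuffers. The BarracudaPolicy source, which runs a trained model for agents like this at inference time, follows below.

using Unity.MLAgents;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Sensors;
using UnityEngine;

public class MinimalAgent : Agent
{
    public override void OnEpisodeBegin()
    {
        // Reset scene state at the start of each training episode.
        transform.localPosition = Vector3.zero;
    }

    public override void CollectObservations(VectorSensor sensor)
    {
        // Whatever the policy should "see"; here just the agent's position.
        sensor.AddObservation(transform.localPosition);
    }

    public override void OnActionReceived(ActionBuffers actions)
    {
        // Apply the action chosen by the policy (a trainer during training,
        // or a Barracuda-backed policy running a trained model at inference time).
        var move = actions.ContinuousActions[0];
        transform.Translate(move * Time.deltaTime, 0f, 0f);
        AddReward(-0.001f); // small per-step penalty; reward design is task-specific
    }
}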
using System;
using Unity.Barracuda;
using System.Collections.Generic;
using Unity.MLAgents.Actuators;
using Unity.MLAgents.Inference;
using Unity.MLAgents.Sensors;

namespace Unity.MLAgents.Policies
{
    /// <summary>
    /// Where to perform inference.
    /// </summary>
    public enum InferenceDevice
    {
        /// <summary>
        /// CPU inference
        /// </summary>
        CPU = 0,

        /// <summary>
        /// GPU inference
        /// </summary>
        GPU = 1
    }

    /// <summary>
    /// The Barracuda Policy uses a Barracuda Model to make decisions at
    /// every step. It uses a ModelRunner that is shared across all
    /// Barracuda Policies that use the same model and inference devices.
    /// </summary>
    internal class BarracudaPolicy : IPolicy
    {
        protected ModelRunner m_ModelRunner;
        ActionBuffers m_LastActionBuffer;

        int m_AgentId;

        /// <summary>
        /// Sensor shapes for the associated Agents. All Agents must have the same shapes for their Sensors.
        /// </summary>
        List<int[]> m_SensorShapes;
        SpaceType m_SpaceType;

        /// <inheritdoc />
        public BarracudaPolicy(
            BrainParameters brainParameters,
            NNModel model,
            InferenceDevice inferenceDevice)
        {
            var modelRunner = Academy.Instance.GetOrCreateModelRunner(model, brainParameters, inferenceDevice);
            m_ModelRunner = modelRunner;
            m_SpaceType = brainParameters.VectorActionSpaceType;
        }

        /// <inheritdoc />
        public void RequestDecision(AgentInfo info, List<ISensor> sensors)
        {
            m_AgentId = info.episodeId;
            m_ModelRunner?.PutObservations(info, sensors);
        }

        /// <inheritdoc />
        public ref readonly ActionBuffers DecideAction()
        {
            m_ModelRunner?.DecideBatch();
            var actions = m_ModelRunner?.GetAction(m_AgentId);
            if (m_SpaceType == SpaceType.Continuous)
            {
                m_LastActionBuffer = new ActionBuffers(actions, Array.Empty<int>());
                return ref m_LastActionBuffer;
            }

            m_LastActionBuffer = ActionBuffers.FromDiscreteActions(actions);
            return ref m_LastActionBuffer;
        }

        public void Dispose()
        {
        }
    }
}
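For context, a minimal sketch of how a trained Barracuda model and an InferenceDevice typically reach this policy at runtime: the Agent's BehaviorParameters hold the NNModel asset and the chosen device, and the Agent builds a BarracudaPolicy from them internally; agents that share the same model and device also share a ModelRunner, so their inference is batched. The component name, field name "trainedModel", and behavior name "MyBehavior" below are placeholders, and the exact API surface can differ between ML-Agents releases.

using Unity.Barracuda;
using Unity.MLAgents;
using Unity.MLAgents.Policies;
using UnityEngine;

public class InferenceSetup : MonoBehaviour
{
    public NNModel trainedModel; // exported .nn/.onnx model asset

    void Start()
    {
        var agent = GetComponent<Agent>();
        // Swap in the trained model and choose where inference runs (CPU or GPU).
        agent.SetModel("MyBehavior", trainedModel, InferenceDevice.GPU);
    }
}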