using System;
using Google.Protobuf.WellKnownTypes;
using UnityEngine;
using Unity.MLAgents;
using Unity.MLAgentsExamples;
using Random = UnityEngine.Random; //disambiguates Random.Range below (vs System.Random)

[Range(0.1f, 10)]
[SerializeField]
//The walking speed to try and achieve
private float m_TargetWalkingSpeed = 10;

public float MTargetWalkingSpeed // property
{
    get { return m_TargetWalkingSpeed; }
    set { m_TargetWalkingSpeed = Mathf.Clamp(value, .1f, m_maxWalkingSpeed); }
}

const float m_maxWalkingSpeed = 10; //The max walking speed

//Should the agent pick a new goal walking speed each episode?
public bool randomizeWalkSpeedEachEpisode;
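
The setter above means callers can assign any value and the stored speed always stays in [0.1, m_maxWalkingSpeed]. A minimal sketch of that behavior, assuming the enclosing class is the WalkerAgent component from the ML-Agents examples:

//Hypothetical usage; assumes this script is the WalkerAgent component
var walker = GetComponent<WalkerAgent>();
walker.MTargetWalkingSpeed = 25f; //stored as 10 (clamped to m_maxWalkingSpeed)
walker.MTargetWalkingSpeed = 0f;  //stored as 0.1 (clamped to the lower bound)
walker.MTargetWalkingSpeed = 4f;  //stored as 4 (already in range)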

//The direction an agent will walk during training.
private Vector3 m_WorldDirToWalk = Vector3.right;

[Header("Target To Walk Towards")] public Transform target; //Target the agent will walk towards during training.

UpdateOrientationObjects();

//Set our goal walking speed
MTargetWalkingSpeed =
    randomizeWalkSpeedEachEpisode ? Random.Range(0.1f, m_maxWalkingSpeed) : MTargetWalkingSpeed;

SetResetParameters();
}
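
These reset statements run at the start of every episode. In the ML-Agents API that hook is the OnEpisodeBegin override on Agent; a minimal sketch of the surrounding declaration (the full Walker example also resets each body part's pose there, which this excerpt omits):

public override void OnEpisodeBegin()
{
    //...reset ragdoll body parts to their starting pose (omitted in this excerpt)...
    UpdateOrientationObjects();
    //...goal-speed selection and SetResetParameters() as shown above...
}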

var cubeForward = m_OrientationCube.transform.forward;

//velocity we want to match
var velGoal = cubeForward * MTargetWalkingSpeed;
//ragdoll's avg vel
var avgVel = GetAvgVelocity();
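
GetAvgVelocity() is not shown in this excerpt. A sketch of how the ragdoll's average velocity is commonly computed in the ML-Agents examples, assuming a JointDriveController-style m_JdController whose bodyPartsList exposes each body part's Rigidbody (both names are assumptions here):

//Sketch; m_JdController.bodyPartsList and item.rb are assumed, not shown in this excerpt
Vector3 GetAvgVelocity()
{
    var velSum = Vector3.zero;
    var numOfRb = 0;
    foreach (var item in m_JdController.bodyPartsList)
    {
        numOfRb++;
        velSum += item.rb.velocity;
    }
    return velSum / numOfRb; //mean velocity across all ragdoll rigidbodies
}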

//Update OrientationCube and DirectionIndicator
void UpdateOrientationObjects()
{
    m_WorldDirToWalk = target.position - hips.position;
    m_OrientationCube.UpdateOrientation(hips, target);
    if (m_DirectionIndicator)
    {
        m_DirectionIndicator.MatchOrientation(m_OrientationCube);
    }
}
// Set reward for this step according to a mixture of the following elements.

// a. Match target speed
//This reward will approach 1 if it matches perfectly and approach zero as it deviates
var matchSpeedReward = GetMatchingVelocityReward(cubeForward * MTargetWalkingSpeed, GetAvgVelocity());

//Check for NaNs
if (float.IsNaN(matchSpeedReward))
{
    //guard against an invalid physics state poisoning training
    throw new ArgumentException("NaN in matchSpeedReward.");
}

public float GetMatchingVelocityReward(Vector3 velocityGoal, Vector3 actualVelocity)
{
    //distance between our actual velocity and goal velocity
    var velDeltaMagnitude = Mathf.Clamp(Vector3.Distance(actualVelocity, velocityGoal), 0, MTargetWalkingSpeed);

    //map the delta onto a curve that decays from 1 (perfect match) to 0 (fully off target)
    return Mathf.Pow(1 - Mathf.Pow(velDeltaMagnitude / MTargetWalkingSpeed, 2), 2);
}
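
To make the curve concrete, a few worked values with a target speed of 10: the reward is 1 at a velocity delta of 0, roughly 0.92 at 2, 0.5625 at 5, and 0 once the delta reaches the target speed (the clamp above keeps the ratio from exceeding 1). A standalone sketch with a hypothetical helper that mirrors the method above:

//Hypothetical helper mirroring GetMatchingVelocityReward for a quick sanity check
static float MatchReward(float velDelta, float targetSpeed)
{
    var clamped = Mathf.Clamp(velDelta, 0, targetSpeed);
    return Mathf.Pow(1 - Mathf.Pow(clamped / targetSpeed, 2), 2);
}
//MatchReward(0f, 10f)  -> 1.0     (perfect match)
//MatchReward(2f, 10f)  -> 0.9216  (small deviation, still near-full reward)
//MatchReward(5f, 10f)  -> 0.5625  (halfway off)
//MatchReward(10f, 10f) -> 0.0     (at or beyond the target speed)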

/// <summary>