浏览代码

Partial progress: Following a trio of Relay+UTP samples, I seem to have something that will bind a transport to Relay and keep the connection open (as evident from the fact that a client can join after what would be the 10s timeout on Relay and successfully get the JoinAllocation, when they wouldn't without keeping the connection open even when binding was correct). However, this current code is very slapdash and sloppy; I'm just committing now as a very rough draft.

/main/staging
nathaniel.buck@unity3d.com 3 年前
当前提交
abd52623
共有 61 个文件被更改,包括 3258 次插入18 次删除
  1. 34
      Assets/Scripts/Entities/GameStateManager.cs
  2. 7
      Assets/Scripts/LobbyRelaySample.asmdef
  3. 14
      Assets/Scripts/Relay/RelayInterface.cs
  4. 9
      Packages/packages-lock.json
  5. 402
      Assets/Scripts/Relay/RelayUtpSetup.cs
  6. 11
      Assets/Scripts/Relay/RelayUtpSetup.cs.meta
  7. 16
      ProjectSettings/BurstAotSettings_StandaloneWindows.json
  8. 6
      ProjectSettings/CommonBurstAotSettings.json
  9. 175
      Packages/com.unity.jobs/CHANGELOG.md
  10. 7
      Packages/com.unity.jobs/CHANGELOG.md.meta
  11. 4
      Packages/com.unity.jobs/Documentation~/TableOfContents.md
  12. 382
      Packages/com.unity.jobs/Documentation~/custom_job_types.md
  13. 20
      Packages/com.unity.jobs/Documentation~/filter.yml
  14. 4
      Packages/com.unity.jobs/Documentation~/images/pixel.png
  15. 5
      Packages/com.unity.jobs/Documentation~/index.md
  16. 36
      Packages/com.unity.jobs/Documentation~/scheduling_a_job_from_a_job.md
  17. 8
      Packages/com.unity.jobs/Editor.meta
  18. 32
      Packages/com.unity.jobs/Editor/CLILeakDetectionSwitcher.cs
  19. 13
      Packages/com.unity.jobs/Editor/CLILeakDetectionSwitcher.cs.meta
  20. 89
      Packages/com.unity.jobs/Editor/JobsMenu.cs
  21. 13
      Packages/com.unity.jobs/Editor/JobsMenu.cs.meta
  22. 11
      Packages/com.unity.jobs/Editor/Unity.Jobs.Editor.asmdef
  23. 7
      Packages/com.unity.jobs/Editor/Unity.Jobs.Editor.asmdef.meta
  24. 30
      Packages/com.unity.jobs/LICENSE.md
  25. 7
      Packages/com.unity.jobs/LICENSE.md.meta
  26. 8
      Packages/com.unity.jobs/Unity.Jobs.Tests.meta
  27. 8
      Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs.meta
  28. 88
      Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobStressTests.cs
  29. 11
      Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobStressTests.cs.meta
  30. 519
      Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTests.cs
  31. 11
      Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTests.cs.meta
  32. 156
      Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTestsFixture.cs
  33. 11
      Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTestsFixture.cs.meta
  34. 111
      Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTests_CombineDependencies.cs
  35. 11
      Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTests_CombineDependencies.cs.meta
  36. 226
      Packages/com.unity.jobs/Unity.Jobs.Tests/NativeListDeferredArrayTests.cs
  37. 11
      Packages/com.unity.jobs/Unity.Jobs.Tests/NativeListDeferredArrayTests.cs.meta
  38. 164
      Packages/com.unity.jobs/Unity.Jobs.Tests/ParallelFilterJobTests.cs
  39. 12
      Packages/com.unity.jobs/Unity.Jobs.Tests/ParallelFilterJobTests.cs.meta
  40. 21
      Packages/com.unity.jobs/Unity.Jobs.Tests/Unity.Jobs.Tests.asmdef
  41. 7
      Packages/com.unity.jobs/Unity.Jobs.Tests/Unity.Jobs.Tests.asmdef.meta
  42. 10
      Packages/com.unity.jobs/Unity.Jobs.meta
  43. 52
      Packages/com.unity.jobs/Unity.Jobs/EarlyInitHelpers.cs
  44. 11
      Packages/com.unity.jobs/Unity.Jobs/EarlyInitHelpers.cs.meta
  45. 80
      Packages/com.unity.jobs/Unity.Jobs/IJobParallelForBatch.cs
  46. 13
      Packages/com.unity.jobs/Unity.Jobs/IJobParallelForBatch.cs.meta
  47. 175
      Packages/com.unity.jobs/Unity.Jobs/IJobParallelForDefer.cs
  48. 11
      Packages/com.unity.jobs/Unity.Jobs/IJobParallelForDefer.cs.meta
  49. 144
      Packages/com.unity.jobs/Unity.Jobs/IJobParallelForFilter.cs
  50. 12
      Packages/com.unity.jobs/Unity.Jobs/IJobParallelForFilter.cs.meta
  51. 12
      Packages/com.unity.jobs/Unity.Jobs/Unity.Jobs.asmdef
  52. 7
      Packages/com.unity.jobs/Unity.Jobs/Unity.Jobs.asmdef.meta
  53. 25
      Packages/com.unity.jobs/package.json
  54. 7
      Packages/com.unity.jobs/package.json.meta

34
Assets/Scripts/Entities/GameStateManager.cs


LobbyServiceData m_lobbyServiceData = new LobbyServiceData();
LocalGameState m_localGameState = new LocalGameState();
ReadyCheck m_ReadyCheck;
RelayUTPSetup m_RelaySetup;
// Do some arbitrary operations to instantiate singletons.
// Do some arbitrary operations to instantiate singletons.
#pragma warning disable IDE0059 // Unnecessary assignment of a value
var unused = Locator.Get;
#pragma warning restore IDE0059 // Unnecessary assignment of a value

OnJoinedLobby();
}
// Callback for a successful Relay allocation; immediately requests the join code for it.
// NOTE(review): the parameter is the full Allocation object, not just an ID, despite its name.
void OnGotRelayAllocation(Allocation allocationID)
{
RelayInterface.GetJoinCodeAsync(allocationID.AllocationId, OnGotRelayCode);
}
// Stores the Relay join code on the local lobby; clients watching the lobby's change event
// (see RelayUtpSetup_Client.OnLobbyChange) use it to join the Relay allocation.
void OnGotRelayCode(string relayCode)
{
m_localLobby.RelayCode = relayCode;
}
// After joining a lobby: begin polling it, push this user's display name, and stand up the
// Relay transport (the host allocates the Relay server; clients wait for its join code).
void OnJoinedLobby()
{
LobbyAsyncRequests.Instance.BeginTracking(m_localLobby.LobbyID);

// NOTE(review): displayNameData's declaration is outside this diff hunk — presumably a
// Dictionary<string, string>; confirm against the full file.
displayNameData.Add("DisplayName", m_localUser.DisplayName);
LobbyAsyncRequests.Instance.UpdatePlayerDataAsync(displayNameData, null);
if (m_localUser.IsHost)
{
// Host path: allocate the Relay server and publish the join code through the lobby.
m_RelaySetup = gameObject.AddComponent<RelayUtpSetup_Host>();
(m_RelaySetup as RelayUtpSetup_Host).DoRelaySetup(m_localLobby);
}
else
{
// Client path: subscribe to lobby changes and join once the RelayCode appears.
m_RelaySetup = gameObject.AddComponent<RelayUtpSetup_Client>();
(m_RelaySetup as RelayUtpSetup_Client).JoinRelay(m_localLobby);
}
}
void OnLeftLobby()

m_lobbyContentHeartbeat.EndTracking();
LobbyAsyncRequests.Instance.EndTracking();
if (m_RelaySetup != null)
{
Component.Destroy(m_RelaySetup);
m_RelaySetup = null;
}
}
/// <summary>

// We want to do all the Relay Allocation calls in quick succession, as waiting too long
// (10s) will cause the Relay server to get cleaned up by the service
RelayInterface.AllocateAsync(m_localLobby.MaxPlayerCount, OnGotRelayAllocation);
m_localLobby.CountDownTime = m_localLobby.TargetEndTime.Subtract(DateTime.Now).Seconds;
m_localLobby.State = LobbyState.CountDown;
StartCoroutine(CountDown());

m_localLobby.State = LobbyState.InGame;
// TODO TRANSPORT: Move Relay Join to Pre-Countdown, and do connection and health checks before counting down for the game start.
RelayInterface.JoinAsync(m_localLobby.RelayCode, OnJoinedRelay);
//RelayInterface.JoinAsync(m_localLobby.RelayCode, OnJoinedRelay);
}
/// <summary>

7
Assets/Scripts/LobbyRelaySample.asmdef


"GUID:03058786646e84a4587858e9302c3f41",
"GUID:5540e30183c82e84b954c033c388e06c",
"GUID:fe25561d224ed4743af4c60938a59d0b",
"GUID:4c3f49d89436d478ea78315c03159dcc"
"GUID:4c3f49d89436d478ea78315c03159dcc",
"GUID:f2d49d9fa7e7eb3418e39723a7d3b92f",
"GUID:e0cd26848372d4e5c891c569017e11f1",
"GUID:8a2eafa29b15f444eb6d74f94a930e1d"
"allowUnsafeCode": false,
"allowUnsafeCode": true,
"overrideReferences": false,
"precompiledReferences": [],
"autoReferenced": true,

14
Assets/Scripts/Relay/RelayInterface.cs


private async void DoRequest(Task<T> task, Action<T> onComplete)
{
T result = await task;
onComplete?.Invoke(result);
T result = default;
string currentTrace = System.Environment.StackTrace;
try {
result = await task;
} catch (Exception e) {
Exception eFull = new Exception($"Call stack before async call:\n{currentTrace}\n", e);
throw eFull;
} finally {
onComplete?.Invoke(result);
}
// TODO: Ensure that passing null as result is handled.
}
}

9
Packages/packages-lock.json


"dependencies": {},
"url": "https://packages.unity.com"
},
"com.unity.jobs": {
"version": "file:com.unity.jobs",
"depth": 0,
"source": "embedded",
"dependencies": {
"com.unity.collections": "0.17.0-preview.18",
"com.unity.mathematics": "1.2.1"
}
},
"com.unity.mathematics": {
"version": "1.2.1",
"depth": 1,

402
Assets/Scripts/Relay/RelayUtpSetup.cs


using LobbyRelaySample;
using System.Collections;
using Unity.Collections;
using Unity.Jobs;
using Unity.Networking.Transport;
using Unity.Networking.Transport.Relay;
using Unity.Services.Relay;
using Unity.Services.Relay.Allocations;
using Unity.Services.Relay.Models;
using UnityEngine;
namespace LobbyRelaySample.Relay
{
/// <summary>
/// Responsible for setting up a connection with Relay using UTP, for the lobby host.
/// Must be a MonoBehaviour since the binding process doesn't have asynchronous callback options.
/// </summary>
public abstract class RelayUTPSetup : MonoBehaviour
{
    // Helpers to convert the Relay service's managed byte arrays into UTP's pointer-based
    // structs (UTP avoids managed arrays for performance). TODO: Eh, don't need to live here.

    /// <summary>Wraps the Relay allocation ID bytes in UTP's RelayAllocationId struct.</summary>
    protected static unsafe RelayAllocationId ConvertFromAllocationIdBytes(byte[] allocationIdBytes)
    {
        fixed (byte* ptr = allocationIdBytes)
        {
            return RelayAllocationId.FromBytePointer(ptr, allocationIdBytes.Length);
        }
    }

    /// <summary>Wraps Relay connection data in UTP's RelayConnectionData struct.</summary>
    protected static unsafe RelayConnectionData ConvertConnectionData(byte[] connectionData)
    {
        // FromBytePointer reads exactly k_Length bytes regardless of the array's actual size,
        // so validate up front to avoid an unsafe overread past the end of the buffer.
        if (connectionData == null || connectionData.Length < RelayConnectionData.k_Length)
            throw new System.ArgumentException($"Connection data must be at least {RelayConnectionData.k_Length} bytes.", nameof(connectionData));
        fixed (byte* ptr = connectionData)
        {
            return RelayConnectionData.FromBytePointer(ptr, RelayConnectionData.k_Length);
        }
    }

    /// <summary>Wraps the HMAC key bytes in UTP's RelayHMACKey struct.</summary>
    protected static unsafe RelayHMACKey ConvertFromHMAC(byte[] hmac)
    {
        // Same overread guard as above: FromBytePointer reads a fixed k_Length bytes.
        if (hmac == null || hmac.Length < RelayHMACKey.k_Length)
            throw new System.ArgumentException($"HMAC key must be at least {RelayHMACKey.k_Length} bytes.", nameof(hmac));
        fixed (byte* ptr = hmac)
        {
            return RelayHMACKey.FromBytePointer(ptr, RelayHMACKey.k_Length);
        }
    }
}
/// <summary>
/// Host-side Relay+UTP setup: requests a Relay allocation, binds a NetworkDriver to the Relay
/// server, listens for client connections, and keeps the connection alive by continually pumping
/// the driver (so the Relay allocation isn't cleaned up after its ~10s inactivity timeout).
/// </summary>
public class RelayUtpSetup_Host : RelayUTPSetup
{
    private LocalLobby m_localLobby;
    private Allocation m_allocation;
    private bool m_isRelayConnected = false; // True once the driver is bound and listening.
    public NetworkDriver m_ServerDriver;
    private NativeList<NetworkConnection> m_connections; // Active client connections, serviced by jobs in FixedUpdate.
    private JobHandle m_updateHandle;

    /// <summary>Entry point for the host flow: request a Relay allocation sized for the lobby.</summary>
    public void DoRelaySetup(LocalLobby localLobby)
    {
        m_localLobby = localLobby;
        RelayInterface.AllocateAsync(m_localLobby.MaxPlayerCount, OnAllocation);
    }

    /// <summary>Converts the allocation's Relay data into UTP parameters and begins binding.</summary>
    public void OnAllocation(Allocation allocation)
    {
        m_allocation = allocation;
        NetworkEndPoint serverEndpoint = NetworkEndPoint.Parse(m_allocation.RelayServer.IpV4, (ushort)m_allocation.RelayServer.Port);
        // UTP uses pointers instead of managed arrays for performance reasons, so we use these helper functions to convert them.
        RelayAllocationId allocationId = ConvertFromAllocationIdBytes(m_allocation.AllocationIdBytes);
        RelayConnectionData connectionData = ConvertConnectionData(m_allocation.ConnectionData);
        RelayHMACKey key = ConvertFromHMAC(m_allocation.Key);
        // The host passes its own connection data for both endpoints of the RelayServerData.
        var relayServerData = new RelayServerData(ref serverEndpoint, 0, ref allocationId, ref connectionData, ref connectionData, ref key);
        relayServerData.ComputeNewNonce();
        var relayNetworkParameter = new RelayNetworkParameter { ServerData = relayServerData };
        StartCoroutine(ServerBindAndListen(relayNetworkParameter, serverEndpoint));
    }

    /// <summary>
    /// Coroutine that binds the driver to the Relay server and starts listening. Binding is
    /// asynchronous, so we pump the driver each frame until it reports Bound; only after we are
    /// actually listening do we request the join code for clients.
    /// </summary>
    private IEnumerator ServerBindAndListen(RelayNetworkParameter relayNetworkParameter, NetworkEndPoint serverEndpoint)
    {
        // Create the NetworkDriver using the Relay parameters
        m_ServerDriver = NetworkDriver.Create(new INetworkParameter[] { relayNetworkParameter });
        m_connections = new NativeList<NetworkConnection>(16, Allocator.Persistent);
        // Bind the NetworkDriver to the local endpoint
        if (m_ServerDriver.Bind(NetworkEndPoint.AnyIpv4) != 0)
        {
            Debug.LogError("Server failed to bind");
        }
        else
        {
            // The binding process is an async operation; wait until bound
            while (!m_ServerDriver.Bound)
            {
                m_ServerDriver.ScheduleUpdate().Complete();
                yield return null; // TODO: Does this not proceed until a client connects as well?
            }
            // Once the driver is bound you can start listening for connection requests
            if (m_ServerDriver.Listen() != 0)
            {
                Debug.LogError("Server failed to listen");
                yield break;
            }
            else
            {
                Debug.LogWarning("Server is now listening!");
                m_isRelayConnected = true;
            }
            // Request the join code only now that we can actually accept connections.
            RelayInterface.GetJoinCodeAsync(m_allocation.AllocationId, OnRelayCode);
        }
    }

    /// <summary>Publishes the join code to the lobby so clients can join the Relay allocation.</summary>
    public void OnRelayCode(string relayCode)
    {
        m_localLobby.RelayCode = relayCode;
    }

    // Prunes dead connections and accepts new ones; scheduled each FixedUpdate after the driver update.
    struct DriverUpdateJob : IJob
    {
        public NetworkDriver driver;
        public NativeList<NetworkConnection> connections;
        public void Execute()
        {
            // Remove connections which have been destroyed from the list of active connections
            for (int i = 0; i < connections.Length; ++i)
            {
                if (!connections[i].IsCreated)
                {
                    connections.RemoveAtSwapBack(i);
                    // Index i is a new connection since we did a swap back, check it again
                    --i;
                }
            }
            // Accept all new connections
            while (true)
            {
                var con = driver.Accept();
                // "Nothing more to accept" is signaled by returning an invalid connection from accept
                if (!con.IsCreated)
                    break;
                connections.Add(con);
            }
        }
    }

    /// <summary>Drains all pending events for one connection, replying to ping messages with pongs.</summary>
    static NetworkConnection ProcessSingleConnection(NetworkDriver.Concurrent driver, NetworkConnection connection)
    {
        DataStreamReader strm;
        NetworkEvent.Type cmd;
        // Pop all events for the connection
        while ((cmd = driver.PopEventForConnection(connection, out strm)) != NetworkEvent.Type.Empty)
        {
            if (cmd == NetworkEvent.Type.Data)
            {
                // For ping requests we reply with a pong message
                int id = strm.ReadInt();
                // Create a temporary DataStreamWriter to keep our serialized pong message
                if (driver.BeginSend(connection, out var pongData) == 0)
                {
                    pongData.WriteInt(id);
                    // Send the pong message with the same id as the ping
                    driver.EndSend(pongData);
                }
            }
            else if (cmd == NetworkEvent.Type.Disconnect)
            {
                // When disconnected we make sure the connection return false to IsCreated so the next frames
                // DriverUpdateJob will remove it
                return default(NetworkConnection);
            }
        }
        return connection;
    }

    // Services every active connection in parallel; count is deferred since DriverUpdateJob may add connections.
    struct PongJob : Unity.Jobs.IJobParallelForDefer
    {
        public NetworkDriver.Concurrent driver;
        public NativeArray<NetworkConnection> connections;
        public void Execute(int i)
        {
            connections[i] = ProcessSingleConnection(driver, connections[i]);
        }
    }

    private void Update()
    {
        // Until we're bound and listening, pump the driver on the main thread so the Relay bind
        // handshake can progress. Once connected, FixedUpdate takes over via the job chain.
        if (m_ServerDriver.IsCreated && !m_isRelayConnected)
        {
            m_ServerDriver.ScheduleUpdate().Complete();
            var updateJob = new DriverUpdateJob {driver = m_ServerDriver, connections = m_connections};
            updateJob.Schedule().Complete();
        }
    }

    void LateUpdate()
    {
        // On fast clients we can get more than 4 frames per fixed update, this call prevents warnings about TempJob
        // allocation longer than 4 frames in those cases
        if (m_ServerDriver.IsCreated && m_isRelayConnected)
            m_updateHandle.Complete();
    }

    void FixedUpdate()
    {
        if (m_ServerDriver.IsCreated && m_isRelayConnected) {
            // Wait for the previous frames ping to complete before starting a new one, the Complete in LateUpdate is not
            // enough since we can get multiple FixedUpdate per frame on slow clients
            m_updateHandle.Complete();
            var updateJob = new DriverUpdateJob {driver = m_ServerDriver, connections = m_connections};
            var pongJob = new PongJob
            {
                // PongJob is a ParallelFor job, it must use the concurrent NetworkDriver
                driver = m_ServerDriver.ToConcurrent(),
                // PongJob uses IJobParallelForDeferExtensions, we *must* use AsDeferredJobArray in order to access the
                // list from the job
                connections = m_connections.AsDeferredJobArray()
            };
            // Update the driver should be the first job in the chain
            m_updateHandle = m_ServerDriver.ScheduleUpdate();
            // The DriverUpdateJob which accepts new connections should be the second job in the chain, it needs to depend
            // on the driver update job
            m_updateHandle = updateJob.Schedule(m_updateHandle);
            // PongJob uses IJobParallelForDeferExtensions, we *must* schedule with a list as first parameter rather than
            // an int since the job needs to pick up new connections from DriverUpdateJob
            // The PongJob is the last job in the chain and it must depends on the DriverUpdateJob
            m_updateHandle = pongJob.Schedule(m_connections, 1, m_updateHandle);
        }
    }

    void OnDestroy()
    {
        // Make sure no scheduled job still references the driver or connection list, then release
        // the native resources; the driver and the Persistent NativeList otherwise leak when the
        // component is destroyed (e.g. on leaving the lobby).
        m_updateHandle.Complete();
        if (m_ServerDriver.IsCreated)
            m_ServerDriver.Dispose();
        if (m_connections.IsCreated)
            m_connections.Dispose();
    }
}
/// <summary>
/// Client-side Relay+UTP setup: waits for the lobby to expose the host's Relay join code, joins
/// that Relay allocation, binds a NetworkDriver, and connects to the host through the Relay server.
/// </summary>
public class RelayUtpSetup_Client : RelayUTPSetup
{
    LocalLobby m_localLobby;

    /// <summary>Begins the client flow; the actual join waits until the lobby exposes a Relay code.</summary>
    public void JoinRelay(LocalLobby localLobby)
    {
        m_localLobby = localLobby;
        localLobby.onChanged += OnLobbyChange;
    }

    private void OnDestroy()
    {
        // If this component is torn down before a Relay code ever arrives, don't leave a dangling
        // subscription on the lobby. (Removing an already-removed handler is harmless.)
        if (m_localLobby != null)
            m_localLobby.onChanged -= OnLobbyChange;
    }

    private void OnLobbyChange(LocalLobby lobby)
    {
        // The code is unset until the host finishes its allocation; treat empty the same as null
        // so we never attempt a join with an empty code.
        if (!string.IsNullOrEmpty(m_localLobby.RelayCode))
        {
            RelayInterface.JoinAsync(m_localLobby.RelayCode, OnJoin);
            m_localLobby.onChanged -= OnLobbyChange;
        }
    }

    /// <summary>Converts the join response's Relay data into UTP parameters and begins connecting.</summary>
    private void OnJoin(JoinAllocation allocation)
    {
        if (allocation == null)
        {
            // TODO: Error messaging.
            return;
        }
        // Collect and convert the Relay data from the join response
        var serverEndpoint = NetworkEndPoint.Parse(allocation.RelayServer.IpV4, (ushort)allocation.RelayServer.Port);
        var allocationId = ConvertFromAllocationIdBytes(allocation.AllocationIdBytes);
        var connectionData = ConvertConnectionData(allocation.ConnectionData);
        var hostConnectionData = ConvertConnectionData(allocation.HostConnectionData);
        var key = ConvertFromHMAC(allocation.Key);
        // Prepare the RelayNetworkParameter
        var relayServerData = new RelayServerData(ref serverEndpoint, 0, ref allocationId, ref connectionData, ref hostConnectionData, ref key);
        relayServerData.ComputeNewNonce();
        var relayNetworkParameter = new RelayNetworkParameter { ServerData = relayServerData };
        StartCoroutine(BindAndConnect(relayNetworkParameter, serverEndpoint));
    }

    /// <summary>
    /// Coroutine that binds the client driver to the Relay server and then connects to the host.
    /// (The client only binds and connects; unlike the host it never listens.)
    /// </summary>
    private IEnumerator BindAndConnect(RelayNetworkParameter relayNetworkParameter, NetworkEndPoint serverEndpoint)
    {
        var driver = NetworkDriver.Create(new INetworkParameter[] { relayNetworkParameter });
        // Bind the NetworkDriver to the available local endpoint.
        // This will send the bind request to the Relay server
        if (driver.Bind(NetworkEndPoint.AnyIpv4) != 0)
        {
            Debug.LogError("Client failed to bind");
        }
        else
        {
            while (!driver.Bound)
            {
                driver.ScheduleUpdate().Complete();
                yield return null;
            }
            // Once the client is bound to the Relay server, you can send a connection request
            var serverConnection = driver.Connect(serverEndpoint);
            while (driver.GetConnectionState(serverConnection) == NetworkConnection.State.Connecting)
            {
                driver.ScheduleUpdate().Complete();
                yield return null;
            }
            if (driver.GetConnectionState(serverConnection) != NetworkConnection.State.Connected)
            {
                Debug.LogError("Client failed to connect to server");
            }
        }
        // NOTE(review): the driver is local to this coroutine and is neither pumped after
        // connecting nor disposed; it should be promoted to a field and updated/disposed
        // like the host's driver before this leaves draft state.
    }
}
}

11
Assets/Scripts/Relay/RelayUtpSetup.cs.meta


fileFormatVersion: 2
guid: 5857e5666b1ecf844b8280729adb6e6e
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

16
ProjectSettings/BurstAotSettings_StandaloneWindows.json


{
"MonoBehaviour": {
"Version": 3,
"EnableBurstCompilation": true,
"EnableOptimisations": true,
"EnableSafetyChecks": false,
"EnableDebugInAllBuilds": false,
"UsePlatformSDKLinker": false,
"CpuMinTargetX32": 0,
"CpuMaxTargetX32": 0,
"CpuMinTargetX64": 0,
"CpuMaxTargetX64": 0,
"CpuTargetsX32": 6,
"CpuTargetsX64": 72
}
}

6
ProjectSettings/CommonBurstAotSettings.json


{
"MonoBehaviour": {
"Version": 3,
"DisabledWarnings": ""
}
}

175
Packages/com.unity.jobs/CHANGELOG.md


# Change log
## [0.10.0] - 2021-03-15
### Changed
* `IJobParallelForDeferExtensions.Schedule` generic `U` constraint from `struct` to `unmanaged`.
- Updated dependencies for using com.unity.burst@1.5.3
## [0.9.0] - 2021-01-26
### Added
* `CLILeakDetectionSwitcher` to be used by Yamato.
### Changed
* Update minimum editor version to 2020.2.1f1-dots.3
## [0.8.0] - 2020-11-13
### Changed
* Update minimum editor version to 2020.1.2f1
* Added tests for generic jobs.
## [0.7.0] - 2020-09-24
* Updated dependencies of this package.
## [0.6.0] - 2020-08-26
* Updated dependencies of this package.
## [0.5.0] - 2020-08-04
* Updated dependencies of this package.
## [0.4.0] - 2020-07-10
### Changed
* Updated minimum Unity Editor version to 2020.1.0b15 (40d9420e7de8)
### Known Issues
* This version is not compatible with 2020.2.0a17. Please update to the forthcoming alpha.
## [0.3.0] - 2020-05-27
### Changed
* Updated minimum Unity Editor version to 2020.1.0b9 (9c0aec301c8d)
## [0.2.10] - 2020-05-04
### Changed
* Updated dependencies of this package.
## [0.2.9] - 2020-04-24
### Changed
* Updated dependencies of this package.
## [0.2.8] - 2020-04-08
### Changed
* Updated dependencies of this package.
## [0.2.7] - 2020-03-13
### Changed
* Updated dependencies of this package.
* The internals of IJobParallelForFilter are now `internal` rather than `public`
## [0.2.6] - 2020-03-03
### Changed
* Updated dependencies of this package.
* Maintain JobsDebugger menu item value between sessions.
## [0.2.5] - 2020-02-17
### Changed
* Updated dependencies of this package.
## [0.2.4] - 2020-01-28
### Changed
* Updated dependencies of this package.
## [0.2.3] - 2020-01-16
### Changed
* Updated dependencies of this package.
## [0.2.2] - 2019-12-16
**This version requires Unity 2019.3.0f1+**
### Changes
* Updated dependencies of this package.
## [0.2.1] - 2019-12-03
### Changes
* Updated dependencies of this package.
## [0.2.0] - 2019-11-22
**This version requires Unity 2019.3 0b11+**
### Changes
* Updated dependencies for this package.
## [0.1.1] - 2019-08-06
### Changes
* Updated dependencies for this package.
## [0.1.0] - 2019-07-30
### Changes
* Updated dependencies for this package.
## [0.0.7-preview.13] - 2019-05-24
### Changes
* Updated dependency for `com.unity.collections`
## [0.0.7-preview.12] - 2019-05-16
### New Features
* IJobParallelForDeferred has been added to allow a parallel for job to be scheduled even if its for-each count will only be known during another job's execution.
### Upgrade guide
* Previously IJobParallelFor had a overload with the same IJobParallelForDeferred functionality. This is no longer supported since it was not working in Standalone builds using Burst. Now you need to explicitly implement IJobParallelForDeferred if you want to use the deferred schedule parallel for.
## [0.0.7-preview.11] - 2019-05-01
Change tracking started with this version.

7
Packages/com.unity.jobs/CHANGELOG.md.meta


fileFormatVersion: 2
guid: ce949e15b369542f1991c23589a224fc
TextScriptImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

4
Packages/com.unity.jobs/Documentation~/TableOfContents.md


<!-- Generated from ../Samples/Packages/com.unity.jobs/Documentation~/toc.yml do not edit. -->
* [Jobs package](index.md)
* [Custom job types](custom_job_types.md)
* [Scheduling a job from a job](scheduling_a_job_from_a_job.md)

382
Packages/com.unity.jobs/Documentation~/custom_job_types.md


---
uid: custom-job-types
---
# Custom job types
On the lowest level of the job system, jobs are scheduled by calling one of the `Schedule` functions in [JobsUtility](https://docs.unity3d.com/ScriptReference/Unity.Jobs.LowLevel.Unsafe.JobsUtility.html). The currently existing [job types](https://docs.unity3d.com/ScriptReference/Unity.Jobs.LowLevel.Unsafe.JobType.html) all use these functions, but it is also possible to create specialized job types using the same APIs.
These APIs use unsafe code and have to be crafted carefully, since they can easily introduce unwanted race conditions. If you add your own job types, we strongly recommend to aim for full test coverage.
As an example we have a custom job type `IJobParallelForBatch` (see file: _/Packages/com.unity.jobs/Unity.Jobs/IJobParallelForBatch.cs_).
It works like __IJobParallelFor__, but instead of calling a single execute function per index it calls one execute function per batch being executed. This is useful if you need to do something on more than one item at a time, but still want to do it in parallel. A common scenario for this job type is if you need to create a temporary array and you want to avoid creating each item in the array one at a time. By using IJobParallelForBatch you can instead create one temporary array per batch.
In the IJobParallelForBatch example, the entry point where the job is actually scheduled looks like this:
```c#
unsafe static public JobHandle ScheduleBatch<T>(this T jobData, int arrayLength, int minIndicesPerJobCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatch
{
var scheduleParams = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref jobData), ParallelForBatchJobStruct<T>.Initialize(), dependsOn, ScheduleMode.Batched);
return JobsUtility.ScheduleParallelFor(ref scheduleParams, arrayLength, minIndicesPerJobCount);
}
```
The first line creates a struct containing the scheduling parameters. When creating it you need to set a pointer to the data which will be copied to the jobs. The reason this is a pointer is that the native code which uses it does not know about the type.
You also need to pass it a pointer to the __JobReflectionData__ created by calling:
```c#
JobsUtility.CreateJobReflectionData(typeof(T), JobType.ParallelFor, (ExecuteJobFunction)Execute);
```
JobReflection stores information about the struct with the data for the job, such as which __NativeContainers__ it has and how they need to be patched when scheduling a job. It lives on the native side of the engine and the managed code only has access to it through pointers without any information about what the type is. When creating JobReflectionData you need to specify the type of the struct implementing the job, the __JobType__ and the method which will be called to execute the job. The JobReflectionData does not depend on the data in the struct you schedule, only its type, so it should only be created once for all jobs implementing the same interface. There are currently only two job types, __Single__ and __ParallelFor__. Single means the job will only get a single call, ParallelFor means there will be multiple calls to process it; where each call is restricted to a subset of the range of indices to process. Which job type you choose affects which schedule function you are allowed to call.
The third parameter of __JobsUtility.JobScheduleParameters__ is the __JobHandle__ that the scheduled job should depend on.
The final parameter is the schedule mode. There are two scheduling modes to choose from, __Run__ and __Batched__. Batched means one or more jobs will be scheduled to do the processing, while Run means the processing will be done on the main thread before Schedule returns.
Once the schedule parameters are created we actually schedule the job. There are three ways to schedule jobs depending on their type:
```c#
JobHandle Schedule(ref JobScheduleParameters parameters);
JobHandle ScheduleParallelFor(ref JobScheduleParameters parameters, int arrayLength, int innerLoopBatchCount);
JobHandle ScheduleParallelForTransform(ref JobScheduleParameters parameters, IntPtr transfromAccessArray);
```
Schedule can only be used if the __ScheduleParameters__ are created with __JobType.Single__, the other two schedule functions require __JobType.ParallelFor__.
The __arrayLength__ and __innerLoopBatchCount__ parameter passed to __ScheduleParallelFor__ are used to determine how many indices the jobs should process and how many indices it should handle in the inner loop (see the section on [Execution and JobRanges](#execution-and-jobranges) for more information on the inner loop count).
__ScheduleParallelForTransform__ is similar to ScheduleParallelFor, but it also has access to a __TransformAccessArray__ that allows you to modify __Transform__ components on __GameObjects__. The number of indices and batch size is inferred from the TransformAccessArray.
## Execution and JobRanges
After scheduling the job, Unity will call the entry point you specified directly from the native side. It works in a similar way to how __Update__ is called on MonoBehaviours, but from inside a job instead. You only get one call per job and there is either one job, or one job per worker thread; in the case of ParallelFor.
The signature used for Execute is
```c#
public delegate void ExecuteJobFunction(ref T data, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex);
```
For Single jobs, only the data is needed and you can just do your processing right away, but for ParallelFor jobs it requires some more work before you can start processing indices. We need to split up the indices into a number of sequential sub-sets that each job will process in parallel. This way we do not process the same thing twice and we are sure that everything gets covered. The memory layout will determine the order of indices.
The JobRanges contain the batches and indices a ParallelFor job is supposed to process. The indices are split into batches based on the batch size, the batches are evenly distributed between the jobs doing the execution in such a way that each job can iterate over a continuous section of memory. The ParallelFor job should call:
```c#
JobsUtility.GetWorkStealingRange(ref ranges, jobIndex, out begin, out end)
```
This continues until it returns `false`, and after calling it process all items with index between __begin__ and __end__.
The reason you get batches of items, rather than the full set of items the job should process, is that Unity will apply [work stealing](https://en.wikipedia.org/wiki/Work_stealing) if one job completes before the others. Work stealing in this context means that when one job is done it will look at the other jobs running and see if any of them still have a lot of work left. If it finds a job which is not complete it will steal some of the batches that it has not yet started; to dynamically redistribute the work.
Before a ParallelFor job starts processing items it also needs to limit the write access to NativeContainers on the range of items which the job is processing. If it does not do this several jobs can potentially write to the same index which leads to race conditions. The NativeContainers that need to be limited is passed to the job and there is a function to patch them; so they cannot access items outside the correct range. The code to do it looks like this:
```c#
#if ENABLE_UNITY_COLLECTIONS_CHECKS
JobsUtility.PatchBufferMinMaxRanges(bufferRangePatchData, UnsafeUtility.AddressOf(ref jobData), begin, end - begin);
#endif
```
# Custom NativeContainers
When writing jobs, the data communication between jobs is one of the hardest parts to get right. Just using __NativeArray__ is very limiting. Using __NativeQueue__, __NativeHashMap__ and __NativeMultiHashMap__ and their __Concurrent__ versions solves most scenarios.
For the remaining scenarios it is possible to write your own custom NativeContainers.
When writing custom containers for [thread synchronization](https://en.wikipedia.org/wiki/Synchronization_(computer_science)#Thread_or_process_synchronization) it is very important to write correct code. We strongly recommend full test coverage for any new containers you add.
As a very simple example of this we will create a __NativeCounter__ that can be incremented in a ParallelFor job through __NativeCounter.Concurrent__ and read in a later job or on the main thread.
Let's start with the basic container type:
```c#
// Mark this struct as a NativeContainer, usually this would be a generic struct for containers, but a counter does not need to be generic
// TODO - why does a counter not need to be generic? - explain the argument for this reasoning please.
[StructLayout(LayoutKind.Sequential)]
[NativeContainer]
unsafe public struct NativeCounter
{
// The actual pointer to the allocated count needs to have restrictions relaxed so jobs can be scheduled with this container
[NativeDisableUnsafePtrRestriction]
int* m_Counter;
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle m_Safety;
// The dispose sentinel tracks memory leaks. It is a managed type so it is cleared to null when scheduling a job
// The job cannot dispose the container, and no one else can dispose it until the job has run, so it is ok to not pass it along
// This attribute is required, without it this NativeContainer cannot be passed to a job; since that would give the job access to a managed object
[NativeSetClassTypeToNullOnSchedule]
DisposeSentinel m_DisposeSentinel;
#endif
// Keep track of where the memory for this was allocated
Allocator m_AllocatorLabel;
public NativeCounter(Allocator label)
{
// This check is redundant since we always use an int that is blittable.
// It is here as an example of how to check for type correctness for generic types.
#if ENABLE_UNITY_COLLECTIONS_CHECKS
if (!UnsafeUtility.IsBlittable<int>())
throw new ArgumentException(string.Format("{0} used in NativeQueue<{0}> must be blittable", typeof(int)));
#endif
m_AllocatorLabel = label;
// Allocate native memory for a single integer
m_Counter = (int*)UnsafeUtility.Malloc(UnsafeUtility.SizeOf<int>(), 4, label);
// Create a dispose sentinel to track memory leaks. This also creates the AtomicSafetyHandle
#if ENABLE_UNITY_COLLECTIONS_CHECKS
DisposeSentinel.Create(out m_Safety, out m_DisposeSentinel, 0);
#endif
// Initialize the count to 0 to avoid uninitialized data
Count = 0;
}
public void Increment()
{
// Verify that the caller has write permission on this data.
// This is the race condition protection, without these checks the AtomicSafetyHandle is useless
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle.CheckWriteAndThrow(m_Safety);
#endif
(*m_Counter)++;
}
public int Count
{
get
{
// Verify that the caller has read permission on this data.
// This is the race condition protection, without these checks the AtomicSafetyHandle is useless
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle.CheckReadAndThrow(m_Safety);
#endif
return *m_Counter;
}
set
{
// Verify that the caller has write permission on this data. This is the race condition protection, without these checks the AtomicSafetyHandle is useless
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle.CheckWriteAndThrow(m_Safety);
#endif
*m_Counter = value;
}
}
public bool IsCreated
{
get { return m_Counter != null; }
}
public void Dispose()
{
// Let the dispose sentinel know that the data has been freed so it does not report any memory leaks
#if ENABLE_UNITY_COLLECTIONS_CHECKS
DisposeSentinel.Dispose(m_Safety, ref m_DisposeSentinel);
#endif
UnsafeUtility.Free(m_Counter, m_AllocatorLabel);
m_Counter = null;
}
}
```
With this we have a simple NativeContainer where we can get, set, and increment the count. This container can be passed to a job, but it has the same restrictions as NativeArray, which means it cannot be passed to a ParallelFor job with write access.
The next step is to make it usable in ParallelFor. In order to avoid race conditions we want to make sure no-one else is reading it while the ParallelFor is writing to it. To achieve this we create a separate inner struct called Concurrent that can handle multiple writers, but no readers. We make sure NativeCounter.Concurrent can be assigned to from within a normal NativeCounter, since it is not possible for it to live separately outside a NativeCounter. <!-- TODO - why is that? -->
```c#
[NativeContainer]
// This attribute is what makes it possible to use NativeCounter.Concurrent in a ParallelFor job
[NativeContainerIsAtomicWriteOnly]
unsafe public struct Concurrent
{
// Copy of the pointer from the full NativeCounter
[NativeDisableUnsafePtrRestriction]
int* m_Counter;
// Copy of the AtomicSafetyHandle from the full NativeCounter. The dispose sentinel is not copied since this inner struct does not own the memory and is not responsible for freeing it.
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle m_Safety;
#endif
// This is what makes it possible to assign to NativeCounter.Concurrent from NativeCounter
public static implicit operator NativeCounter.Concurrent (NativeCounter cnt)
{
NativeCounter.Concurrent concurrent;
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle.CheckWriteAndThrow(cnt.m_Safety);
concurrent.m_Safety = cnt.m_Safety;
AtomicSafetyHandle.UseSecondaryVersion(ref concurrent.m_Safety);
#endif
concurrent.m_Counter = cnt.m_Counter;
return concurrent;
}
public void Increment()
{
// Increment still needs to check for write permissions
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle.CheckWriteAndThrow(m_Safety);
#endif
// The actual increment is implemented with an atomic, since it can be incremented by multiple threads at the same time
Interlocked.Increment(ref *m_Counter);
}
}
```
With this setup we can schedule ParallelFor with write access to a NativeCounter through the inner Concurrent struct, like this:
```c#
struct CountZeros : IJobParallelFor
{
[ReadOnly]
public NativeArray<int> input;
public NativeCounter.Concurrent counter;
public void Execute(int i)
{
if (input[i] == 0)
{
counter.Increment();
}
}
}
```
```c#
var counter = new NativeCounter(Allocator.Temp);
var jobData = new CountZeros();
jobData.input = input;
jobData.counter = counter;
counter.Count = 0;
var handle = jobData.Schedule(input.Length, 8);
handle.Complete();
Debug.Log("The array contains " + counter.Count + " zeros");
counter.Dispose();
```
## Better cache usage
The NativeCounter from the previous section is a working implementation of a counter, but all jobs in the ParallelFor will access the same atomic to increment the value. This is not optimal as it means the same cache line is used by all threads.
The way this is generally solved in NativeContainers is to have a local cache per worker thread, which is stored on its own cache line.
The __[NativeSetThreadIndex]__ attribute can inject a worker thread index, the index is guaranteed to be unique while accessing the NativeContainer from the ParallelFor jobs.
In order to make such an optimization here we need to change a few things. The first thing we need to change is the data layout. For performance reasons we need one full cache line per worker thread, rather than a single int to avoid [false sharing](https://en.wikipedia.org/wiki/False_sharing).
We start by adding a constant for the number of ints on a cache line.
```c#
public const int IntsPerCacheLine = JobsUtility.CacheLineSize / sizeof(int);
```
Next we change the amount of memory allocated.
```c#
// One full cache line (integers per cacheline * size of integer) for each potential worker index, JobsUtility.MaxJobThreadCount
m_Counter = (int*)UnsafeUtility.Malloc(UnsafeUtility.SizeOf<int>()*IntsPerCacheLine*JobsUtility.MaxJobThreadCount, 4, label);
```
<!-- TODO: I'm not sure which example you are referring to when you say: main, non-concurrent, version below (is this an example you used on this page or what you would do if you were not using jobified code/ECS etc. It has potential for confusion.) -->
When accessing the counter from the main, non-concurrent, version there can only be one writer so the increment function is fine with the new memory layout.
For `get` and `set` of the `count` we need to loop over all potential worker indices.
```c#
public int Count
{
get
{
// Verify that the caller has read permission on this data.
// This is the race condition protection, without these checks the AtomicSafetyHandle is useless
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle.CheckReadAndThrow(m_Safety);
#endif
int count = 0;
for (int i = 0; i < JobsUtility.MaxJobThreadCount; ++i)
count += m_Counter[IntsPerCacheLine * i];
return count;
}
set
{
// Verify that the caller has write permission on this data.
// This is the race condition protection, without these checks the AtomicSafetyHandle is useless
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle.CheckWriteAndThrow(m_Safety);
#endif
// Clear all locally cached counts,
// set the first one to the required value
for (int i = 1; i < JobsUtility.MaxJobThreadCount; ++i)
m_Counter[IntsPerCacheLine * i] = 0;
*m_Counter = value;
}
}
```
The final change is the inner Concurrent struct that needs to get the worker index injected into it. Since each worker only runs one job at a time, there is no longer any need to use atomics when only accessing the local count.
```c#
[NativeContainer]
[NativeContainerIsAtomicWriteOnly]
// Let the job system know that it should inject the current worker index into this container
unsafe public struct Concurrent
{
[NativeDisableUnsafePtrRestriction]
int* m_Counter;
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle m_Safety;
#endif
// The current worker thread index; it must use this exact name since it is injected
[NativeSetThreadIndex]
int m_ThreadIndex;
public static implicit operator NativeCacheCounter.Concurrent (NativeCacheCounter cnt)
{
NativeCacheCounter.Concurrent concurrent;
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle.CheckWriteAndThrow(cnt.m_Safety);
concurrent.m_Safety = cnt.m_Safety;
AtomicSafetyHandle.UseSecondaryVersion(ref concurrent.m_Safety);
#endif
concurrent.m_Counter = cnt.m_Counter;
concurrent.m_ThreadIndex = 0;
return concurrent;
}
public void Increment()
{
#if ENABLE_UNITY_COLLECTIONS_CHECKS
AtomicSafetyHandle.CheckWriteAndThrow(m_Safety);
#endif
// No need for atomics any more since we are just incrementing the local count
++m_Counter[IntsPerCacheLine*m_ThreadIndex];
}
}
```
Writing the NativeCounter this way significantly reduces the overhead of having multiple threads writing to it. It does, however, come at a price. The cost of getting the count on the main thread has increased significantly since it now needs to check all local caches and sum them up. If you are aware of this and make sure to cache the return values it is usually worth it, but you need to know the limitations of your data structures. So we strongly recommend documenting the performance characteristics.
## Tests
The NativeCounter is not complete, the only thing left is to add tests for it to make sure it is correct and that it does not break in the future. When writing tests you should try to cover as many unusual scenarios as possible. It is also a good idea to add some kind of stress test using jobs to detect race conditions, even if it is unlikely to find all of them. The NativeCounter API is very small so the number of tests required is not huge.
* Both versions of the counter examples above are available at: _/Assets/NativeCounterDemo_.
* The tests for them can be found at: _/Assets/NativeCounterDemo/Editor/NativeCounterTests.cs_.
## Available attributes
The NativeCounter uses many attributes, but there are a few more available for other types of containers. Here is a list of the available attributes you can use on the NativeContainer struct.
* [NativeContainer](https://docs.unity3d.com/2018.1/Documentation/ScriptReference/Unity.Collections.LowLevel.Unsafe.NativeContainerAttribute.html) - marks a struct as a NativeContainer. Required for all native containers.
* [NativeContainerSupportsMinMaxWriteRestriction](https://docs.unity3d.com/2018.1/Documentation/ScriptReference/Unity.Collections.LowLevel.Unsafe.NativeContainerSupportsMinMaxWriteRestrictionAttribute.html) - signals that the NativeContainer can restrict its writable ranges to be between a min and max index. This is used when passing the container to an IJobParallelFor to make sure that the job does not write to indices it is not supposed to process. In order to use this the NativeContainer must have the members int __m_Length__, int __m_MinIndex__ and int __m_MaxIndex__ in that order with no other members between them. The container must also throw an exception for writes outside the min/max range.
* [NativeContainerIsAtomicWriteOnly](https://docs.unity3d.com/2018.1/Documentation/ScriptReference/Unity.Collections.LowLevel.Unsafe.NativeContainerIsAtomicWriteOnlyAttribute.html) - signals that the NativeContainer uses atomic writes and no reads. By adding this it is possible to pass the NativeContainer to an IJobParallelFor as writable without restrictions on which indices can be written to.
* [NativeContainerSupportsDeallocateOnJobCompletion](https://docs.unity3d.com/2018.1/Documentation/ScriptReference/Unity.Collections.LowLevel.Unsafe.NativeContainerSupportsDeallocateOnJobCompletionAttribute.html) - makes the NativeContainer usable with [DeallocateOnJobCompletion](https://docs.unity3d.com/2018.1/Documentation/ScriptReference/Unity.Collections.DeallocateOnJobCompletionAttribute.html). In order to use this the NativeContainer must have a single allocation in __m_Buffer__, an allocator label in __m_AllocatorLabel__ and a dispose sentinel in __m_DisposeSentinel__.
* [NativeSetThreadIndex](https://docs.unity3d.com/2018.1/Documentation/ScriptReference/Unity.Collections.LowLevel.Unsafe.NativeSetThreadIndexAttribute.html) - Patches an int with the thread index of the job.
In addition to these attributes on the native container struct itself there are a few attributes which can be used on members of the native container.
* [NativeDisableUnsafePtrRestriction](https://docs.unity3d.com/2018.1/Documentation/ScriptReference/Unity.Collections.LowLevel.Unsafe.NativeDisableUnsafePtrRestrictionAttribute.html) - allows the NativeContainer to be passed to a job even though it contains a pointer, which is usually not allowed.
* [NativeSetClassTypeToNullOnSchedule](https://docs.unity3d.com/2018.1/Documentation/ScriptReference/Unity.Collections.LowLevel.Unsafe.NativeSetClassTypeToNullOnScheduleAttribute.html) - allows the NativeContainer to be passed to a job even though it contains a managed object. The managed object will be set to `null` on the copy passed to the job.

20
Packages/com.unity.jobs/Documentation~/filter.yml


apiRules:
- exclude:
# inherited Object methods
uidRegex: ^System\.Object\..*$
type: Method
- exclude:
# mentioning types from System.* namespace
uidRegex: ^System\..*$
type: Type
- exclude:
hasAttribute:
uid: System.ObsoleteAttribute
type: Member
- exclude:
hasAttribute:
uid: System.ObsoleteAttribute
type: Type
- exclude:
uidRegex: Tests$
type: Namespace

4
Packages/com.unity.jobs/Documentation~/images/pixel.png

之前 之后
宽度: 1  |  高度: 1  |  大小: 122 B

5
Packages/com.unity.jobs/Documentation~/index.md


# Unity Jobs Package
The Jobs Package extends the core Unity Job system with types helpful when using the Entity Component System.
The main documentation for the C# Job System resides in the Unity Manual. See [C# Job System](https://docs.unity3d.com/Manual/JobSystem.html).

36
Packages/com.unity.jobs/Documentation~/scheduling_a_job_from_a_job.md


# Scheduling a job from a job - why not?
We have a couple of important principles that drive our design.
* Determinism by default: Determinism enables networked games, replay and debugging tools.
* Safe: Race conditions are immediately reported, this makes writing jobified code significantly more approachable and simple.
These two principles applied result in some choices and restrictions that we enforce.
## Jobs can only be completed on the main thread - but why?
Calling __JobHandle.Complete__ from within a job leads to job scheduler deadlocks that are impossible to solve in the general case.
(We have tried this over the last couple years with the Unity C++ code base, and every single case has resulted in tears and us reverting such patterns in our code.) The deadlocks are rare but provably impossible to solve in all cases, they are heavily dependent on the timing of jobs.
## Jobs can only be scheduled on the main thread - but why?
If you were to simply schedule a job from another job, but not call JobHandle.Complete from the job, then there is no way to guarantee determinism. The main thread has to call JobHandle.Complete(), but who passes that JobHandle to the main thread? How do you know the job that schedules the other job has already executed?
In summary, first instinct is to simply schedule jobs from other jobs, and then wait for jobs within a job.
Yet experience tells us that this is always a bad idea. So the C# job system does not support it.
## OK, but how do I process workloads where I don't know the exact size upfront?
It's totally fine to schedule jobs conservatively and then simply exit early and do nothing if it turns out the number of actual elements to process, when the job executes, is much less than the conservative number of elements that was determined at schedule time.
In fact this way of doing it leads to deterministic execution, and if the early exit can skip a whole batch of operations it's not really a performance issue.
Also, there is no possibility of causing internal job scheduler deadlocks.
For this purpose using __IJobParallelForBatch__ as opposed to __IJobParallelFor__ can be very useful since you can exit early on a whole batch.
```
public interface IJobParallelForBatch
{
void Execute(int startIndex, int count);
}
```
<!-- TODO: CODE EXAMPLE for sorting? -->

8
Packages/com.unity.jobs/Editor.meta


fileFormatVersion: 2
guid: df44a713789094ce7a8dec834299ed29
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

32
Packages/com.unity.jobs/Editor/CLILeakDetectionSwitcher.cs


using System;
using UnityEditor;
using Unity.Collections;
using UnityEngine;
// Applies the native-collection leak detection mode requested through the
// UNITY_JOBS_NATIVE_LEAK_DETECTION_MODE environment variable when the editor loads.
// Intended for CI/command-line runs where menu toggles are unavailable.
class CLILeakDetectionSwitcher
{
    [InitializeOnLoadMethod]
    static void SetLeakDetectionModeFromEnvironment()
    {
        var requested = Environment.GetEnvironmentVariable("UNITY_JOBS_NATIVE_LEAK_DETECTION_MODE");
        if (string.IsNullOrEmpty(requested))
            return; // Variable not set: leave the current mode untouched and stay silent.

        if (requested == "0")
            NativeLeakDetection.Mode = NativeLeakDetectionMode.Disabled;
        else if (requested == "1")
            NativeLeakDetection.Mode = NativeLeakDetectionMode.Enabled;
        else if (requested == "2")
            NativeLeakDetection.Mode = NativeLeakDetectionMode.EnabledWithStackTrace;
        else
            Debug.LogWarning("The environment variable UNITY_JOBS_NATIVE_LEAK_DETECTION_MODE has an invalid value. Please use: 0 = Disabled, 1 = Enabled, 2 = EnabledWithStackTrace.");

        // Log the effective mode (also on invalid input) so CI logs show what was applied.
        Debug.Log("Native leak detection mode: " + NativeLeakDetection.Mode);
    }
}

13
Packages/com.unity.jobs/Editor/CLILeakDetectionSwitcher.cs.meta


fileFormatVersion: 2
guid: a3b018ffc4586a441a093c4d954a8cf1
timeCreated: 1507328300
licenseType: Pro
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

89
Packages/com.unity.jobs/Editor/JobsMenu.cs


using UnityEditor;
using Unity.Collections;
using Unity.Jobs.LowLevel.Unsafe;
// Editor menu entries under "Jobs/" for toggling job worker threads, the jobs
// debugger, and native-collection leak detection.
class JobsMenu
{
    // Worker count to restore when job threads are re-enabled via the menu.
    private static int savedJobWorkerCount = JobsUtility.JobWorkerCount;

    const string kUseJobThreads = "Jobs/Use Job Threads";

    // Toggles between running jobs on worker threads and forcing everything onto the main thread.
    [MenuItem(kUseJobThreads, false)]
    static void SwitchUseJobThreads()
    {
        if (JobsUtility.JobWorkerCount > 0)
        {
            // Remember the current count so the next toggle can restore it.
            savedJobWorkerCount = JobsUtility.JobWorkerCount;
            try
            {
                JobsUtility.JobWorkerCount = 0;
            }
            catch (System.ArgumentOutOfRangeException e) when (e.ParamName == "JobWorkerCount")
            {
                // Setting the worker count to zero is rejected by older editors.
                UnityEngine.Debug.LogWarning("Disabling Job Threads requires Unity Version 2020.1.a15 or newer");
            }
        }
        else
        {
            JobsUtility.JobWorkerCount = savedJobWorkerCount;
            if (savedJobWorkerCount == 0)
            {
                // A saved count of zero would leave threads disabled; fall back to the platform default.
                JobsUtility.ResetJobWorkerCount();
            }
        }
    }

    // Validation callback: keeps the menu checkmark in sync with the current worker count.
    [MenuItem(kUseJobThreads, true)]
    static bool SwitchUseJobThreadsValidate()
    {
        Menu.SetChecked(kUseJobThreads, (JobsUtility.JobWorkerCount > 0));
        return true;
    }

    const string kDebuggerMenu = "Jobs/JobsDebugger";

    // Toggles the jobs debugger (safety-system race detection).
    [MenuItem(kDebuggerMenu, false)]
    static void SwitchJobsDebugger()
    {
        JobsUtility.JobDebuggerEnabled = !JobsUtility.JobDebuggerEnabled;
    }

    [MenuItem(kDebuggerMenu, true)]
    static bool SwitchJobsDebuggerValidate()
    {
        Menu.SetChecked(kDebuggerMenu, JobsUtility.JobDebuggerEnabled);
        return true;
    }

    const string kLeakOff = "Jobs/Leak Detection/Off";
    const string kLeakOn = "Jobs/Leak Detection/On";
    const string kLeakDetectionFull = "Jobs/Leak Detection/Full Stack Traces (Expensive)";

    [MenuItem(kLeakOff)]
    static void SwitchLeaksOff()
    {
        NativeLeakDetection.Mode = NativeLeakDetectionMode.Disabled;
    }

    [MenuItem(kLeakOn)]
    static void SwitchLeaksOn()
    {
        NativeLeakDetection.Mode = NativeLeakDetectionMode.Enabled;
    }

    [MenuItem(kLeakDetectionFull)]
    static void SwitchLeaksFull()
    {
        NativeLeakDetection.Mode = NativeLeakDetectionMode.EnabledWithStackTrace;
    }

    // Single validation callback updates the checkmarks for all three leak-detection entries.
    [MenuItem(kLeakOff, true)]
    static bool SwitchLeaksOffValidate()
    {
        Menu.SetChecked(kLeakOff, NativeLeakDetection.Mode == NativeLeakDetectionMode.Disabled);
        Menu.SetChecked(kLeakOn, NativeLeakDetection.Mode == NativeLeakDetectionMode.Enabled);
        Menu.SetChecked(kLeakDetectionFull, NativeLeakDetection.Mode == NativeLeakDetectionMode.EnabledWithStackTrace);
        return true;
    }
}

13
Packages/com.unity.jobs/Editor/JobsMenu.cs.meta


fileFormatVersion: 2
guid: 7a96926915a7746789220056d7c409a5
timeCreated: 1507328300
licenseType: Pro
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

11
Packages/com.unity.jobs/Editor/Unity.Jobs.Editor.asmdef


{
"name": "Unity.Jobs.Editor",
"references": [
"Unity.Jobs"
],
"optionalUnityReferences": [],
"includePlatforms": [
"Editor"
],
"excludePlatforms": []
}

7
Packages/com.unity.jobs/Editor/Unity.Jobs.Editor.asmdef.meta


fileFormatVersion: 2
guid: 261882d2e8b4744e6bb349bdc6a75dc1
AssemblyDefinitionImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

30
Packages/com.unity.jobs/LICENSE.md


Unity Companion License (“License”)
Software Copyright © 2017-2020 Unity Technologies ApS
Unity Technologies ApS (“Unity”) grants to you a worldwide, non-exclusive, no-charge, and royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute the software that is made available under this License (“Software”), subject to the following terms and conditions:
1. Unity Companion Use Only. Exercise of the license granted herein is limited to exercise for the creation, use, and/or distribution of applications, software, or other content pursuant to a valid Unity content authoring and rendering engine software license (“Engine License”). That means while use of the Software is not limited to use in the software licensed under the Engine License, the Software may not be used for any purpose other than the creation, use, and/or distribution of Engine License-dependent applications, software, or other content. No other exercise of the license granted herein is permitted, and in no event may the Software be used for competitive analysis or to develop a competing product or service.
2. No Modification of Engine License. Neither this License nor any exercise of the license granted herein modifies the Engine License in any way.
3. Ownership & Grant Back to You.
3.1 You own your content. In this License, “derivative works” means derivatives of the Software itself--works derived only from the Software by you under this License (for example, modifying the code of the Software itself to improve its efficacy); “derivative works” of the Software do not include, for example, games, apps, or content that you create using the Software. You keep all right, title, and interest to your own content.
3.2 Unity owns its content. While you keep all right, title, and interest to your own content per the above, as between Unity and you, Unity will own all right, title, and interest to all intellectual property rights (including patent, trademark, and copyright) in the Software and derivative works of the Software, and you hereby assign and agree to assign all such rights in those derivative works to Unity.
3.3 You have a license to those derivative works. Subject to this License, Unity grants to you the same worldwide, non-exclusive, no-charge, and royalty-free copyright license to derivative works of the Software you create as is granted to you for the Software under this License.
4. Trademarks. You are not granted any right or license under this License to use any trademarks, service marks, trade names, products names, or branding of Unity or its affiliates (“Trademarks”). Descriptive uses of Trademarks are permitted; see, for example, Unity’s Branding Usage Guidelines at https://unity3d.com/public-relations/brand.
5. Notices & Third-Party Rights. This License, including the copyright notice associated with the Software, must be provided in all substantial portions of the Software and derivative works thereof (or, if that is impracticable, in any other location where such notices are customarily placed). Further, if the Software is accompanied by a Unity “third-party notices” or similar file, you acknowledge and agree that software identified in that file is governed by those separate license terms.
6. DISCLAIMER, LIMITATION OF LIABILITY. THE SOFTWARE AND ANY DERIVATIVE WORKS THEREOF IS PROVIDED ON AN "AS IS" BASIS, AND IS PROVIDED WITHOUT WARRANTY OF ANY KIND, WHETHER EXPRESS OR IMPLIED, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR NONINFRINGEMENT. IN NO EVENT SHALL ANY COPYRIGHT HOLDER OR AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES (WHETHER DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL, INCLUDING PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, LOSS OF USE, DATA, OR PROFITS, AND BUSINESS INTERRUPTION), OR OTHER LIABILITY WHATSOEVER, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM OR OUT OF, OR IN CONNECTION WITH, THE SOFTWARE OR ANY DERIVATIVE WORKS THEREOF OR THE USE OF OR OTHER DEALINGS IN SAME, EVEN WHERE ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7. USE IS ACCEPTANCE and License Versions. Your receipt and use of the Software constitutes your acceptance of this License and its terms and conditions. Software released by Unity under this License may be modified or updated and the License with it; upon any such modification or update, you will comply with the terms of the updated License for any use of any of the Software under the updated License.
8. Use in Compliance with Law and Termination. Your exercise of the license granted herein will at all times be in compliance with applicable law and will not infringe any proprietary rights (including intellectual property rights); this License will terminate immediately on any breach by you of this License.
9. Severability. If any provision of this License is held to be unenforceable or invalid, that provision will be enforced to the maximum extent possible and the other provisions will remain in full force and effect.
10. Governing Law and Venue. This License is governed by and construed in accordance with the laws of Denmark, except for its conflict of laws rules; the United Nations Convention on Contracts for the International Sale of Goods will not apply. If you reside (or your principal place of business is) within the United States, you and Unity agree to submit to the personal and exclusive jurisdiction of and venue in the state and federal courts located in San Francisco County, California concerning any dispute arising out of this License (“Dispute”). If you reside (or your principal place of business is) outside the United States, you and Unity agree to submit to the personal and exclusive jurisdiction of and venue in the courts located in Copenhagen, Denmark concerning any Dispute.

7
Packages/com.unity.jobs/LICENSE.md.meta


fileFormatVersion: 2
guid: f9474ce91ede643258663ff3c5392424
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

8
Packages/com.unity.jobs/Unity.Jobs.Tests.meta


fileFormatVersion: 2
guid: 05ee6c1e82201411f994177ecda6086f
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

8
Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs.meta


fileFormatVersion: 2
guid: e1d350448ea42c04fb892565ddbd3231
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

88
Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobStressTests.cs


using System;
using NUnit.Framework;
using Unity.Collections;
using Unity.Jobs;
namespace Unity.Jobs.Tests.ManagedJobs
{
// Stress tests for the job system: many parallel batches over randomly sized data
// and very deep dependency chains.
public class JobStressTests : JobTestsFixture
{
    // Writes each element's own index into the output array.
    struct JobSetIndexValue : IJobParallelFor
    {
        public NativeArray<int> value;
        public void Execute(int index)
        {
            value[index] = index;
        }
    }

    [Test]
    public void StressTestParallelFor()
    {
        StressTestParallelForIterations(1, 5000);
    }

    // Runs 'amount' iterations; each schedules two ParallelFor jobs over a randomly
    // sized array with random batch sizes, then verifies every index was written once.
    public void StressTestParallelForIterations(int amount, int amountOfData)
    {
        for (var k = 0; k != amount; k++)
        {
            var len = UnityEngine.Random.Range(1, amountOfData);
            JobSetIndexValue job1;
            job1.value = new NativeArray<int>(len, Allocator.TempJob);
            JobSetIndexValue job2;
            job2.value = new NativeArray<int>(len, Allocator.TempJob);
            var job1Handle = job1.Schedule(len, UnityEngine.Random.Range(1, 1024));
            var job2Handle = job2.Schedule(len, UnityEngine.Random.Range(1, 1024));
            // Complete in reverse scheduling order to exercise out-of-order completion.
            job2Handle.Complete();
            job1Handle.Complete();
            for (var i = 0; i < len; i++)
            {
                Assert.AreEqual(i, job1.value[i]);
                Assert.AreEqual(i, job2.value[i]);
            }
            job1.value.Dispose();
            job2.value.Dispose();
        }
    }

    // Increments the single element of 'value' by one.
    // NOTE(review): 'expected' is assigned by the test but never read in Execute — confirm intent.
    struct JobSetValue : IJob
    {
        public int expected;
        public NativeArray<int> value;
        public void Execute()
        {
            value[0] = value[0] + 1;
        }
    }

    // Chains 10000 jobs, each depending on the previous one, and checks every
    // increment ran exactly once.
    [Test]
    public void DeepDependencyChain()
    {
        var array = new NativeArray<int>(1, Allocator.Persistent);
        var jobHandle = new JobHandle();
        const int depth = 10000;
        for (var i = 0; i < depth; i++)
        {
            var job = new JobSetValue
            {
                value = array,
                expected = i
            };
            jobHandle = job.Schedule(jobHandle);
        }
        jobHandle.Complete();
        Assert.AreEqual(depth, array[0]);
        array.Dispose();
    }
}
}

11
Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobStressTests.cs.meta


fileFormatVersion: 2
guid: 36d0e1be06be93249a0ffa0c8356e69e
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

519
Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTests.cs


using System;
using NUnit.Framework;
using Unity.Collections;
using Unity.Collections.LowLevel.Unsafe;
using Unity.Jobs;
using Unity.Jobs.LowLevel.Unsafe;
namespace Unity.Jobs.Tests.ManagedJobs
{
#if UNITY_DOTSRUNTIME
// Under DOTS Runtime this derives from IgnoreAttribute, so marked tests are skipped
// with an explanatory message until the underlying issue is fixed.
public class DotsRuntimeFixmeAttribute : IgnoreAttribute
{
    public DotsRuntimeFixmeAttribute(string msg = null) : base(msg == null ? "Test should work in DOTS Runtime but currently doesn't. Ignoring until fixed..." : msg)
    {
    }
}
#else
// Outside DOTS Runtime the attribute is inert, so the same tests run normally.
public class DotsRuntimeFixmeAttribute : Attribute
{
    public DotsRuntimeFixmeAttribute(string msg = null)
    {
    }
}
#endif
/// <summary>
/// Minimal custom job interface used to exercise the job-producer plumbing;
/// scheduled via <see cref="IJobTestExtensions.ScheduleTest{T}"/>.
/// </summary>
[JobProducerType(typeof(IJobTestExtensions.JobTestProducer<>))]
public interface IJobTest
{
    void Execute();
}
/// <summary>
/// Scheduling support for <see cref="IJobTest"/> jobs. Demonstrates a job
/// producer that owns a resource of its own (<c>ProducerResourceToClean</c>)
/// which is deallocated automatically when the job completes.
/// </summary>
public static class IJobTestExtensions
{
    // Wrapper actually handed to the job system: user job data plus a
    // producer-owned buffer freed on completion.
    internal struct JobTestWrapper<T> where T : struct
    {
        internal T JobData;

        [NativeDisableContainerSafetyRestriction]
        [DeallocateOnJobCompletion]
        internal NativeArray<byte> ProducerResourceToClean;
    }

    internal struct JobTestProducer<T> where T : struct, IJobTest
    {
        // Reflection data is created once per closed generic type and cached.
        static IntPtr s_JobReflectionData;

        public static IntPtr Initialize()
        {
            if (s_JobReflectionData == IntPtr.Zero)
            {
#if UNITY_2020_2_OR_NEWER
                s_JobReflectionData = JobsUtility.CreateJobReflectionData(typeof(JobTestWrapper<T>), typeof(T), (ExecuteJobFunction)Execute);
#else
                // Pre-2020.2 API requires an explicit JobType.
                s_JobReflectionData = JobsUtility.CreateJobReflectionData(typeof(JobTestWrapper<T>), typeof(T),
                    JobType.Single, (ExecuteJobFunction)Execute);
#endif
            }
            return s_JobReflectionData;
        }

        public delegate void ExecuteJobFunction(ref JobTestWrapper<T> jobWrapper, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex);

        // Trampoline invoked by the job system; forwards to the user job.
        public unsafe static void Execute(ref JobTestWrapper<T> jobWrapper, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex)
        {
            jobWrapper.JobData.Execute();
        }
    }

    /// <summary>
    /// Schedules an <see cref="IJobTest"/>; <paramref name="dataForProducer"/>
    /// is owned by the wrapper and deallocated when the job completes.
    /// </summary>
    public static unsafe JobHandle ScheduleTest<T>(this T jobData, NativeArray<byte> dataForProducer, JobHandle dependsOn = new JobHandle()) where T : struct, IJobTest
    {
        JobTestWrapper<T> jobTestWrapper = new JobTestWrapper<T>
        {
            JobData = jobData,
            ProducerResourceToClean = dataForProducer
        };

        var scheduleParams = new JobsUtility.JobScheduleParameters(
            UnsafeUtility.AddressOf(ref jobTestWrapper),
            JobTestProducer<T>.Initialize(),
            dependsOn,
#if UNITY_2020_2_OR_NEWER
            ScheduleMode.Parallel
#else
            ScheduleMode.Batched
#endif
        );

        return JobsUtility.Schedule(ref scheduleParams);
    }
}
// Generic IJob that resizes its NativeList to m_ListLength
// (uninitialized memory; contents are filled by a follow-up job).
public struct MyGenericResizeJob<T> : IJob where T : unmanaged
{
    public int m_ListLength;
    public NativeList<T> m_GenericList;

    public void Execute()
    {
        m_GenericList.Resize(m_ListLength, NativeArrayOptions.UninitializedMemory);
    }
}
// Deferred parallel-for job that writes m_Value into every element of the
// list; the iteration count is taken from the list at execution time.
public struct MyGenericJobDefer<T> : IJobParallelForDefer where T: unmanaged
{
    public T m_Value;
    // Each index writes a distinct element, so parallel writes are safe here.
    [NativeDisableParallelForRestriction]
    public NativeList<T> m_GenericList;

    public void Execute(int index)
    {
        m_GenericList[index] = m_Value;
    }
}
// Like MyGenericResizeJob, but the container itself is a generic parameter
// constrained to INativeList; resizes by assigning Length.
public struct GenericContainerResizeJob<T, U> : IJob
    where T : struct, INativeList<U>
    where U : struct
{
    public int m_ListLength;
    public T m_GenericList;

    public void Execute()
    {
        m_GenericList.Length = m_ListLength;
    }
}
// Deferred parallel-for fill job over a generic INativeList container;
// writes m_Value into every element.
public struct GenericContainerJobDefer<T, U> : IJobParallelForDefer
    where T : struct, INativeList<U>
    where U : struct
{
    public U m_Value;
    // Each index writes a distinct element, so parallel writes are safe here.
    [NativeDisableParallelForRestriction]
    public T m_GenericList;

    public void Execute(int index)
    {
        m_GenericList[index] = m_Value;
    }
}
public class JobTests : JobTestsFixture
{
    // Schedules a resize job followed by a deferred fill job over a generic
    // container, then verifies length and contents on the main thread.
    public void ScheduleGenericContainerJob<T, U>(T container, U value)
        where T : struct, INativeList<U>
        where U : unmanaged
    {
        var j0 = new GenericContainerResizeJob<T, U>();
        var length = 5;
        j0.m_ListLength = length;
        j0.m_GenericList = container;
        var handle0 = j0.Schedule();

        var j1 = new GenericContainerJobDefer<T, U>();
        j1.m_Value = value;
        j1.m_GenericList = j0.m_GenericList;
        INativeList<U> iList = j0.m_GenericList;
        // Deferred schedule: iteration count comes from the list's length
        // after handle0 completes, not from its length right now.
        j1.Schedule((NativeList<U>)iList, 1, handle0).Complete();

        Assert.AreEqual(length, j1.m_GenericList.Length);
        for (int i = 0; i != j1.m_GenericList.Length; i++)
            Assert.AreEqual(value, j1.m_GenericList[i]);
    }

    [Test]
    public void ValidateContainerSafetyInGenericJob_ContainerIsGenericParameter()
    {
        var list = new NativeList<int>(1, Allocator.TempJob);
        ScheduleGenericContainerJob(list, 5);
        list.Dispose();
    }

    // Same resize-then-fill pairing as above, but with a concrete
    // NativeList<T> container allocated (and disposed) locally.
    public void GenericScheduleJobPair<T>(T value) where T : unmanaged
    {
        var j0 = new MyGenericResizeJob<T>();
        var length = 5;
        j0.m_ListLength = length;
        j0.m_GenericList = new NativeList<T>(1, Allocator.TempJob);
        var handle0 = j0.Schedule();

        var j1 = new MyGenericJobDefer<T>();
        j1.m_Value = value;
        j1.m_GenericList = j0.m_GenericList;
        j1.Schedule(j0.m_GenericList, 1, handle0).Complete();

        Assert.AreEqual(length, j1.m_GenericList.Length);
        for (int i = 0; i != j1.m_GenericList.Length; i++)
            Assert.AreEqual(value, j1.m_GenericList[i]);
        j0.m_GenericList.Dispose();
    }

    [Test]
    public void ScheduleGenericJobPairFloat()
    {
        GenericScheduleJobPair(10f);
    }

    [Test]
    public void ScheduleGenericJobPairDouble()
    {
        GenericScheduleJobPair<double>(10.0);
    }

    [Test]
    public void ScheduleGenericJobPairInt()
    {
        GenericScheduleJobPair(20);
    }

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    [Test]
    public void SchedulingGenericJobUnsafelyThrows()
    {
        var j0 = new MyGenericResizeJob<int>();
        var length = 5;
        j0.m_ListLength = length;
        j0.m_GenericList = new NativeList<int>(1, Allocator.TempJob);
        var handle0 = j0.Schedule();

        var j1 = new MyGenericJobDefer<int>();
        j1.m_Value = 6;
        j1.m_GenericList = j0.m_GenericList;
        // Scheduling without depending on handle0 races the resize job, so
        // the safety system must reject it.
        Assert.Throws<InvalidOperationException>(()=>j1.Schedule(j0.m_GenericList, 1).Complete());
        handle0.Complete();
        j0.m_GenericList.Dispose();
    }
#endif

    [Test, DotsRuntimeFixme("From a pure generic context, DOTS Runtime cannot determine what closed generic jobs are scheduled. See DOTSR-2347")]
    public void SchedulingGenericJobFromGenericContextUnsafelyThrows()
    {
        var list = new NativeList<int>(1, Allocator.TempJob);
        ScheduleGenericJobUnsafely(list, 5);
        list.Dispose();
    }

    // Deliberately schedules a deferred job without the resize job as a
    // dependency and expects the safety system to throw.
    void ScheduleGenericJobUnsafely<T, U>(T container, U value)
        where T : struct, INativeList<U>
        where U : unmanaged
    {
        var j0 = new GenericContainerResizeJob<T, U>();
        var length = 5;
        j0.m_ListLength = length;
        j0.m_GenericList = container;
        var handle0 = j0.Schedule();

        var j1 = new GenericContainerJobDefer<T, U>();
        j1.m_Value = value;
        j1.m_GenericList = j0.m_GenericList;
        INativeList<U> iList = j0.m_GenericList;
        Assert.Throws<InvalidOperationException>(()=>j1.Schedule((NativeList<U>)iList, 1).Complete());

        // Note we now pass the correct dependency to complete the job otherwise we won't be able to dispose the list
        // which will cause other tests to fail when they detect leaks. We can't just throw and then dispose since the
        // safety system will see that the list was scheduled and should first have the job completed (however we
        // are intentionally setting up a job that cannot complete)
        j1.Schedule((NativeList<U>)iList, 1, handle0).Complete();
    }

    /*
     * these two tests used to test that a job that inherited from both IJob and IJobParallelFor would work as expected
     * but that's probably crazy.
     */
    /*[Test]
    public void Scheduling()
    {
        var job = data.Schedule();
        job.Complete();
        ExpectOutputSumOfInput0And1();
    }*/
    /*[Test]
    public void Scheduling_With_Dependencies()
    {
        data.input0 = input0;
        data.input1 = input1;
        data.output = output2;
        var job1 = data.Schedule();
        // Schedule job2 with dependency against the first job
        data.input0 = output2;
        data.input1 = input2;
        data.output = output;
        var job2 = data.Schedule(job1);
        // Wait for completion
        job2.Complete();
        ExpectOutputSumOfInput0And1And2();
    }*/

    // Chains two parallel-for sum jobs: job2 consumes job1's output buffer,
    // so the combined result is input0 + input1 + input2.
    [Test]
    public void ForEach_Scheduling_With_Dependencies()
    {
        data.input0 = input0;
        data.input1 = input1;
        data.output = output2;
        var job1 = data.Schedule(output.Length, 1);

        // Schedule job2 with dependency against the first job
        data.input0 = output2;
        data.input1 = input2;
        data.output = output;
        var job2 = data.Schedule(output.Length, 1, job1);

        // Wait for completion
        job2.Complete();
        ExpectOutputSumOfInput0And1And2();
    }

    struct EmptyComputeParallelForJob : IJobParallelFor
    {
        public void Execute(int i)
        {
        }
    }

    // A zero-length parallel-for must schedule and complete without error.
    [Test]
    public void ForEach_Scheduling_With_Zero_Size()
    {
        var test = new EmptyComputeParallelForJob();
        var job = test.Schedule(0, 1);
        job.Complete();
    }

    [Test]
    public void Deallocate_Temp_NativeArray_From_Job()
    {
        TestDeallocateNativeArrayFromJob(Allocator.TempJob);
    }

    [Test]
    public void Deallocate_Persistent_NativeArray_From_Job()
    {
        TestDeallocateNativeArrayFromJob(Allocator.Persistent);
    }

    // Verifies [DeallocateOnJobCompletion] input arrays are still readable
    // before scheduling and that their contents are copied out correctly.
    private void TestDeallocateNativeArrayFromJob(Allocator label)
    {
        var tempNativeArray = new NativeArray<int>(expectedInput0, label);

        var copyAndDestroyJob = new CopyAndDestroyNativeArrayParallelForJob
        {
            input = tempNativeArray,
            output = output
        };

        // NativeArray can safely be accessed before scheduling
        Assert.AreEqual(10, tempNativeArray.Length);
        tempNativeArray[0] = tempNativeArray[0];

        var job = copyAndDestroyJob.Schedule(copyAndDestroyJob.input.Length, 1);
        job.Complete();
        Assert.AreEqual(expectedInput0, copyAndDestroyJob.output.ToArray());
    }

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    public struct NestedDeallocateStruct
    {
        // This should deallocate even though it's a nested field
        [DeallocateOnJobCompletion]
        public NativeArray<int> input;
    }

    public struct TestNestedDeallocate : IJob
    {
        public NestedDeallocateStruct nested;
        public NativeArray<int> output;

        public void Execute()
        {
            for (int i = 0; i < nested.input.Length; ++i)
                output[i] = nested.input[i];
        }
    }

    [Test]
    public void TestNestedDeallocateOnJobCompletion()
    {
        var tempNativeArray = new NativeArray<int>(10, Allocator.TempJob);
        var outNativeArray = new NativeArray<int>(10, Allocator.TempJob);
        for (int i = 0; i < 10; i++)
            tempNativeArray[i] = i;

        var job = new TestNestedDeallocate
        {
            nested = new NestedDeallocateStruct() { input = tempNativeArray },
            output = outNativeArray
        };
        var handle = job.Schedule();
        handle.Complete();

        outNativeArray.Dispose();

        // Ensure released safety handle indicating invalid buffer
        Assert.Throws<InvalidOperationException>(() => { AtomicSafetyHandle.CheckExistsAndThrow(NativeArrayUnsafeUtility.GetAtomicSafetyHandle(tempNativeArray)); });
        Assert.Throws<InvalidOperationException>(() => { AtomicSafetyHandle.CheckExistsAndThrow(NativeArrayUnsafeUtility.GetAtomicSafetyHandle(job.nested.input)); });
    }

    public struct TestJobProducerJob : IJobTest
    {
        [DeallocateOnJobCompletion]
        public NativeArray<int> jobStructData;

        public void Execute()
        {
        }
    }

    // Both the job's own array and the producer's wrapper resource must be
    // deallocated (safety handles released) once the job completes.
    [Test]
    public void TestJobProducerCleansUp()
    {
        var tempNativeArray = new NativeArray<int>(10, Allocator.TempJob);
        var tempNativeArray2 = new NativeArray<byte>(16, Allocator.TempJob);

        var job = new TestJobProducerJob
        {
            jobStructData = tempNativeArray,
        };
        var handle = job.ScheduleTest(tempNativeArray2);
        handle.Complete();

        // Check job data
        Assert.Throws<InvalidOperationException>(() => { AtomicSafetyHandle.CheckExistsAndThrow(NativeArrayUnsafeUtility.GetAtomicSafetyHandle(tempNativeArray)); });
        Assert.Throws<InvalidOperationException>(() => { AtomicSafetyHandle.CheckExistsAndThrow(NativeArrayUnsafeUtility.GetAtomicSafetyHandle(job.jobStructData)); });
        // Check job producer
        Assert.Throws<InvalidOperationException>(() => { AtomicSafetyHandle.CheckExistsAndThrow(NativeArrayUnsafeUtility.GetAtomicSafetyHandle(tempNativeArray2)); });
    }

    public struct CopyJob : IJob
    {
        public NativeList<int> List1;
        public NativeList<int> List2;

        public void Execute()
        {
            // Struct assignment copies the list header (and safety handle)
            // inside the job's own copy of the job data.
            List1 = List2;
        }
    }

    [Test]
    public unsafe void TestContainerCopy_EnsureSafetyHandlesCopyAndDisposeProperly()
    {
        var list1 = new NativeList<int>(10, Allocator.TempJob);
        var list2 = new NativeList<int>(10, Allocator.TempJob);
        list1.Add(1);
        list2.Add(2);

        var job = new CopyJob
        {
            List1 = list1,
            List2 = list2
        };

        job.Schedule().Complete();

        // Disposing both originals must not double-free despite the copy.
        list1.Dispose();
        list2.Dispose();
    }
#endif

    // Large job struct: exercises ScheduleByRef, which avoids copying the
    // job data by value at the schedule call site.
    struct LargeJobParallelForDefer : IJobParallelForDefer
    {
        public FixedString4096 StrA;
        public FixedString4096 StrB;
        public FixedString4096 StrC;
        public FixedString4096 StrD;

        [NativeDisableParallelForRestriction]
        public NativeArray<int> TotalLengths;

        [ReadOnly]
        public NativeList<float> Unused; // Schedule() from NativeList.Length requires that the list be passed into the job

        public void Execute(int index)
        {
            TotalLengths[0] = StrA.Length + StrB.Length + StrC.Length + StrD.Length;
        }
    }

    // How the deferred iteration count is supplied to ScheduleByRef.
    public enum IterationCountMode
    {
        List, Pointer
    }

    [Test]
    public unsafe void IJobParallelForDefer_LargeJobStruct_ScheduleRefWorks(
        [Values(IterationCountMode.List, IterationCountMode.Pointer)] IterationCountMode countMode)
    {
        using(var lengths = new NativeArray<int>(1, Allocator.TempJob))
        {
            var dummyList = new NativeList<float>(Allocator.TempJob);
            dummyList.Add(5.0f);

            var job = new LargeJobParallelForDefer
            {
                StrA = "A",
                StrB = "BB",
                StrC = "CCC",
                StrD = "DDDD",
                TotalLengths = lengths,
                Unused = dummyList,
            };
            if (countMode == IterationCountMode.List)
            {
                Assert.DoesNotThrow(() => job.ScheduleByRef(dummyList, 1).Complete());
            }
            else if (countMode == IterationCountMode.Pointer)
            {
                var lengthArray = new NativeArray<int>(1, Allocator.TempJob);
                lengthArray[0] = 1;
                Assert.DoesNotThrow(() => job.ScheduleByRef((int*)lengthArray.GetUnsafePtr(), 1).Complete());
                lengthArray.Dispose();
            }
            dummyList.Dispose();
        }
    }
}
}

11
Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTests.cs.meta


fileFormatVersion: 2
guid: 8693443c8d9368d4b8b9f2beea09d699
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

156
Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTestsFixture.cs


using NUnit.Framework;
using Unity.Collections;
using Unity.Jobs;
namespace Unity.Jobs.Tests.ManagedJobs
{
// Base fixture: brackets each test with a DOTS Runtime temp-memory scope
// (no-op on other targets).
public class JobTestsFixtureBasic
{
    [SetUp]
    public void Setup()
    {
#if UNITY_DOTSRUNTIME
        Unity.Runtime.TempMemoryScope.EnterScope();
#endif
    }

    [TearDown]
    public void Teardown()
    {
#if UNITY_DOTSRUNTIME
        Unity.Runtime.TempMemoryScope.ExitScope();
#endif
    }
}
/// <summary>
/// Shared fixture for the job tests: allocates five 10-element Persistent
/// arrays per test (input0[i]=i, input1[i]=10i, input2[i]=100i, outputs
/// zeroed) plus a default SumDataParallelForJob wired to them, and disposes
/// everything best-effort on teardown.
/// </summary>
public class JobTestsFixture : JobTestsFixtureBasic
{
    /*
     * this used to test both, and maybe it should again, but we have to make GetExecuteMethod() work with
     * multiple interfaces, hopefully in a non-global way
     */
    // Parallel-for job that writes input0[i] + input1[i] into output[i].
    public struct SumDataParallelForJob : /*IJob,*/ IJobParallelFor
    {
        [ReadOnly] public NativeArray<int> input0;
        [ReadOnly] public NativeArray<int> input1;
        public NativeArray<int> output;

        /* public void Execute()
        {
            for (var i = 0; i < output.Length; ++i)
                output[i] = input0[i] + input1[i];
        }*/

        public void Execute(int i)
        {
            output[i] = input0[i] + input1[i];
        }
    }

    // Copies input to output; input is deallocated automatically on completion.
    public struct CopyAndDestroyNativeArrayParallelForJob : IJobParallelFor
    {
        [ReadOnlyAttribute][DeallocateOnJobCompletionAttribute]
        public NativeArray<int> input;
        public NativeArray<int> output;

        public void Execute(int i)
        {
            output[i] = input[i];
        }
    }

    public SumDataParallelForJob data;
    public int[] expectedInput0;
    public NativeArray<int> input0;
    public NativeArray<int> input1;
    public NativeArray<int> input2;
    public NativeArray<int> output;
    public NativeArray<int> output2;

    [SetUp]
    public void Init()
    {
        expectedInput0 = new int[10];
        input0 = new NativeArray<int>(10, Allocator.Persistent);
        input1 = new NativeArray<int>(10, Allocator.Persistent);
        input2 = new NativeArray<int>(10, Allocator.Persistent);
        output = new NativeArray<int>(10, Allocator.Persistent);
        output2 = new NativeArray<int>(10, Allocator.Persistent);
        for (var i = 0; i < output.Length; i++)
        {
            expectedInput0[i] = i;
            input0[i] = i;
            input1[i] = 10 * i;
            input2[i] = 100 * i;
            output[i] = 0;
            output2[i] = 0;
        }

        data.input0 = input0;
        data.input1 = input1;
        data.output = output;
    }

    public void ExpectOutputSumOfInput0And1()
    {
        for (var i = 0; i != output.Length; i++)
            Assert.AreEqual(input0[i] + input1[i], output[i]);
    }

    public void ExpectOutputSumOfInput0And1And2()
    {
        for (var i = 0; i != output.Length; i++)
            Assert.AreEqual(input0[i] + input1[i] + input2[i], output[i]);
    }

    /// <summary>
    /// Best-effort disposal of all fixture arrays. Disposal can legitimately
    /// throw (e.g. an array already freed via [DeallocateOnJobCompletion] or
    /// disposed by the test itself), so failures are intentionally ignored.
    /// </summary>
    [TearDown]
    public void Cleanup()
    {
        DisposeIgnoringErrors(input0);
        DisposeIgnoringErrors(input1);
        DisposeIgnoringErrors(input2);
        DisposeIgnoringErrors(output);
        DisposeIgnoringErrors(output2);
    }

    // Disposes one array, swallowing any exception (deliberate best-effort
    // cleanup; replaces five identical try/catch blocks).
    static void DisposeIgnoringErrors(NativeArray<int> array)
    {
        try
        {
            array.Dispose();
        }
        catch
        {
        }
    }
}
}

11
Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTestsFixture.cs.meta


fileFormatVersion: 2
guid: 69135939d4cd2cf4fbecfe92b80f882a
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

111
Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTests_CombineDependencies.cs


using NUnit.Framework;
using Unity.Collections;
using Unity.Jobs;
namespace Unity.Jobs.Tests.ManagedJobs
{
/// <summary>
/// Tests for JobHandle.CombineDependencies: a combined handle must order a
/// follow-up job after all of its input jobs.
/// </summary>
public class JobTests_CombineDependencies : JobTestsFixtureBasic
{
    // Increments element 0 of its array by 1.
    struct ArrayJob1 : IJob
    {
        public NativeArray<int> data;

        public void Execute()
        {
            data[0] = data[0] + 1;
        }
    }

    // Increments element 0 of both arrays by 100.
    struct ArrayJob2 : IJob
    {
        public NativeArray<int> a;
        public NativeArray<int> b;

        public void Execute()
        {
            a[0] = a[0] + 100;
            b[0] = b[0] + 100;
        }
    }

    [Test]
    public void CombineDependenciesWorks()
    {
        var arrayA = new NativeArray<int>(2, Allocator.Persistent);
        var arrayB = new NativeArray<int>(2, Allocator.Persistent);

        var jobA = new ArrayJob1 {data = arrayA};
        var jobAHandle = jobA.Schedule();
        var jobB = new ArrayJob1 {data = arrayB};
        var jobBHandle = jobB.Schedule();

        var combinedHandle = JobHandle.CombineDependencies(jobAHandle, jobBHandle);
        var job2 = new ArrayJob2
        {
            a = arrayA,
            b = arrayB
        };
        job2.Schedule(combinedHandle).Complete();

        // Both arrays received the same sequence of increments, so they must
        // match element-wise. (Fixed: the loop previously compared index 0 on
        // every iteration, leaving the loop variable unused.)
        for (int i = 0; i < arrayA.Length; ++i)
        {
            Assert.AreEqual(arrayA[i], arrayB[i]);
        }
        arrayA.Dispose();
        arrayB.Dispose();
    }

    // Builds 'depth' generations: each generation schedules one ArrayJob1 per
    // array (all depending on the previous generation's ArrayJob2), combines
    // the handles, then schedules ArrayJob2 over arrays[0] and arrays[1].
    public void DeepCombineDependencies(int depth, int arraySize)
    {
        var arrays = new NativeArray<int>[arraySize];
        for (var i = 0; i < arrays.Length; i++)
        {
            arrays[i] = new NativeArray<int>(1, Allocator.Persistent);
            arrays[i][0] = 0;
        }

        var handles = new NativeArray<JobHandle>(arrays.Length, Allocator.Persistent);
        var previousJobHandle = new JobHandle();
        for (var i = 0; i < depth; i++)
        {
            for (var a = 0; a != arrays.Length; a++)
            {
                var job = new ArrayJob1 {data = arrays[a]};
                handles[a] = job.Schedule(previousJobHandle);
            }

            var combinedHandle = JobHandle.CombineDependencies(handles);
            var job2 = new ArrayJob2
            {
                a = arrays[0],
                b = arrays[1]
            };
            previousJobHandle = job2.Schedule(combinedHandle);
        }
        previousJobHandle.Complete();

        // arrays[0]/arrays[1] get +1 and +100 per generation; the rest only +1.
        Assert.AreEqual(100 * depth + depth, arrays[0][0]);
        Assert.AreEqual(100 * depth + depth, arrays[1][0]);
        for (var i = 2; i < arrays.Length; i++)
            Assert.AreEqual(depth, arrays[i][0]);

        for (var a = 0; a != arrays.Length; a++)
            arrays[a].Dispose();
        handles.Dispose();
    }

    [Test]
    public void DeepCombineDependenciesWorks()
    {
        DeepCombineDependencies(5, 21);
    }
}
}

11
Packages/com.unity.jobs/Unity.Jobs.Tests/ManagedJobs/JobTests_CombineDependencies.cs.meta


fileFormatVersion: 2
guid: ecbee97f2678c1747802e50945a31f1d
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

226
Packages/com.unity.jobs/Unity.Jobs.Tests/NativeListDeferredArrayTests.cs


using System;
using NUnit.Framework;
using Unity.Collections;
using Unity.Collections.LowLevel.Unsafe;
using Unity.Jobs;
using Unity.Jobs.LowLevel.Unsafe;
// Tests for NativeList.AsDeferredJobArray: a NativeArray view whose length
// and contents only become valid when a dependent job executes.
public class NativeListDeferredArrayTests
{
    // Saved so TearDown can restore whatever the user had configured.
    private bool JobsDebuggerWasEnabled;

    struct AliasJob : IJob
    {
        public NativeArray<int> array;
        public NativeList<int> list;

        public void Execute()
        {
        }
    }

    struct SetListLengthJob : IJob
    {
        public int ResizeLength;
        public NativeList<int> list;

        public void Execute()
        {
            list.Resize(ResizeLength, NativeArrayOptions.UninitializedMemory);
        }
    }

    // Writes the (deferred) array's length into every element.
    struct SetArrayValuesJobParallel : IJobParallelForDefer
    {
        public NativeArray<int> array;

        public void Execute(int index)
        {
            array[index] = array.Length;
        }
    }

    struct GetArrayValuesJobParallel : IJobParallelForDefer
    {
        [ReadOnly]
        public NativeArray<int> array;

        public void Execute(int index)
        {
        }
    }

    // Intentionally has no reference to the list it is scheduled from.
    struct ParallelForWithoutList : IJobParallelForDefer
    {
        public void Execute(int index)
        {
        }
    }

    [SetUp]
    public void Setup()
    {
        // Many ECS tests will only pass if the Jobs Debugger enabled;
        // force it enabled for all tests, and restore the original value at teardown.
        JobsDebuggerWasEnabled = JobsUtility.JobDebuggerEnabled;
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        JobsUtility.JobDebuggerEnabled = true;
#endif
#if UNITY_DOTSRUNTIME
        Unity.Runtime.TempMemoryScope.EnterScope();
#endif
    }

    // Resize in one job, fill via deferred array in a dependent job; the
    // deferred view must pick up the post-resize length.
    [Test]
    public void ResizedListToDeferredJobArray([Values(0, 1, 2, 3, 4, 5, 6, 42, 97, 1023)] int length)
    {
        var list = new NativeList<int>(Allocator.TempJob);

        var setLengthJob = new SetListLengthJob { list = list, ResizeLength = length };
        var jobHandle = setLengthJob.Schedule();

        var setValuesJob = new SetArrayValuesJobParallel { array = list.AsDeferredJobArray() };
        setValuesJob.Schedule(list, 3, jobHandle).Complete();

        Assert.AreEqual(length, list.Length);
        for (int i = 0; i != list.Length; i++)
            Assert.AreEqual(length, list[i]);

        list.Dispose();
    }

    // The iteration count can also come from a raw int* instead of a list.
    [Test]
    public unsafe void DeferredParallelForFromIntPtr()
    {
        int length = 10;
        var lengthValue = new NativeArray<int>(1, Allocator.TempJob);
        lengthValue[0] = length;
        var array = new NativeArray<int>(length, Allocator.TempJob);

        var setValuesJob = new SetArrayValuesJobParallel { array = array };
        setValuesJob.Schedule((int*)lengthValue.GetUnsafePtr(), 3).Complete();

        for (int i = 0; i != array.Length; i++)
            Assert.AreEqual(length, array[i]);

        lengthValue.Dispose();
        array.Dispose();
    }

    // Resizing before scheduling the deferred job must also work.
    [Test]
    public void ResizeListBeforeSchedule([Values(5)] int length)
    {
        var list = new NativeList<int>(Allocator.TempJob);

        var setLengthJob = new SetListLengthJob { list = list, ResizeLength = length }.Schedule();
        var setValuesJob = new SetArrayValuesJobParallel { array = list.AsDeferredJobArray() };
        setLengthJob.Complete();

        setValuesJob.Schedule(list, 3).Complete();
        Assert.AreEqual(length, list.Length);
        for (int i = 0; i != list.Length; i++)
            Assert.AreEqual(length, list[i]);
        list.Dispose();
    }

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    // On the main thread a deferred array is inert: zero length, element
    // access throws. (Parameterless overload of the [Values] test above.)
    [Test]
    public void ResizedListToDeferredJobArray()
    {
        var list = new NativeList<int>(Allocator.TempJob);
        list.Add(1);

        var array = list.AsDeferredJobArray();
#pragma warning disable 0219 // assigned but its value is never used
        Assert.Throws<IndexOutOfRangeException>(() => { var value = array[0]; });
#pragma warning restore 0219
        Assert.AreEqual(0, array.Length);

        list.Dispose();
    }

    [Test]
    public void ResizeListWhileJobIsRunning()
    {
        var list = new NativeList<int>(Allocator.TempJob);
        list.Resize(42, NativeArrayOptions.UninitializedMemory);

        var setValuesJob = new GetArrayValuesJobParallel { array = list.AsDeferredJobArray() };
        var jobHandle = setValuesJob.Schedule(list, 3);

        // The list is captured by an in-flight job; resizing must throw.
        Assert.Throws<InvalidOperationException>(() => list.Resize(1, NativeArrayOptions.UninitializedMemory));
        jobHandle.Complete();
        list.Dispose();
    }

    // A job holding both the list and its deferred alias must be rejected.
    [Test]
    public void AliasArrayThrows()
    {
        var list = new NativeList<int>(Allocator.TempJob);

        var aliasJob = new AliasJob { list = list, array = list.AsDeferredJobArray() };
        Assert.Throws<InvalidOperationException>(() => aliasJob.Schedule());

        list.Dispose();
    }

    // Scheduling from a list the job data doesn't reference must be rejected.
    [Test]
    public void DeferredListMustExistInJobData()
    {
        var list = new NativeList<int>(Allocator.TempJob);

        var job = new ParallelForWithoutList();
        Assert.Throws<InvalidOperationException>(() => job.Schedule(list, 64));

        list.Dispose();
    }

    [Test]
    public void DeferredListCantBeDeletedWhileJobIsRunning()
    {
        var list = new NativeList<int>(Allocator.TempJob);
        list.Resize(42, NativeArrayOptions.UninitializedMemory);

        var setValuesJob = new GetArrayValuesJobParallel { array = list.AsDeferredJobArray() };
        var jobHandle = setValuesJob.Schedule(list, 3);

        Assert.Throws<InvalidOperationException>(() => list.Dispose());
        jobHandle.Complete();
        // Actually clean up memory to avoid DisposeSentinel complaint
        list.Dispose();
    }

    [Test]
    public void DeferredArrayCantBeAccessedOnMainthread()
    {
        var list = new NativeList<int>(Allocator.TempJob);
        list.Add(1);

        var defer = list.AsDeferredJobArray();

        Assert.AreEqual(0, defer.Length);
        Assert.Throws<IndexOutOfRangeException>(() => defer[0] = 5);

        list.Dispose();
    }
#endif

    [TearDown]
    public void TearDown()
    {
#if UNITY_DOTSRUNTIME
        Unity.Runtime.TempMemoryScope.ExitScope();
#endif
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        JobsUtility.JobDebuggerEnabled = JobsDebuggerWasEnabled;
#endif
    }
}

11
Packages/com.unity.jobs/Unity.Jobs.Tests/NativeListDeferredArrayTests.cs.meta


fileFormatVersion: 2
guid: 44de4272423aa4557a162364f252e82a
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

164
Packages/com.unity.jobs/Unity.Jobs.Tests/ParallelFilterJobTests.cs


using NUnit.Framework;
using System;
using Unity.Jobs;
using Unity.Collections;
using Unity.Jobs.Tests.ManagedJobs;
#pragma warning disable 0219
// Tests for IJobParallelForFilter's ScheduleAppend/ScheduleFilter paths.
public class ParallelFilterJobTests : JobTestsFixtureBasic
{
    // Plain IJob reference implementation: appends every index divisible by 7.
    struct NativeListAddMod7Job : IJob
    {
        NativeList<int> list;
        int produceCount;

        public NativeListAddMod7Job(NativeList<int> list, int produceCount)
        {
            this.list = list;
            this.produceCount = produceCount;
        }

        public void Execute()
        {
            for (int index = 0; index != produceCount; index++)
            {
                if (index % 7 == 0)
                    list.Add(index);
            }
        }
    }

    // Filter job keeping indices divisible by 7.
    struct FilterMod7Job : IJobParallelForFilter
    {
        public bool Execute(int index)
        {
            return index % 7 == 0;
        }
    }

    // Filter job keeping every index.
    struct FilterAllJob : IJobParallelForFilter
    {
        public bool Execute(int index)
        {
            return true;
        }
    }

    // ScheduleAppend must preserve pre-existing elements and append the
    // filtered indices after them; compared against the plain-IJob variant.
    [Test]
    public void AddElementForEach([Values] bool userFilterJob)
    {
        var list = new NativeList<int>(0, Allocator.TempJob);
        list.Add(-1);
        list.Add(-2);

        if (userFilterJob)
        {
            var job = new FilterMod7Job();
            job.ScheduleAppend(list, 1000, 41).Complete();
        }
        else
        {
            var job = new NativeListAddMod7Job(list, 1000);
            job.Schedule().Complete();
        }

        int counter = 2;
        for (int i = 0; i != 1000; i++)
        {
            if (i % 7 == 0)
            {
                Assert.AreEqual(i, list[counter]);
                counter++;
            }
        }

        Assert.AreEqual(-1, list[0]);
        Assert.AreEqual(-2, list[1]);
        Assert.AreEqual(counter, list.Length);

        list.Dispose();
    }

    // Two chained appends: mod-7 over [0,14) yields {0,7}, then filter-all
    // over [0,2) appends {0,1}.
    [Test]
    public void FilterProduceChained()
    {
        var list = new NativeList<int>(3, Allocator.TempJob);

        var jobHandle = new FilterMod7Job().ScheduleAppend(list, 14, 4);
        jobHandle = new FilterAllJob().ScheduleAppend(list, 2, 19, jobHandle);
        jobHandle.Complete();

        Assert.AreEqual(0, list[0]);
        Assert.AreEqual(7, list[1]);
        Assert.AreEqual(0, list[2]);
        Assert.AreEqual(1, list[3]);
        Assert.AreEqual(4, list.Length);

        list.Dispose();
    }

    // NOTE(review): this test is byte-for-byte identical to
    // FilterProduceChained above — presumably one was meant to exercise a
    // different API; worth confirming with the author.
    [Test]
    public void FilterAppendChained()
    {
        var list = new NativeList<int>(3, Allocator.TempJob);

        var jobHandle = new FilterMod7Job().ScheduleAppend(list, 14, 4);
        jobHandle = new FilterAllJob().ScheduleAppend(list, 2, 19, jobHandle);
        jobHandle.Complete();

        Assert.AreEqual(0, list[0]);
        Assert.AreEqual(7, list[1]);
        Assert.AreEqual(0, list[2]);
        Assert.AreEqual(1, list[3]);
        Assert.AreEqual(4, list.Length);

        list.Dispose();
    }

    // ScheduleFilter narrows an existing list in place: all of [0,14) first,
    // then only the mod-7 survivors remain.
    [Test]
    public void FilterPreviousChained()
    {
        var list = new NativeList<int>(3, Allocator.TempJob);

        var jobHandle = new FilterAllJob().ScheduleAppend(list, 14, 3);
        jobHandle = new FilterMod7Job().ScheduleFilter(list, 3, jobHandle);
        jobHandle.Complete();

        Assert.AreEqual(2, list.Length);
        Assert.AreEqual(0, list[0]);
        Assert.AreEqual(7, list[1]);

        list.Dispose();
    }

    struct MinMaxRestrictionJob : IJobParallelForFilter
    {
        public NativeArray<float> array;
        public MinMaxRestrictionJob(NativeArray<float> array) { this.array = array; }

        public bool Execute(int index)
        {
            array[index] = 5;
            var localArray = array;
            // Writing outside the patched min/max range should be rejected.
            Assert.Throws<IndexOutOfRangeException>(() => { localArray[50] = 5; });
            return true;
        }
    }

    [Test]
    [Ignore("DOTS-1959 Currently thats legal, but only because filter jobs aren't implemented as parallel for right now...")]
    public void AccessingWritable()
    {
        var list = new NativeList<int>(0, Allocator.Persistent);
        var array = new NativeArray<float>(51, Allocator.Persistent);

        var jobHandle = new MinMaxRestrictionJob(array).ScheduleAppend(list, 50, 3);
        new MinMaxRestrictionJob(array).ScheduleFilter(list, 3, jobHandle).Complete();
        Assert.AreEqual(50, list.Length);

        list.Dispose();
        array.Dispose();
    }
}

12
Packages/com.unity.jobs/Unity.Jobs.Tests/ParallelFilterJobTests.cs.meta


fileFormatVersion: 2
guid: 198c389f4bc3b41b6b298a0c56e6b870
timeCreated: 1492884447
licenseType: Pro
MonoImporter:
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

21
Packages/com.unity.jobs/Unity.Jobs.Tests/Unity.Jobs.Tests.asmdef


{
"name": "Unity.Jobs.Tests",
"references": [
"Unity.Jobs",
"Unity.Collections",
"Unity.Mathematics"
],
"optionalUnityReferences": [
"TestAssemblies"
],
"includePlatforms": [
"Editor"
],
"excludePlatforms": [],
"allowUnsafeCode": true,
"overrideReferences": false,
"precompiledReferences": [],
"autoReferenced": true,
"defineConstraints": [],
"versionDefines": []
}

7
Packages/com.unity.jobs/Unity.Jobs.Tests/Unity.Jobs.Tests.asmdef.meta


fileFormatVersion: 2
guid: 2a290626669fc45c1b8bc86933976e70
AssemblyDefinitionImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

10
Packages/com.unity.jobs/Unity.Jobs.meta


fileFormatVersion: 2
guid: 85a3b579157e544f2b5a0d82611d45ae
folderAsset: yes
timeCreated: 1502099506
licenseType: Pro
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

52
Packages/com.unity.jobs/Unity.Jobs/EarlyInitHelpers.cs


using System;
using System.Collections.Generic;
using UnityEngine;
namespace Unity.Jobs
{
/// <summary>
/// Used by automatically generated code. Do not use in projects.
/// </summary>
/// <summary>
/// Used by automatically generated code. Do not use in projects.
/// Collects early-init delegates registered before startup and runs them
/// once when flushed.
/// </summary>
public class EarlyInitHelpers
{
    public delegate void EarlyInitFunction();

    private static List<EarlyInitFunction> s_PendingDelegates;

    /// <summary>
    /// Runs every pending delegate. A delegate may register further delegates
    /// while running, hence the outer loop re-checks the pending list; each
    /// failure is logged and does not stop the remaining delegates.
    /// </summary>
    public static void FlushEarlyInits()
    {
        while (s_PendingDelegates != null)
        {
            var oldList = s_PendingDelegates;
            s_PendingDelegates = null;

            for (int i = 0; i < oldList.Count; ++i)
            {
                try
                {
                    oldList[i]();
                }
                catch (Exception ex)
                {
                    Debug.LogException(ex);
                }
            }
        }
    }

    /// <summary>Queues a delegate to run on the next <see cref="FlushEarlyInits"/>.</summary>
    public static void AddEarlyInitFunction(EarlyInitFunction f)
    {
        if (s_PendingDelegates == null)
            s_PendingDelegates = new List<EarlyInitFunction>();

        s_PendingDelegates.Add(f);
    }

    /// <summary>Logs a job-reflection-data creation failure for the given job type.</summary>
    public static void JobReflectionDataCreationFailed(Exception ex, Type jobType)
    {
        // Fixed: the message used "${jobType}", which printed a literal '$'
        // before the type name (the '$' was inside the interpolated string).
        Debug.LogError($"Failed to create job reflection data for type {jobType}:");
        Debug.LogException(ex);
    }
}
}

11
Packages/com.unity.jobs/Unity.Jobs/EarlyInitHelpers.cs.meta


fileFormatVersion: 2
guid: 05a99ad2800ad2f49b22ab35d999bfc7
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

80
Packages/com.unity.jobs/Unity.Jobs/IJobParallelForBatch.cs


using System;
using System.Runtime.InteropServices;
using Unity.Collections;
using Unity.Jobs.LowLevel.Unsafe;
using Unity.Collections.LowLevel.Unsafe;
namespace Unity.Jobs
{
/// <summary>
/// Parallel job executed in contiguous batches: Execute receives the start
/// index and element count of each work-stealing range.
/// </summary>
[JobProducerType(typeof(IJobParallelForBatchExtensions.JobParallelForBatchProducer<>))]
public interface IJobParallelForBatch
{
    void Execute(int startIndex, int count);
}
/// <summary>
/// Scheduling support for <see cref="IJobParallelForBatch"/>.
/// </summary>
public static class IJobParallelForBatchExtensions
{
    internal struct JobParallelForBatchProducer<T> where T : struct, IJobParallelForBatch
    {
        // Reflection data is created once per closed generic type and cached.
        static IntPtr s_JobReflectionData;

        public static IntPtr Initialize()
        {
            if (s_JobReflectionData == IntPtr.Zero)
            {
#if UNITY_2020_2_OR_NEWER
                s_JobReflectionData = JobsUtility.CreateJobReflectionData(typeof(T), typeof(T), (ExecuteJobFunction)Execute);
#else
                // Pre-2020.2 API requires an explicit JobType.
                s_JobReflectionData = JobsUtility.CreateJobReflectionData(typeof(T), typeof(T),
                    JobType.ParallelFor, (ExecuteJobFunction)Execute);
#endif
            }
            return s_JobReflectionData;
        }

        public delegate void ExecuteJobFunction(ref T jobData, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex);

        // Pulls work-stealing ranges until none remain, invoking the user
        // job once per [begin, end) batch.
        public unsafe static void Execute(ref T jobData, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex)
        {
            while (true)
            {
                if (!JobsUtility.GetWorkStealingRange(
                    ref ranges,
                    jobIndex, out int begin, out int end))
                    return;

#if ENABLE_UNITY_COLLECTIONS_CHECKS
                // Restrict container access to this batch's index range.
                JobsUtility.PatchBufferMinMaxRanges(bufferRangePatchData, UnsafeUtility.AddressOf(ref jobData), begin, end - begin);
#endif

                jobData.Execute(begin, end - begin);
            }
        }
    }

    /// <summary>
    /// Schedules the job over <paramref name="arrayLength"/> elements, split
    /// into batches of at least <paramref name="minIndicesPerJobCount"/>.
    /// </summary>
    public static unsafe JobHandle ScheduleBatch<T>(this T jobData, int arrayLength, int minIndicesPerJobCount,
        JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForBatch
    {
        var scheduleParams = new JobsUtility.JobScheduleParameters(
            UnsafeUtility.AddressOf(ref jobData),
            JobParallelForBatchProducer<T>.Initialize(),
            dependsOn,
#if UNITY_2020_2_OR_NEWER
            ScheduleMode.Parallel
#else
            ScheduleMode.Batched
#endif
        );

        return JobsUtility.ScheduleParallelFor(ref scheduleParams, arrayLength, minIndicesPerJobCount);
    }

    /// <summary>
    /// Runs the job immediately on the calling thread as a single batch
    /// covering all <paramref name="arrayLength"/> elements.
    /// </summary>
    public static unsafe void RunBatch<T>(this T jobData, int arrayLength) where T : struct, IJobParallelForBatch
    {
        var scheduleParams =
            new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref jobData),
                JobParallelForBatchProducer<T>.Initialize(), new JobHandle(), ScheduleMode.Run);
        JobsUtility.ScheduleParallelFor(ref scheduleParams, arrayLength, arrayLength);
    }
}
}

13
Packages/com.unity.jobs/Unity.Jobs/IJobParallelForBatch.cs.meta


fileFormatVersion: 2
guid: 73fbfe0d21bc34441843483c0c2406f0
timeCreated: 1504270347
licenseType: Pro
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

175
Packages/com.unity.jobs/Unity.Jobs/IJobParallelForDefer.cs


using System;
using Unity.Collections;
using Unity.Collections.LowLevel.Unsafe;
using Unity.Jobs.LowLevel.Unsafe;
namespace Unity.Jobs
{
/// <summary>
/// A replacement for IJobParallelFor when the number of work items is not known at Schedule time.
/// IJobParallelForDefer lets you calculate the number of iterations to perform in a job that must execute before the IJobParallelForDefer job.
///
/// When Scheduling the job's Execute(int index) method will be invoked on multiple worker threads in parallel to each other.
/// Execute(int index) will be executed once for each index from 0 to the provided length. Each iteration must be independent from other iterations (The safety system enforces this rule for you). The indices have no guaranteed order and are executed on multiple cores in parallel.
/// Unity automatically splits the work into chunks of no less than the provided batchSize, and schedules an appropriate number of jobs based on the number of worker threads, the length of the array and the batch size.
/// Batch size should generally be chosen depending on the amount of work performed in the job. A simple job, for example adding a couple of float3 to each other should probably have a batch size of 32 to 128. However if the work performed is very expensive then it is best to use a small batch size, for expensive work a batch size of 1 is totally fine. IJobParallelFor performs work stealing using atomic operations. Batch sizes can be small but they are not for free.
/// The returned JobHandle can be used to ensure that the job has completed. Or it can be passed to other jobs as a dependency, thus ensuring the jobs are executed one after another on the worker threads.
/// </summary>
[JobProducerType(typeof(IJobParallelForDeferExtensions.JobParallelForDeferProducer<>))]
public interface IJobParallelForDefer
{
    /// <summary>
    /// Implement this method to perform work against a specific iteration index.
    /// </summary>
    /// <param name="index">The index of the Parallel for loop at which to perform work.</param>
    void Execute(int index);
}
/// <summary>
/// Scheduling extensions for <see cref="IJobParallelForDefer"/> jobs, whose iteration
/// count is resolved from a list (or raw count pointer) only once dependencies complete.
/// </summary>
public static class IJobParallelForDeferExtensions
{
    // Job producer wired to IJobParallelForDefer via its [JobProducerType] attribute;
    // this is the glue between the managed job struct and the native job system.
    internal struct JobParallelForDeferProducer<T> where T : struct, IJobParallelForDefer
    {
        // Reflection data is created once per job type T and cached for all subsequent schedules.
        static IntPtr s_JobReflectionData;

        // Lazily creates (and caches) the reflection data the native job system needs
        // to run jobs of type T.
        public static unsafe IntPtr Initialize()
        {
            if (s_JobReflectionData == IntPtr.Zero)
            {
                s_JobReflectionData = JobsUtility.CreateJobReflectionData(typeof(T), typeof(T), (ExecuteJobFunction)Execute);
            }
            return s_JobReflectionData;
        }

        // Signature the native job system invokes on worker threads.
        public delegate void ExecuteJobFunction(ref T jobData, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex);

        // Worker entry point: repeatedly steals a contiguous [begin, end) range and
        // invokes the user's Execute once per index until no work remains.
        public unsafe static void Execute(ref T jobData, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex)
        {
            while (true)
            {
                if (!JobsUtility.GetWorkStealingRange(ref ranges, jobIndex, out int begin, out int end))
                    break;

#if ENABLE_UNITY_COLLECTIONS_CHECKS
                // Limit container safety checks to the stolen range while it is processed.
                JobsUtility.PatchBufferMinMaxRanges(bufferRangePatchData, UnsafeUtility.AddressOf(ref jobData), begin, end - begin);
#endif
                for (var i = begin; i < end; ++i)
                    jobData.Execute(i);
            }
        }
    }

    /// <summary>
    /// Schedule the job for execution on worker threads.
    /// list.Length is used as the iteration count.
    /// Note that it is required to embed the list on the job struct as well.
    /// </summary>
    /// <param name="jobData">The job and data to schedule.</param>
    /// <param name="list">list.Length is used as the iteration count.</param>
    /// <param name="innerloopBatchCount">Granularity in which workstealing is performed. A value of 32, means the job queue will steal 32 iterations and then perform them in an efficient inner loop.</param>
    /// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
    /// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
    public static unsafe JobHandle Schedule<T, U>(this T jobData, NativeList<U> list, int innerloopBatchCount,
        JobHandle dependsOn = new JobHandle())
        where T : struct, IJobParallelForDefer
        where U : unmanaged
    {
        void* atomicSafetyHandlePtr = null;
        // Calculate the deferred atomic safety handle before constructing JobScheduleParameters so
        // DOTS Runtime can validate the deferred list statically similar to the reflection based
        // validation in Big Unity.
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        var safety = NativeListUnsafeUtility.GetAtomicSafetyHandle(ref list);
        atomicSafetyHandlePtr = UnsafeUtility.AddressOf(ref safety);
#endif
        return ScheduleInternal(ref jobData, innerloopBatchCount,
            NativeListUnsafeUtility.GetInternalListDataPtrUnchecked(ref list),
            atomicSafetyHandlePtr, dependsOn);
    }

    /// <summary>
    /// Schedule the job for execution on worker threads.
    /// list.Length is used as the iteration count.
    /// Note that it is required to embed the list on the job struct as well.
    /// </summary>
    /// <param name="jobData">The job and data to schedule. In this variant, the jobData is
    /// passed by reference, which may be necessary for unusually large job structs.</param>
    /// <param name="list">list.Length is used as the iteration count.</param>
    /// <param name="innerloopBatchCount">Granularity in which workstealing is performed. A value of 32, means the job queue will steal 32 iterations and then perform them in an efficient inner loop.</param>
    /// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
    /// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
    public static unsafe JobHandle ScheduleByRef<T, U>(this ref T jobData, NativeList<U> list, int innerloopBatchCount,
        JobHandle dependsOn = new JobHandle())
        where T : struct, IJobParallelForDefer
        where U : unmanaged
    {
        void* atomicSafetyHandlePtr = null;
        // Calculate the deferred atomic safety handle before constructing JobScheduleParameters so
        // DOTS Runtime can validate the deferred list statically similar to the reflection based
        // validation in Big Unity.
#if ENABLE_UNITY_COLLECTIONS_CHECKS
        var safety = NativeListUnsafeUtility.GetAtomicSafetyHandle(ref list);
        atomicSafetyHandlePtr = UnsafeUtility.AddressOf(ref safety);
#endif
        return ScheduleInternal(ref jobData, innerloopBatchCount,
            NativeListUnsafeUtility.GetInternalListDataPtrUnchecked(ref list),
            atomicSafetyHandlePtr, dependsOn);
    }

    /// <summary>
    /// Schedule the job for execution on worker threads.
    /// forEachCount is a pointer to the number of iterations, when dependsOn has completed.
    /// This API is unsafe, it is recommended to use the NativeList based Schedule method instead.
    /// </summary>
    /// <typeparam name="T">The job struct type, implementing IJobParallelForDefer.</typeparam>
    /// <param name="jobData">The job and data to schedule.</param>
    /// <param name="forEachCount">*forEachCount is used as the iteration count.</param>
    /// <param name="innerloopBatchCount">Granularity in which workstealing is performed. A value of 32, means the job queue will steal 32 iterations and then perform them in an efficient inner loop.</param>
    /// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
    /// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
    public static unsafe JobHandle Schedule<T>(this T jobData, int* forEachCount, int innerloopBatchCount,
        JobHandle dependsOn = new JobHandle())
        where T : struct, IJobParallelForDefer
    {
        // ScheduleParallelForDeferArraySize expects a pointer to the list container, not to
        // the count itself. NOTE(review): this assumes the length field sits sizeof(void*)
        // bytes into the native container (buffer pointer first, then length) — verify
        // against the engine's UnsafeList layout if that ever changes.
        var forEachListPtr = (byte*)forEachCount - sizeof(void*);
        return ScheduleInternal(ref jobData, innerloopBatchCount, forEachListPtr, null, dependsOn);
    }

    /// <summary>
    /// Schedule the job for execution on worker threads.
    /// forEachCount is a pointer to the number of iterations, when dependsOn has completed.
    /// This API is unsafe, it is recommended to use the NativeList based Schedule method instead.
    /// </summary>
    /// <typeparam name="T">The job struct type, implementing IJobParallelForDefer.</typeparam>
    /// <param name="jobData">The job and data to schedule. In this variant, the jobData is
    /// passed by reference, which may be necessary for unusually large job structs.</param>
    /// <param name="forEachCount">*forEachCount is used as the iteration count.</param>
    /// <param name="innerloopBatchCount">Granularity in which workstealing is performed. A value of 32, means the job queue will steal 32 iterations and then perform them in an efficient inner loop.</param>
    /// <param name="dependsOn">Dependencies are used to ensure that a job executes on workerthreads after the dependency has completed execution. Making sure that two jobs reading or writing to same data do not run in parallel.</param>
    /// <returns>JobHandle The handle identifying the scheduled job. Can be used as a dependency for a later job or ensure completion on the main thread.</returns>
    public static unsafe JobHandle ScheduleByRef<T>(this ref T jobData, int* forEachCount, int innerloopBatchCount,
        JobHandle dependsOn = new JobHandle())
        where T : struct, IJobParallelForDefer
    {
        // Same container-layout assumption as Schedule(int*) above: reconstruct the list
        // pointer from the raw count pointer by stepping back over the buffer pointer field.
        var forEachListPtr = (byte*)forEachCount - sizeof(void*);
        return ScheduleInternal(ref jobData, innerloopBatchCount, forEachListPtr, null, dependsOn);
    }

    // Shared scheduling core for all public overloads: packs the job into
    // JobScheduleParameters and defers the iteration count to the pointed-at list,
    // which is read once dependencies have completed.
    private static unsafe JobHandle ScheduleInternal<T>(ref T jobData,
        int innerloopBatchCount,
        void* forEachListPtr,
        void *atomicSafetyHandlePtr,
        JobHandle dependsOn) where T : struct, IJobParallelForDefer
    {
        var scheduleParams = new JobsUtility.JobScheduleParameters(
            UnsafeUtility.AddressOf(ref jobData),
            JobParallelForDeferProducer<T>.Initialize(), dependsOn,
            ScheduleMode.Parallel);

        return JobsUtility.ScheduleParallelForDeferArraySize(ref scheduleParams, innerloopBatchCount,
            forEachListPtr, atomicSafetyHandlePtr);
    }
}
}

11
Packages/com.unity.jobs/Unity.Jobs/IJobParallelForDefer.cs.meta


fileFormatVersion: 2
guid: c4686c462988440609429182cc168e62
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

144
Packages/com.unity.jobs/Unity.Jobs/IJobParallelForFilter.cs


using System;
using Unity.Jobs.LowLevel.Unsafe;
using Unity.Collections;
using Unity.Collections.LowLevel.Unsafe;
using Unity.Mathematics;
namespace Unity.Jobs
{
/// <summary>
/// Job interface for producing or filtering a list of indices: <see cref="Execute"/> is a
/// predicate invoked per candidate index, and only indices for which it returns true are
/// kept in the output list (see JobParallelIndexListExtensions.ScheduleAppend/ScheduleFilter).
/// </summary>
[JobProducerType(typeof(JobParallelIndexListExtensions.JobParallelForFilterProducer<>))]
public interface IJobParallelForFilter
{
    /// <summary>Tests one candidate index.</summary>
    /// <param name="index">The index to test.</param>
    /// <returns>True to keep the index in the output list, false to discard it.</returns>
    bool Execute(int index);
}
/// <summary>
/// Scheduling extensions for <see cref="IJobParallelForFilter"/> jobs. A job either appends
/// passing indices from [0, arrayLength) to a list (ScheduleAppend) or compacts an existing
/// index list in place (ScheduleFilter).
/// </summary>
public static class JobParallelIndexListExtensions
{
    // Job producer wired to IJobParallelForFilter via its [JobProducerType] attribute.
    internal struct JobParallelForFilterProducer<T> where T : struct, IJobParallelForFilter
    {
        // Wraps the user's job together with the shared index list and the mode selector.
        public struct JobWrapper
        {
            [NativeDisableParallelForRestriction]
            public NativeList<int> outputIndices;
            // Number of candidate indices to test in append mode; -1 selects filter mode.
            public int appendCount;
            public T JobData;
        }

        // Reflection data is created once per job type T and cached for all subsequent schedules.
        static IntPtr s_JobReflectionData;

        // Lazily creates (and caches) the reflection data the native job system needs to
        // run this wrapper. Scheduled as a single job on all engine versions (see @TODO).
        public static IntPtr Initialize()
        {
            if (s_JobReflectionData == IntPtr.Zero)
                // @TODO: Use parallel for job... (Need to expose combine jobs)
                s_JobReflectionData = JobsUtility.CreateJobReflectionData(typeof(JobWrapper), typeof(T),
#if !UNITY_2020_2_OR_NEWER
                    JobType.Single,
#endif
                    (ExecuteJobFunction)Execute);
            return s_JobReflectionData;
        }

        // Signature the native job system invokes.
        public delegate void ExecuteJobFunction(ref JobWrapper jobWrapper, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex);

        // @TODO: Use parallel for job... (Need to expose combine jobs)
        // Entry point: dispatches on the appendCount sentinel (-1 => filter, otherwise append).
        public static void Execute(ref JobWrapper jobWrapper, IntPtr additionalPtr, IntPtr bufferRangePatchData, ref JobRanges ranges, int jobIndex)
        {
            if (jobWrapper.appendCount == -1)
                ExecuteFilter(ref jobWrapper, bufferRangePatchData);
            else
                ExecuteAppend(ref jobWrapper, bufferRangePatchData);
        }

        // Append mode: tests indices [0, appendCount) and appends those for which the user's
        // Execute returns true after any entries already in outputIndices.
        public static unsafe void ExecuteAppend(ref JobWrapper jobWrapper, System.IntPtr bufferRangePatchData)
        {
            int oldLength = jobWrapper.outputIndices.Length;
            // Reserve worst-case capacity up front so the raw pointer taken below stays
            // valid while writing past the current length.
            jobWrapper.outputIndices.Capacity = math.max(jobWrapper.appendCount + oldLength, jobWrapper.outputIndices.Capacity);
            int* outputPtr = (int*)jobWrapper.outputIndices.GetUnsafePtr();
            int outputIndex = oldLength;

#if ENABLE_UNITY_COLLECTIONS_CHECKS
            // Limit container safety checks to the range of candidate indices.
            JobsUtility.PatchBufferMinMaxRanges(bufferRangePatchData, UnsafeUtility.AddressOf(ref jobWrapper),
                0, jobWrapper.appendCount);
#endif
            for (int i = 0; i != jobWrapper.appendCount; i++)
            {
                if (jobWrapper.JobData.Execute(i))
                {
                    outputPtr[outputIndex] = i;
                    outputIndex++;
                }
            }

            // Commit the final element count; contents were written through the raw pointer.
            jobWrapper.outputIndices.ResizeUninitialized(outputIndex);
        }

        // Filter mode: re-tests the indices already stored in outputIndices and compacts
        // the list in place down to those for which the user's Execute still returns true.
        public static unsafe void ExecuteFilter(ref JobWrapper jobWrapper, System.IntPtr bufferRangePatchData)
        {
            int* outputPtr = (int*)jobWrapper.outputIndices.GetUnsafePtr();
            int inputLength = jobWrapper.outputIndices.Length;

            int outputCount = 0;
            for (int i = 0; i != inputLength; i++)
            {
                int inputIndex = outputPtr[i];

#if ENABLE_UNITY_COLLECTIONS_CHECKS
                // Limit container safety checks to the single index being tested.
                JobsUtility.PatchBufferMinMaxRanges(bufferRangePatchData, UnsafeUtility.AddressOf(ref jobWrapper), inputIndex, 1);
#endif
                if (jobWrapper.JobData.Execute(inputIndex))
                {
                    // Writing at outputCount <= i never overtakes the read cursor, so the
                    // in-place compaction is safe.
                    outputPtr[outputCount] = inputIndex;
                    outputCount++;
                }
            }

            // Commit the compacted element count.
            jobWrapper.outputIndices.ResizeUninitialized(outputCount);
        }
    }

    /// <summary>
    /// Schedules the job in append mode: each index in [0, arrayLength) for which the job's
    /// Execute returns true is appended to <paramref name="indices"/>.
    /// </summary>
    /// <param name="jobData">The job and data to schedule.</param>
    /// <param name="indices">List that receives the passing indices (existing entries are kept).</param>
    /// <param name="arrayLength">Number of candidate indices to test.</param>
    /// <param name="innerloopBatchCount">Currently unused — the work runs as a single job (see @TODO in the producer).</param>
    /// <param name="dependsOn">Handle of a job that must complete before this one executes.</param>
    /// <returns>The handle identifying the scheduled job.</returns>
    public static unsafe JobHandle ScheduleAppend<T>(this T jobData, NativeList<int> indices, int arrayLength, int innerloopBatchCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForFilter
    {
        JobParallelForFilterProducer<T>.JobWrapper jobWrapper = new JobParallelForFilterProducer<T>.JobWrapper()
        {
            JobData = jobData,
            outputIndices = indices,
            appendCount = arrayLength
        };

        var scheduleParams = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref jobWrapper), JobParallelForFilterProducer<T>.Initialize(), dependsOn,
#if UNITY_2020_2_OR_NEWER
            ScheduleMode.Parallel
#else
            ScheduleMode.Batched
#endif
        );
        return JobsUtility.Schedule(ref scheduleParams);
    }

    /// <summary>
    /// Schedules the job in filter mode: <paramref name="indices"/> is compacted in place to
    /// the entries for which the job's Execute returns true.
    /// </summary>
    /// <param name="jobData">The job and data to schedule.</param>
    /// <param name="indices">Existing index list to filter in place.</param>
    /// <param name="innerloopBatchCount">Currently unused — the work runs as a single job (see @TODO in the producer).</param>
    /// <param name="dependsOn">Handle of a job that must complete before this one executes.</param>
    /// <returns>The handle identifying the scheduled job.</returns>
    public static unsafe JobHandle ScheduleFilter<T>(this T jobData, NativeList<int> indices, int innerloopBatchCount, JobHandle dependsOn = new JobHandle()) where T : struct, IJobParallelForFilter
    {
        JobParallelForFilterProducer<T>.JobWrapper jobWrapper = new JobParallelForFilterProducer<T>.JobWrapper()
        {
            JobData = jobData,
            outputIndices = indices,
            appendCount = -1  // sentinel: filter mode
        };

        var scheduleParams = new JobsUtility.JobScheduleParameters(UnsafeUtility.AddressOf(ref jobWrapper), JobParallelForFilterProducer<T>.Initialize(), dependsOn,
#if UNITY_2020_2_OR_NEWER
            ScheduleMode.Parallel
#else
            ScheduleMode.Batched
#endif
        );
        return JobsUtility.Schedule(ref scheduleParams);
    }

    //@TODO: RUN
}
}

12
Packages/com.unity.jobs/Unity.Jobs/IJobParallelForFilter.cs.meta


fileFormatVersion: 2
guid: 88ce42f5519c641cba8aab76c7ea14de
timeCreated: 1492869658
licenseType: Pro
MonoImporter:
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

12
Packages/com.unity.jobs/Unity.Jobs/Unity.Jobs.asmdef


{
"name": "Unity.Jobs",
"references": [
"Unity.Mathematics",
"Unity.Collections",
"Unity.Burst"
],
"optionalUnityReferences": [],
"includePlatforms": [],
"excludePlatforms": [],
"allowUnsafeCode": true
}

7
Packages/com.unity.jobs/Unity.Jobs/Unity.Jobs.asmdef.meta


fileFormatVersion: 2
guid: 8a2eafa29b15f444eb6d74f94a930e1d
AssemblyDefinitionImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

25
Packages/com.unity.jobs/package.json


{
"name": "com.unity.jobs",
"displayName": "Jobs",
"version": "0.10.0-preview.18",
"unity": "2020.3",
"unityRelease": "0f1",
"description": "The Jobs package provides additional job types like IJobParallelForBatch, IJobParallelForDefer, and IJobParallelForFilter.",
"dependencies": {
"com.unity.collections": "0.17.0-preview.18",
"com.unity.mathematics": "1.2.1"
},
"keywords": [
"dots",
"jobs",
"unity"
],
"upmCi": {
"footprint": "c083cc48839c3bffaa4329f5502b9a1303e05b01"
},
"repository": {
"url": "https://github.cds.internal.unity3d.com/unity/dots.git",
"type": "git",
"revision": "51d641c1c6a767b98209a5204bc6b69f873d497f"
}
}

7
Packages/com.unity.jobs/package.json.meta


fileFormatVersion: 2
guid: 2cbb966dd48544296b68bb7df44c8c5d
TextScriptImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:
正在加载...
取消
保存