From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
Date: Sun, 26 Feb 2023 23:42:29 -0800
Subject: [PATCH] Increase parallelism for neighbour writing chunk statuses

Namely, everything after FEATURES. By creating a dependency
chain indicating what chunks are in use, we can safely
schedule completely independent tasks in parallel. This
will allow the chunk system to scale beyond 10 threads
per world.

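The constraint encoded by that dependency chain is simple: two status tasks conflict exactly when the squares of chunks they may write to overlap. Below is a minimal sketch of the overlap rule, assuming each task writes to a square of the given radius around its centre chunk; the class and method names are illustrative and not part of the patch. The executor added in this patch does not test pairs directly; it registers every chunk position a task covers in a hash map, so a later overlapping task discovers the earlier one as a parent and waits for it.

    // Illustrative only: two radius-limited tasks may run in parallel
    // only if their write areas (squares in chunk space) do not overlap.
    final class WriteArea {
        final int centerX, centerZ, radius; // radius in chunks, as in ChunkStatus.writeRadius

        WriteArea(final int centerX, final int centerZ, final int radius) {
            this.centerX = centerX;
            this.centerZ = centerZ;
            this.radius = radius;
        }

        boolean conflictsWith(final WriteArea other) {
            // squares overlap when the Chebyshev distance between centres
            // is at most the sum of the radii
            final int dist = Math.max(Math.abs(this.centerX - other.centerX),
                                      Math.abs(this.centerZ - other.centerZ));
            return dist <= this.radius + other.radius;
        }
    }
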
diff --git a/src/main/java/io/papermc/paper/chunk/system/RegionisedPlayerChunkLoader.java b/src/main/java/io/papermc/paper/chunk/system/RegionisedPlayerChunkLoader.java
index 245242b276e3de1edde1e2ebd0ce518fd0d08117..acf77a7745db2e28bd674107cdcb65d278625445 100644
--- a/src/main/java/io/papermc/paper/chunk/system/RegionisedPlayerChunkLoader.java
+++ b/src/main/java/io/papermc/paper/chunk/system/RegionisedPlayerChunkLoader.java
@@ -12,6 +12,7 @@ import it.unimi.dsi.fastutil.longs.LongArrayFIFOQueue;
 import it.unimi.dsi.fastutil.longs.LongArrayList;
 import it.unimi.dsi.fastutil.longs.LongComparator;
 import it.unimi.dsi.fastutil.longs.LongHeapPriorityQueue;
+import it.unimi.dsi.fastutil.longs.LongIterator;
 import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
 import net.minecraft.network.protocol.Packet;
 import net.minecraft.network.protocol.game.ClientboundSetChunkCacheCenterPacket;
@@ -30,8 +31,9 @@ import net.minecraft.world.level.levelgen.BelowZeroRetrogen;
 import org.apache.commons.lang3.mutable.MutableObject;
 import org.bukkit.craftbukkit.entity.CraftPlayer;
 import org.bukkit.entity.Player;
-
 import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -286,7 +288,92 @@ public class RegionisedPlayerChunkLoader {
             }
         }
 
-        return chunks.toLongArray();
+        // to increase generation parallelism, we want to space the chunks out so that they are not nearby when generating
+        // this also means we are minimising locality
+        // but, we need to maintain sorted order by manhattan distance
+
+        // first, build a map of manhattan distance -> chunks
+        final List<LongArrayList> byDistance = new ArrayList<>();
+        for (final LongIterator iterator = chunks.iterator(); iterator.hasNext();) {
+            final long chunkKey = iterator.nextLong();
+
+            final int chunkX = CoordinateUtils.getChunkX(chunkKey);
+            final int chunkZ = CoordinateUtils.getChunkZ(chunkKey);
+
+            final int dist = Math.abs(chunkX) + Math.abs(chunkZ);
+            if (dist == byDistance.size()) {
+                final LongArrayList list = new LongArrayList();
+                list.add(chunkKey);
+                byDistance.add(list);
+                continue;
+            }
+
+            byDistance.get(dist).add(chunkKey);
+        }
+
+        // per distance we transform the chunk list so that each element is maximally spaced out from each other
+        for (int i = 0, len = byDistance.size(); i < len; ++i) {
+            final LongArrayList notAdded = byDistance.get(i).clone();
+            final LongArrayList added = new LongArrayList();
+
+            while (!notAdded.isEmpty()) {
+                if (added.isEmpty()) {
+                    added.add(notAdded.removeLong(notAdded.size() - 1));
+                    continue;
+                }
+
+                long maxChunk = -1L;
+                int maxDist = 0;
+
+                // select the chunk from the not yet added set that has the largest minimum distance from
+                // the current set of added chunks
+
+                for (final LongIterator iterator = notAdded.iterator(); iterator.hasNext();) {
+                    final long chunkKey = iterator.nextLong();
+                    final int chunkX = CoordinateUtils.getChunkX(chunkKey);
+                    final int chunkZ = CoordinateUtils.getChunkZ(chunkKey);
+
+                    int minDist = Integer.MAX_VALUE;
+
+                    for (final LongIterator iterator2 = added.iterator(); iterator2.hasNext();) {
+                        final long addedKey = iterator2.nextLong();
+                        final int addedX = CoordinateUtils.getChunkX(addedKey);
+                        final int addedZ = CoordinateUtils.getChunkZ(addedKey);
+
+                        // here we use square distance because chunk generation uses neighbours in a square radius
+                        final int dist = Math.max(Math.abs(addedX - chunkX), Math.abs(addedZ - chunkZ));
+
+                        if (dist < minDist) {
+                            minDist = dist;
+                        }
+                    }
+
+                    if (minDist > maxDist) {
+                        maxDist = minDist;
+                        maxChunk = chunkKey;
+                    }
+                }
+
+                // move the selected chunk from the not added set to the added set
+
+                if (!notAdded.rem(maxChunk)) {
+                    throw new IllegalStateException();
+                }
+
+                added.add(maxChunk);
+            }
+
+            byDistance.set(i, added);
+        }
+
+        // now, rebuild the list so that it still maintains manhattan distance order
+        final LongArrayList ret = new LongArrayList(chunks.size());
+
+        for (final LongArrayList dist : byDistance) {
+            ret.addAll(dist);
+        }
+
+        return ret.toLongArray();
     }
 
     public static final class PlayerChunkLoaderData {
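The reordering added above can be tried outside the server. The following is a standalone sketch of the same greedy farthest-point selection over one distance ring, using illustrative names and plain int pairs instead of packed chunk keys; it is not part of the patch.

    final class ChunkSpreadSketch {
        // Repeatedly pick the chunk whose minimum "square" (Chebyshev) distance
        // to the chunks picked so far is largest. Assumes distinct positions,
        // as the patch does for chunks within a single Manhattan-distance ring.
        static int[][] spreadOut(final int[][] ring) {
            final java.util.List<int[]> notAdded = new java.util.ArrayList<>(java.util.Arrays.asList(ring));
            final java.util.List<int[]> added = new java.util.ArrayList<>();

            while (!notAdded.isEmpty()) {
                if (added.isEmpty()) {
                    added.add(notAdded.remove(notAdded.size() - 1));
                    continue;
                }
                int bestIndex = -1;
                int bestMinDist = 0;
                for (int i = 0; i < notAdded.size(); ++i) {
                    final int[] candidate = notAdded.get(i);
                    int minDist = Integer.MAX_VALUE;
                    for (final int[] picked : added) {
                        final int dist = Math.max(Math.abs(candidate[0] - picked[0]), Math.abs(candidate[1] - picked[1]));
                        minDist = Math.min(minDist, dist);
                    }
                    if (minDist > bestMinDist) {
                        bestMinDist = minDist;
                        bestIndex = i;
                    }
                }
                added.add(notAdded.remove(bestIndex));
            }
            return added.toArray(new int[0][]);
        }
    }
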
diff --git a/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
index 0b7a2b0ead4f3bc07bfd9a38c2b7cf024bd140c6..36e93fefdfbebddce4c153974c7cd81af3cb92e9 100644
--- a/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
+++ b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
@@ -4,7 +4,6 @@ import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
 import ca.spottedleaf.starlight.common.light.BlockStarLightEngine;
 import ca.spottedleaf.starlight.common.light.SkyStarLightEngine;
 import ca.spottedleaf.starlight.common.light.StarLightInterface;
-import io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler;
 import io.papermc.paper.util.CoordinateUtils;
 import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
 import it.unimi.dsi.fastutil.shorts.ShortCollection;
@@ -13,6 +12,7 @@ import net.minecraft.core.BlockPos;
 import net.minecraft.core.SectionPos;
 import net.minecraft.server.level.ServerLevel;
 import net.minecraft.world.level.ChunkPos;
+import net.minecraft.world.level.chunk.ChunkStatus;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -201,7 +201,10 @@ public final class LightQueue {
             this.chunkCoordinate = chunkCoordinate;
             this.lightEngine = lightEngine;
             this.queue = queue;
-            this.task = queue.world.chunkTaskScheduler.lightExecutor.createTask(this, priority);
+            this.task = queue.world.chunkTaskScheduler.radiusAwareScheduler.createTask(
+                    CoordinateUtils.getChunkX(chunkCoordinate), CoordinateUtils.getChunkZ(chunkCoordinate),
+                    ChunkStatus.LIGHT.writeRadius, this, priority
+            );
         }
 
         public void schedule() {
@@ -230,23 +233,23 @@ public final class LightQueue {
 
         @Override
         public void run() {
-            final SkyStarLightEngine skyEngine = this.lightEngine.getSkyLightEngine();
-            final BlockStarLightEngine blockEngine = this.lightEngine.getBlockLightEngine();
-            try {
-                synchronized (this.queue) {
-                    this.queue.chunkTasks.remove(this.chunkCoordinate);
-                }
+            synchronized (this.queue) {
+                this.queue.chunkTasks.remove(this.chunkCoordinate);
+            }
 
-                boolean litChunk = false;
-                if (this.lightTasks != null) {
-                    for (final BooleanSupplier run : this.lightTasks) {
-                        if (run.getAsBoolean()) {
-                            litChunk = true;
-                            break;
-                        }
+            boolean litChunk = false;
+            if (this.lightTasks != null) {
+                for (final BooleanSupplier run : this.lightTasks) {
+                    if (run.getAsBoolean()) {
+                        litChunk = true;
+                        break;
                     }
                 }
+            }
 
+            final SkyStarLightEngine skyEngine = this.lightEngine.getSkyLightEngine();
+            final BlockStarLightEngine blockEngine = this.lightEngine.getBlockLightEngine();
+            try {
                 final long coordinate = this.chunkCoordinate;
                 final int chunkX = CoordinateUtils.getChunkX(coordinate);
                 final int chunkZ = CoordinateUtils.getChunkZ(coordinate);
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
index 32b88d7902e877e1cce0b7635cbfa67b84b8eac0..89e8b5d3a62241df0e3cb5c296f1deb754305843 100644
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
@@ -1306,17 +1306,23 @@ public final class ChunkHolderManager {
     }
 
     public Boolean tryDrainTicketUpdates() {
-        final boolean acquired = this.ticketLock.tryLock();
-        try {
-            if (!acquired) {
-                return null;
-            }
+        boolean ret = false;
+        for (;;) {
+            final boolean acquired = this.ticketLock.tryLock();
+            try {
+                if (!acquired) {
+                    return null;
+                }
 
-            return Boolean.valueOf(this.drainTicketUpdates());
-        } finally {
-            if (acquired) {
-                this.ticketLock.unlock();
+                ret |= this.drainTicketUpdates();
+            } finally {
+                if (acquired) {
+                    this.ticketLock.unlock();
+                }
             }
+            if (this.delayedTicketUpdates.isEmpty()) {
+                return Boolean.valueOf(ret);
+            } // else: try to re-acquire
         }
     }
 
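The retry loop added to tryDrainTicketUpdates() above closes a window where updates queued right after a drain, but before the lock is released, would sit untouched until the next caller. A self-contained sketch of the same drain-and-recheck pattern, with invented names and plain JDK types:

    final class DrainExample {
        private final java.util.concurrent.locks.ReentrantLock lock = new java.util.concurrent.locks.ReentrantLock();
        private final java.util.Queue<Runnable> pending = new java.util.concurrent.ConcurrentLinkedQueue<>();

        // returns null if the lock is contended, otherwise whether anything ran
        Boolean tryDrain() {
            boolean ran = false;
            for (;;) {
                if (!this.lock.tryLock()) {
                    return null;
                }
                try {
                    Runnable task;
                    while ((task = this.pending.poll()) != null) {
                        task.run();
                        ran = true;
                    }
                } finally {
                    this.lock.unlock();
                }
                if (this.pending.isEmpty()) {
                    return Boolean.valueOf(ran);
                } // else: something was queued while we drained, go around again
            }
        }
    }
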
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
index 93b666893a9755e426701f5c2849fc0fb2026bb7..c17fd8ff9d1eb437fabd5757565137f5a3b307e4 100644
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
@@ -2,9 +2,9 @@ package io.papermc.paper.chunk.system.scheduling;
 
 import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
 import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool;
-import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadedTaskQueue;
 import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
 import com.mojang.logging.LogUtils;
+import io.papermc.paper.chunk.system.scheduling.queue.RadiusAwarePrioritisedExecutor;
 import io.papermc.paper.configuration.GlobalConfiguration;
 import io.papermc.paper.util.CoordinateUtils;
 import io.papermc.paper.util.TickThread;
@@ -21,7 +21,6 @@ import net.minecraft.world.level.ChunkPos;
 import net.minecraft.world.level.chunk.ChunkAccess;
 import net.minecraft.world.level.chunk.ChunkStatus;
 import net.minecraft.world.level.chunk.LevelChunk;
-import org.bukkit.Bukkit;
 import org.slf4j.Logger;
 import java.io.File;
 import java.util.ArrayDeque;
@@ -34,7 +33,6 @@ import java.util.Objects;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantLock;
-import java.util.function.BooleanSupplier;
 import java.util.function.Consumer;
 
 public final class ChunkTaskScheduler {
@@ -108,9 +106,9 @@ public final class ChunkTaskScheduler {
 
     public final ServerLevel world;
     public final PrioritisedThreadPool workers;
-    public final PrioritisedThreadPool.PrioritisedPoolExecutor lightExecutor;
-    public final PrioritisedThreadPool.PrioritisedPoolExecutor genExecutor;
+    public final RadiusAwarePrioritisedExecutor radiusAwareScheduler;
     public final PrioritisedThreadPool.PrioritisedPoolExecutor parallelGenExecutor;
+    private final PrioritisedThreadPool.PrioritisedPoolExecutor radiusAwareGenExecutor;
    public final PrioritisedThreadPool.PrioritisedPoolExecutor loadExecutor;
 
    // Folia - regionised ticking
@@ -128,7 +126,7 @@ public final class ChunkTaskScheduler {
         ChunkStatus.CARVERS.writeRadius = 0;
         ChunkStatus.LIQUID_CARVERS.writeRadius = 0;
         ChunkStatus.FEATURES.writeRadius = 1;
-        ChunkStatus.LIGHT.writeRadius = 1;
+        ChunkStatus.LIGHT.writeRadius = 2;
         ChunkStatus.SPAWN.writeRadius = 0;
         ChunkStatus.HEIGHTMAPS.writeRadius = 0;
         ChunkStatus.FULL.writeRadius = 0;
@@ -196,12 +194,11 @@ public final class ChunkTaskScheduler {
         this.workers = workers;
 
         final String worldName = world.getWorld().getName();
-        this.genExecutor = workers.createExecutor("Chunk single-threaded generation executor for world '" + worldName + "'", 1);
-        // same as genExecutor, as there are race conditions between updating blocks in FEATURE status while lighting chunks
-        this.lightExecutor = this.genExecutor;
-        this.parallelGenExecutor = newChunkSystemGenParallelism <= 1 ? this.genExecutor
-                : workers.createExecutor("Chunk parallel generation executor for world '" + worldName + "'", newChunkSystemGenParallelism);
+        this.parallelGenExecutor = workers.createExecutor("Chunk parallel generation executor for world '" + worldName + "'", Math.max(1, newChunkSystemGenParallelism));
+        this.radiusAwareGenExecutor =
+            newChunkSystemGenParallelism <= 1 ? this.parallelGenExecutor : workers.createExecutor("Chunk radius aware generator for world '" + worldName + "'", newChunkSystemGenParallelism);
         this.loadExecutor = workers.createExecutor("Chunk load executor for world '" + worldName + "'", newChunkSystemLoadParallelism);
+        this.radiusAwareScheduler = new RadiusAwarePrioritisedExecutor(this.radiusAwareGenExecutor, Math.max(1, newChunkSystemGenParallelism));
         this.chunkHolderManager = new ChunkHolderManager(world, this);
     }
 
@@ -740,8 +737,7 @@ public final class ChunkTaskScheduler {
    // Folia - regionised ticking
 
    public boolean halt(final boolean sync, final long maxWaitNS) {
-        this.lightExecutor.halt();
-        this.genExecutor.halt();
+        this.radiusAwareGenExecutor.halt();
         this.parallelGenExecutor.halt();
         this.loadExecutor.halt();
         final long time = System.nanoTime();
@@ -749,8 +745,7 @@ public final class ChunkTaskScheduler {
         // start at 10 * 0.5ms -> 5ms
         for (long failures = 9L;; failures = ConcurrentUtil.linearLongBackoff(failures, 500_000L, 50_000_000L)) {
             if (
-                !this.lightExecutor.isActive() &&
-                !this.genExecutor.isActive() &&
+                !this.radiusAwareGenExecutor.isActive() &&
                 !this.parallelGenExecutor.isActive() &&
                 !this.loadExecutor.isActive()
             ) {
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
index 73ce0909bd89244835a0d0f2030a25871461f1e0..ecc366a4176b2efadc46aa91aa21621f0fc6abe9 100644
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
@@ -39,8 +39,11 @@ public final class ChunkUpgradeGenericStatusTask extends ChunkProgressionTask im
         this.fromStatus = chunk.getStatus();
         this.toStatus = toStatus;
         this.neighbours = neighbours;
-        this.generateTask = (this.toStatus.isParallelCapable ? this.scheduler.parallelGenExecutor : this.scheduler.genExecutor)
-                .createTask(this, priority);
+        if (this.toStatus.isParallelCapable) {
+            this.generateTask = this.scheduler.parallelGenExecutor.createTask(this, priority);
+        } else {
+            this.generateTask = this.scheduler.radiusAwareScheduler.createTask(chunkX, chunkZ, this.toStatus.writeRadius, this, priority);
+        }
     }
 
     @Override
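The routing rule used by ChunkUpgradeGenericStatusTask above can be summarised as: parallel-capable statuses stay on the plain parallel executor, everything else goes through the radius-aware scheduler with its writeRadius. The following is a hypothetical helper showing that dispatch; the helper itself is not part of the patch and imports from the chunk system are assumed.

    // Illustrative only: route status work the same way the constructor above does.
    static PrioritisedExecutor.PrioritisedTask scheduleStatusWork(
            final ChunkTaskScheduler scheduler, final int chunkX, final int chunkZ,
            final ChunkStatus status, final Runnable work, final PrioritisedExecutor.Priority priority) {
        if (status.isParallelCapable) {
            // no neighbour writes: may run concurrently with anything else
            final PrioritisedExecutor.PrioritisedTask task = scheduler.parallelGenExecutor.createTask(work, priority);
            task.queue();
            return task;
        }
        // writes into a square of radius status.writeRadius around the chunk:
        // the radius-aware scheduler serialises it against overlapping tasks only
        return scheduler.radiusAwareScheduler.queueTask(chunkX, chunkZ, status.writeRadius, work, priority);
    }
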
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
new file mode 100644
index 0000000000000000000000000000000000000000..c6d3ba027ed1513232c501b4b175720cf4fe8b82
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
@@ -0,0 +1,654 @@
+package io.papermc.paper.chunk.system.scheduling.queue;
+
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import io.papermc.paper.util.CoordinateUtils;
+import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap;
+import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.PriorityQueue;
+
+public class RadiusAwarePrioritisedExecutor {
+
+    private static final Comparator<DependencyNode> DEPENDENCY_NODE_COMPARATOR = (final DependencyNode t1, final DependencyNode t2) -> {
+        return Long.compare(t1.id, t2.id);
+    };
+
+    private final DependencyTree[] queues = new DependencyTree[PrioritisedExecutor.Priority.TOTAL_SCHEDULABLE_PRIORITIES];
+    private static final int NO_TASKS_QUEUED = -1;
+    private int selectedQueue = NO_TASKS_QUEUED;
+    private boolean canQueueTasks = true;
+
+    public RadiusAwarePrioritisedExecutor(final PrioritisedExecutor executor, final int maxToSchedule) {
+        for (int i = 0; i < this.queues.length; ++i) {
+            this.queues[i] = new DependencyTree(this, executor, maxToSchedule, i);
+        }
+    }
+
+    private boolean canQueueTasks() {
+        return this.canQueueTasks;
+    }
+
+    private List<PrioritisedExecutor.PrioritisedTask> treeFinished() {
+        this.canQueueTasks = true;
+        for (int priority = 0; priority < this.queues.length; ++priority) {
+            final DependencyTree queue = this.queues[priority];
+            if (queue.hasWaitingTasks()) {
+                this.selectedQueue = priority;
+                return queue.tryPushTasks();
+            }
+        }
+
+        this.selectedQueue = NO_TASKS_QUEUED;
+
+        return null;
+    }
+
+    private List<PrioritisedExecutor.PrioritisedTask> queue(final Task task, final PrioritisedExecutor.Priority priority) {
+        final int priorityId = priority.priority;
+        final DependencyTree queue = this.queues[priorityId];
+
+        final DependencyNode node = new DependencyNode(task, queue);
+
+        if (task.dependencyNode != null) {
+            throw new IllegalStateException();
+        }
+        task.dependencyNode = node;
+
+        queue.pushNode(node);
+
+        if (this.selectedQueue == NO_TASKS_QUEUED) {
+            this.canQueueTasks = true;
+            this.selectedQueue = priorityId;
+            return queue.tryPushTasks();
+        }
+
+        if (!this.canQueueTasks) {
+            return null;
+        }
+
+        if (PrioritisedExecutor.Priority.isHigherPriority(priorityId, this.selectedQueue)) {
+            // prevent the lower priority tree from queueing more tasks
+            this.canQueueTasks = false;
+            return null;
+        }
+
+        // priorityId != selectedQueue: lower priority, don't care - treeFinished will pick it up
+        return priorityId == this.selectedQueue ? queue.tryPushTasks() : null;
+    }
+
+    public PrioritisedExecutor.PrioritisedTask createTask(final int chunkX, final int chunkZ, final int radius,
+                                                          final Runnable run, final PrioritisedExecutor.Priority priority) {
+        if (radius < 0) {
+            throw new IllegalArgumentException("Radius must be >= 0: " + radius);
+        }
+        return new Task(this, chunkX, chunkZ, radius, run, priority);
+    }
+
+    public PrioritisedExecutor.PrioritisedTask createTask(final int chunkX, final int chunkZ, final int radius,
+                                                          final Runnable run) {
+        return this.createTask(chunkX, chunkZ, radius, run, PrioritisedExecutor.Priority.NORMAL);
+    }
+
+    public PrioritisedExecutor.PrioritisedTask queueTask(final int chunkX, final int chunkZ, final int radius,
+                                                         final Runnable run, final PrioritisedExecutor.Priority priority) {
+        final PrioritisedExecutor.PrioritisedTask ret = this.createTask(chunkX, chunkZ, radius, run, priority);
+
+        ret.queue();
+
+        return ret;
+    }
+
+    public PrioritisedExecutor.PrioritisedTask queueTask(final int chunkX, final int chunkZ, final int radius,
+                                                         final Runnable run) {
+        final PrioritisedExecutor.PrioritisedTask ret = this.createTask(chunkX, chunkZ, radius, run);
+
+        ret.queue();
+
+        return ret;
+    }
+
+    public PrioritisedExecutor.PrioritisedTask createInfiniteRadiusTask(final Runnable run, final PrioritisedExecutor.Priority priority) {
+        return new Task(this, 0, 0, -1, run, priority);
+    }
+
+    public PrioritisedExecutor.PrioritisedTask createInfiniteRadiusTask(final Runnable run) {
+        return this.createInfiniteRadiusTask(run, PrioritisedExecutor.Priority.NORMAL);
+    }
+
+    public PrioritisedExecutor.PrioritisedTask queueInfiniteRadiusTask(final Runnable run, final PrioritisedExecutor.Priority priority) {
+        final PrioritisedExecutor.PrioritisedTask ret = this.createInfiniteRadiusTask(run, priority);
+
+        ret.queue();
+
+        return ret;
+    }
+
+    public PrioritisedExecutor.PrioritisedTask queueInfiniteRadiusTask(final Runnable run) {
+        final PrioritisedExecutor.PrioritisedTask ret = this.createInfiniteRadiusTask(run, PrioritisedExecutor.Priority.NORMAL);
+
+        ret.queue();
+
+        return ret;
+    }
+
+    // all accesses must be synchronised by the radius aware object
+    private static final class DependencyTree {
+
+        private final RadiusAwarePrioritisedExecutor scheduler;
+        private final PrioritisedExecutor executor;
+        private final int maxToSchedule;
+        private final int treeIndex;
+
+        private int currentlyExecuting;
+        private long idGenerator;
+
+        private final PriorityQueue<DependencyNode> awaiting = new PriorityQueue<>(DEPENDENCY_NODE_COMPARATOR);
+
+        private final PriorityQueue<DependencyNode> infiniteRadius = new PriorityQueue<>(DEPENDENCY_NODE_COMPARATOR);
+        private boolean isInfiniteRadiusScheduled;
+
+        private final Long2ReferenceOpenHashMap<DependencyNode> nodeByPosition = new Long2ReferenceOpenHashMap<>();
+
+        public DependencyTree(final RadiusAwarePrioritisedExecutor scheduler, final PrioritisedExecutor executor,
+                              final int maxToSchedule, final int treeIndex) {
+            this.scheduler = scheduler;
+            this.executor = executor;
+            this.maxToSchedule = maxToSchedule;
+            this.treeIndex = treeIndex;
+        }
+
+        public boolean hasWaitingTasks() {
+            return !this.awaiting.isEmpty() || !this.infiniteRadius.isEmpty();
+        }
+
+        private long nextId() {
+            return this.idGenerator++;
+        }
+
+        private boolean isExecutingAnyTasks() {
+            return this.currentlyExecuting != 0;
+        }
+
+        private void pushNode(final DependencyNode node) {
+            if (!node.task.isFiniteRadius()) {
+                this.infiniteRadius.add(node);
+                return;
+            }
+
+            // set up dependency for node
+            final Task task = node.task;
+
+            final int centerX = task.chunkX;
+            final int centerZ = task.chunkZ;
+            final int radius = task.radius;
+
+            final int minX = centerX - radius;
+            final int maxX = centerX + radius;
+
+            final int minZ = centerZ - radius;
+            final int maxZ = centerZ + radius;
+
+            ReferenceOpenHashSet<DependencyNode> parents = null;
+            for (int currZ = minZ; currZ <= maxZ; ++currZ) {
+                for (int currX = minX; currX <= maxX; ++currX) {
+                    final DependencyNode dependency = this.nodeByPosition.put(CoordinateUtils.getChunkKey(currX, currZ), node);
+                    if (dependency != null) {
+                        if (parents == null) {
+                            parents = new ReferenceOpenHashSet<>();
+                        }
+                        if (parents.add(dependency)) {
+                            // added a dependency, so we need to add as a child to the dependency
+                            if (dependency.children == null) {
+                                dependency.children = new ArrayList<>();
+                            }
+                            dependency.children.add(node);
+                        }
+                    }
+                }
+            }
+
+            if (parents == null) {
+                // no dependencies, add straight to awaiting
+                this.awaiting.add(node);
+            } else {
+                node.parents = parents;
+                // we will be added to awaiting once we have no parents
+            }
+        }
+
+        // called only when a node is returned after being executed
+        private List<PrioritisedExecutor.PrioritisedTask> returnNode(final DependencyNode node) {
+            final Task task = node.task;
+
+            // now that the task is completed, we can push its children to the awaiting queue
+            this.pushChildren(node);
+
+            if (task.isFiniteRadius()) {
+                // remove from dependency map
+                this.removeNodeFromMap(node);
+            } else {
+                // mark as no longer executing infinite radius
+                if (!this.isInfiniteRadiusScheduled) {
+                    throw new IllegalStateException();
+                }
+                this.isInfiniteRadiusScheduled = false;
+            }
+
+            // decrement executing count, we are done executing this task
+            --this.currentlyExecuting;
+
+            if (this.currentlyExecuting == 0) {
+                return this.scheduler.treeFinished();
+            }
+
+            return this.scheduler.canQueueTasks() ? this.tryPushTasks() : null;
+        }
+
+        private List<PrioritisedExecutor.PrioritisedTask> tryPushTasks() {
+            // tasks are not queued, but only created here - we do hold the lock for the map
+            List<PrioritisedExecutor.PrioritisedTask> ret = null;
+            PrioritisedExecutor.PrioritisedTask pushedTask;
+            while ((pushedTask = this.tryPushTask()) != null) {
+                if (ret == null) {
+                    ret = new ArrayList<>();
+                }
+                ret.add(pushedTask);
+            }
+
+            return ret;
+        }
+
+        private void removeNodeFromMap(final DependencyNode node) {
+            final Task task = node.task;
+
+            final int centerX = task.chunkX;
+            final int centerZ = task.chunkZ;
+            final int radius = task.radius;
+
+            final int minX = centerX - radius;
+            final int maxX = centerX + radius;
+
+            final int minZ = centerZ - radius;
+            final int maxZ = centerZ + radius;
+
+            for (int currZ = minZ; currZ <= maxZ; ++currZ) {
+                for (int currX = minX; currX <= maxX; ++currX) {
+                    this.nodeByPosition.remove(CoordinateUtils.getChunkKey(currX, currZ), node);
+                }
+            }
+        }
+
+        private void pushChildren(final DependencyNode node) {
+            // add all the children that we can into awaiting
+            final List<DependencyNode> children = node.children;
+            if (children != null) {
+                for (int i = 0, len = children.size(); i < len; ++i) {
+                    final DependencyNode child = children.get(i);
+                    if (!child.parents.remove(node)) {
+                        throw new IllegalStateException();
+                    }
+                    if (child.parents.isEmpty()) {
+                        // no more dependents, we can push to awaiting
+                        child.parents = null;
+                        // even if the child is purged, we need to push it so that its children will be pushed
+                        this.awaiting.add(child);
+                    }
+                }
+            }
+        }
+
+        private DependencyNode pollAwaiting() {
+            final DependencyNode ret = this.awaiting.poll();
+            if (ret == null) {
+                return ret;
+            }
+
+            if (ret.parents != null) {
+                throw new IllegalStateException();
+            }
+
+            if (ret.purged) {
+                // need to manually remove from state here
+                this.pushChildren(ret);
+                this.removeNodeFromMap(ret);
+            } // else: delay children push until the task has finished
+
+            return ret;
+        }
+
+        private DependencyNode pollInfinite() {
+            return this.infiniteRadius.poll();
+        }
+
+        public PrioritisedExecutor.PrioritisedTask tryPushTask() {
+            if (this.currentlyExecuting >= this.maxToSchedule || this.isInfiniteRadiusScheduled) {
+                return null;
+            }
+
+            DependencyNode firstInfinite;
+            while ((firstInfinite = this.infiniteRadius.peek()) != null && firstInfinite.purged) {
+                this.pollInfinite();
+            }
+
+            DependencyNode firstAwaiting;
+            while ((firstAwaiting = this.awaiting.peek()) != null && firstAwaiting.purged) {
+                this.pollAwaiting();
+            }
+
+            if (firstInfinite == null && firstAwaiting == null) {
+                return null;
+            }
+
+            ++this.currentlyExecuting;
+
+            // firstAwaiting compared to firstInfinite
+            final int compare;
+
+            if (firstAwaiting == null) {
+                // we choose first infinite, or infinite < awaiting
+                compare = 1;
+            } else if (firstInfinite == null) {
+                // we choose first awaiting, or awaiting < infinite
+                compare = -1;
+            } else {
+                compare = DEPENDENCY_NODE_COMPARATOR.compare(firstAwaiting, firstInfinite);
+            }
+
+            if (compare >= 0) {
+                this.pollInfinite();
+                this.isInfiniteRadiusScheduled = true;
+                return firstInfinite.task.pushTask(this.executor);
+            } else {
+                this.pollAwaiting();
+                return firstAwaiting.task.pushTask(this.executor);
+            }
+        }
+    }
+
+    private static final class DependencyNode {
+
+        private final Task task;
+        private final DependencyTree tree;
+
+        // dependency tree fields
+        // (must hold lock on the scheduler to use)
+        // null is the same as empty, we just use it so that we don't allocate the set unless we need to
+        private List<DependencyNode> children;
+        // null is the same as empty, indicating that this task is considered "awaiting"
+        private ReferenceOpenHashSet<DependencyNode> parents;
+        // false -> scheduled and not cancelled
+        // true -> scheduled but cancelled
+        private boolean purged;
+        private final long id;
+
+        public DependencyNode(final Task task, final DependencyTree tree) {
+            this.task = task;
+            this.id = tree.nextId();
+            this.tree = tree;
+        }
+    }
+
+    private static final class Task implements PrioritisedExecutor.PrioritisedTask, Runnable {
+
+        // task specific fields
+        private final RadiusAwarePrioritisedExecutor scheduler;
+        private final int chunkX;
+        private final int chunkZ;
+        private final int radius;
+        private Runnable run;
+        private PrioritisedExecutor.Priority priority;
+
+        private DependencyNode dependencyNode;
+        private PrioritisedExecutor.PrioritisedTask queuedTask;
+
+        private Task(final RadiusAwarePrioritisedExecutor scheduler, final int chunkX, final int chunkZ, final int radius,
+                     final Runnable run, final PrioritisedExecutor.Priority priority) {
+            this.scheduler = scheduler;
+            this.chunkX = chunkX;
+            this.chunkZ = chunkZ;
+            this.radius = radius;
+            this.run = run;
+            this.priority = priority;
+        }
+
+        private boolean isFiniteRadius() {
+            return this.radius >= 0;
+        }
+
+        private PrioritisedExecutor.PrioritisedTask pushTask(final PrioritisedExecutor executor) {
+            return this.queuedTask = executor.createTask(this, this.priority);
+        }
+
+        private void executeTask() {
+            final Runnable run = this.run;
+            this.run = null;
+            run.run();
+        }
+
+        private static void scheduleTasks(final List<PrioritisedExecutor.PrioritisedTask> toSchedule) {
+            if (toSchedule != null) {
+                for (int i = 0, len = toSchedule.size(); i < len; ++i) {
+                    toSchedule.get(i).queue();
+                }
+            }
+        }
+
+        private void returnNode() {
+            final List<PrioritisedExecutor.PrioritisedTask> toSchedule;
+            synchronized (this.scheduler) {
+                final DependencyNode node = this.dependencyNode;
+                this.dependencyNode = null;
+                toSchedule = node.tree.returnNode(node);
+            }
+
+            scheduleTasks(toSchedule);
+        }
+
+        @Override
+        public void run() {
+            final Runnable run = this.run;
+            this.run = null;
+            try {
+                run.run();
+            } finally {
+                this.returnNode();
+            }
+        }
+
+        @Override
+        public boolean queue() {
+            final List<PrioritisedExecutor.PrioritisedTask> toSchedule;
+            synchronized (this.scheduler) {
+                if (this.queuedTask != null || this.dependencyNode != null || this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+                    return false;
+                }
+
+                toSchedule = this.scheduler.queue(this, this.priority);
+            }
+
+            scheduleTasks(toSchedule);
+            return true;
+        }
+
+        @Override
+        public boolean cancel() {
+            final PrioritisedExecutor.PrioritisedTask task;
+            synchronized (this.scheduler) {
+                if ((task = this.queuedTask) == null) {
+                    if (this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+                        return false;
+                    }
+
+                    this.priority = PrioritisedExecutor.Priority.COMPLETING;
+                    if (this.dependencyNode != null) {
+                        this.dependencyNode.purged = true;
+                        this.dependencyNode = null;
+                    }
+
+                    return true;
+                }
+            }
+
+            if (task.cancel()) {
+                // must manually return the node
+                this.run = null;
+                this.returnNode();
+                return true;
+            }
+            return false;
+        }
+
+        @Override
+        public boolean execute() {
+            final PrioritisedExecutor.PrioritisedTask task;
+            synchronized (this.scheduler) {
+                if ((task = this.queuedTask) == null) {
+                    if (this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+                        return false;
+                    }
+
+                    this.priority = PrioritisedExecutor.Priority.COMPLETING;
+                    if (this.dependencyNode != null) {
+                        this.dependencyNode.purged = true;
+                        this.dependencyNode = null;
+                    }
+                    // fall through to execution logic
+                }
+            }
+
+            if (task != null) {
+                // will run the return node logic automatically
+                return task.execute();
+            } else {
+                // don't run node removal/insertion logic, we aren't actually removed from the dependency tree
+                this.executeTask();
+                return true;
+            }
+        }
+
+        @Override
+        public PrioritisedExecutor.Priority getPriority() {
+            final PrioritisedExecutor.PrioritisedTask task;
+            synchronized (this.scheduler) {
+                if ((task = this.queuedTask) == null) {
+                    return this.priority;
+                }
+            }
+
+            return task.getPriority();
+        }
+
+        @Override
+        public boolean setPriority(final PrioritisedExecutor.Priority priority) {
+            if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+                throw new IllegalArgumentException("Invalid priority " + priority);
+            }
+
+            final PrioritisedExecutor.PrioritisedTask task;
+            List<PrioritisedExecutor.PrioritisedTask> toSchedule = null;
+            synchronized (this.scheduler) {
+                if ((task = this.queuedTask) == null) {
+                    if (this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+                        return false;
+                    }
+
+                    if (this.priority == priority) {
+                        return true;
+                    }
+
+                    this.priority = priority;
+                    if (this.dependencyNode != null) {
+                        // need to re-insert node
+                        this.dependencyNode.purged = true;
+                        this.dependencyNode = null;
+                        toSchedule = this.scheduler.queue(this, priority);
+                    }
+                }
+            }
+
+            if (task != null) {
+                return task.setPriority(priority);
+            }
+
+            scheduleTasks(toSchedule);
+
+            return true;
+        }
+
+        @Override
+        public boolean raisePriority(final PrioritisedExecutor.Priority priority) {
+            if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+                throw new IllegalArgumentException("Invalid priority " + priority);
+            }
+
+            final PrioritisedExecutor.PrioritisedTask task;
+            List<PrioritisedExecutor.PrioritisedTask> toSchedule = null;
+            synchronized (this.scheduler) {
+                if ((task = this.queuedTask) == null) {
+                    if (this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+                        return false;
+                    }
+
+                    if (this.priority.isHigherOrEqualPriority(priority)) {
+                        return true;
+                    }
+
+                    this.priority = priority;
+                    if (this.dependencyNode != null) {
+                        // need to re-insert node
+                        this.dependencyNode.purged = true;
+                        this.dependencyNode = null;
+                        toSchedule = this.scheduler.queue(this, priority);
+                    }
+                }
+            }
+
+            if (task != null) {
+                return task.raisePriority(priority);
+            }
+
+            scheduleTasks(toSchedule);
+
+            return true;
+        }
+
+        @Override
+        public boolean lowerPriority(final PrioritisedExecutor.Priority priority) {
+            if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+                throw new IllegalArgumentException("Invalid priority " + priority);
+            }
+
+            final PrioritisedExecutor.PrioritisedTask task;
+            List<PrioritisedExecutor.PrioritisedTask> toSchedule = null;
+            synchronized (this.scheduler) {
+                if ((task = this.queuedTask) == null) {
+                    if (this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+                        return false;
+                    }
+
+                    if (this.priority.isLowerOrEqualPriority(priority)) {
+                        return true;
+                    }
+
+                    this.priority = priority;
+                    if (this.dependencyNode != null) {
+                        // need to re-insert node
+                        this.dependencyNode.purged = true;
+                        this.dependencyNode = null;
+                        toSchedule = this.scheduler.queue(this, priority);
+                    }
+                }
+            }
+
+            if (task != null) {
+                return task.lowerPriority(priority);
+            }
+
+            scheduleTasks(toSchedule);
+
+            return true;
+        }
+    }
+}
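A usage sketch for the new executor follows. It is illustrative only: workerPool, generate and relightEverything are placeholders, and in the patch the wrapped executor is the per-world radius aware generation executor created in ChunkTaskScheduler.

    // Illustrative wiring only; imports and the PrioritisedExecutor workerPool are assumed.
    void example(final PrioritisedExecutor workerPool) {
        final RadiusAwarePrioritisedExecutor radiusAware = new RadiusAwarePrioritisedExecutor(workerPool, 4);

        // A and B write to overlapping squares (centres one chunk apart, radius 1),
        // so B becomes a child of A in the dependency tree and runs after it.
        radiusAware.queueTask(0, 0, 1, () -> generate(0, 0));
        radiusAware.queueTask(1, 0, 1, () -> generate(1, 0));

        // C is far away: it has no parents and may be pushed to the pool immediately.
        radiusAware.queueTask(100, 100, 1, () -> generate(100, 100));

        // A relight over an arbitrary chunk set: once this is scheduled, no further
        // tasks are pushed from this scheduler until it completes.
        radiusAware.queueInfiniteRadiusTask(() -> relightEverything());
    }
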
diff --git a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
index eef501b0558680e5563b0a15a93bd3ab217b91d8..4e06ba4165bc385fc360e771db91d75dbb73bea7 100644
--- a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
+++ b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
@@ -95,7 +95,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
             ++totalChunks;
         }
 
-        this.chunkMap.level.chunkTaskScheduler.lightExecutor.queueRunnable(() -> { // Paper - rewrite chunk system
+        this.chunkMap.level.chunkTaskScheduler.radiusAwareScheduler.queueInfiniteRadiusTask(() -> { // Paper - rewrite chunk system
             this.theLightEngine.relightChunks(chunks, (ChunkPos chunkPos) -> {
                 chunkLightCallback.accept(chunkPos);
                 Runnable run = () -> { // Folia - region threading
|