diff --git a/.gitignore b/.gitignore
index 615b683..ed6498d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,7 +46,7 @@ out/
# other stuff
run/
-forktest-server
-forktest-api
+Folia-Server
+Folia-API
!gradle/wrapper/gradle-wrapper.jar
diff --git a/build.gradle.kts b/build.gradle.kts
index ba3ffd2..7dee066 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -50,7 +50,7 @@ subprojects {
}
paperweight {
- serverProject.set(project(":forktest-server"))
+ serverProject.set(project(":Folia-Server"))
remapRepo.set(paperMavenPublicUrl)
decompileRepo.set(paperMavenPublicUrl)
@@ -58,10 +58,10 @@ paperweight {
usePaperUpstream(providers.gradleProperty("paperRef")) {
withPaperPatcher {
apiPatchDir.set(layout.projectDirectory.dir("patches/api"))
- apiOutputDir.set(layout.projectDirectory.dir("forktest-api"))
+ apiOutputDir.set(layout.projectDirectory.dir("Folia-API"))
serverPatchDir.set(layout.projectDirectory.dir("patches/server"))
- serverOutputDir.set(layout.projectDirectory.dir("forktest-server"))
+ serverOutputDir.set(layout.projectDirectory.dir("Folia-Server"))
}
}
}
@@ -71,20 +71,20 @@ paperweight {
//
tasks.generateDevelopmentBundle {
- apiCoordinates.set("com.example.paperfork:forktest-api")
+ apiCoordinates.set("dev.folia:folia-api")
mojangApiCoordinates.set("io.papermc.paper:paper-mojangapi")
libraryRepositories.set(
listOf(
"https://repo.maven.apache.org/maven2/",
paperMavenPublicUrl,
- // "https://my.repo/", // This should be a repo hosting your API (in this example, 'com.example.paperfork:forktest-api')
+ // "https://my.repo/", // This should be a repo hosting your API (in this example, 'dev.folia:folia-api')
)
)
}
allprojects {
// Publishing API:
- // ./gradlew :ForkTest-API:publish[ToMavenLocal]
+ // ./gradlew :Folia-API:publish[ToMavenLocal]
publishing {
repositories {
maven {
diff --git a/gradle.properties b/gradle.properties
index 829d5ad..ed67948 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -1,8 +1,8 @@
-group=com.example.paperfork
+group=dev.folia
version=1.19.3-R0.1-SNAPSHOT
mcVersion=1.19.3
-paperRef=adb8e499dbc6050abf4a690d369cf506bc3ac318
+paperRef=4da844f1e3e375a24a0e518b0787ae909fa0e247
org.gradle.caching=true
org.gradle.parallel=true
diff --git a/patches/server/0001-Build-changes.patch b/patches/server/0001-Build-changes.patch
index 18163fc..ad4a197 100644
--- a/patches/server/0001-Build-changes.patch
+++ b/patches/server/0001-Build-changes.patch
@@ -1,11 +1,11 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: MiniDigger
-Date: Sat, 12 Jun 2021 16:40:34 +0200
+From: Spottedleaf <Spottedleaf@users.noreply.github.com>
+Date: Thu, 23 Feb 2023 07:56:29 -0800
Subject: [PATCH] Build changes
diff --git a/build.gradle.kts b/build.gradle.kts
-index d5d49bb2b47c889e12d17dc87b8c439a60b3fe67..497db79710a93e18c245ba8ac5853dd5ac6012b5 100644
+index 781609605d25283009e5f3e61649ecde9ea9a4cb..0a389871b8bd66252455773a6d735576a1bfcd77 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -7,8 +7,12 @@ plugins {
@@ -14,12 +14,12 @@ index d5d49bb2b47c889e12d17dc87b8c439a60b3fe67..497db79710a93e18c245ba8ac5853dd5
dependencies {
- implementation(project(":paper-api"))
- implementation(project(":paper-mojangapi"))
-+ // ForkTest start
-+ implementation(project(":forktest-api"))
++ // Folia start
++ implementation(project(":Folia-API"))
+ implementation("io.papermc.paper:paper-mojangapi:1.19.3-R0.1-SNAPSHOT") {
+ exclude("io.papermc.paper", "paper-api")
+ }
-+ // ForkTest end
++ // Folia end
// Paper start
implementation("org.jline:jline-terminal-jansi:3.21.0")
implementation("net.minecrell:terminalconsoleappender:1.3.0")
@@ -28,7 +28,7 @@ index d5d49bb2b47c889e12d17dc87b8c439a60b3fe67..497db79710a93e18c245ba8ac5853dd5
"Main-Class" to "org.bukkit.craftbukkit.Main",
"Implementation-Title" to "CraftBukkit",
- "Implementation-Version" to "git-Paper-$implementationVersion",
-+ "Implementation-Version" to "git-ForkTest-$implementationVersion", // ForkTest
++ "Implementation-Version" to "git-Folia-$implementationVersion", // Folia
"Implementation-Vendor" to date, // Paper
"Specification-Title" to "Bukkit",
"Specification-Version" to project.version,
@@ -37,12 +37,12 @@ index d5d49bb2b47c889e12d17dc87b8c439a60b3fe67..497db79710a93e18c245ba8ac5853dd5
block: JavaExec.() -> Unit
): TaskProvider<JavaExec> = register<JavaExec>(name) {
- group = "paper"
-+ group = "paperweight" // ForkTest
++ group = "paperweight" // Folia
mainClass.set("org.bukkit.craftbukkit.Main")
standardInput = System.`in`
workingDir = rootProject.layout.projectDirectory
diff --git a/src/main/java/net/minecraft/server/MinecraftServer.java b/src/main/java/net/minecraft/server/MinecraftServer.java
-index 710ca7d3a5659953f64bc6dccdd93b43300961cc..57e0aa0341b359442e562ef4e213b1c785841788 100644
+index 710ca7d3a5659953f64bc6dccdd93b43300961cc..2ee4e5e8d17a3a1e6a342c74b13135df030ffef6 100644
--- a/src/main/java/net/minecraft/server/MinecraftServer.java
+++ b/src/main/java/net/minecraft/server/MinecraftServer.java
@@ -1654,7 +1654,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTask>
- return "Paper"; // Paper - Paper > // Spigot - Spigot > // CraftBukkit - cb > vanilla!
-+ return "ForkTest"; // ForkTest - ForkTest > // Paper - Paper > // Spigot - Spigot > // CraftBukkit - cb > vanilla!
++ return "Folia"; // Folia - Folia > // Paper - Paper > // Spigot - Spigot > // CraftBukkit - cb > vanilla!
}
public SystemReport fillSystemReport(SystemReport details) {
diff --git a/src/main/java/org/bukkit/craftbukkit/CraftServer.java b/src/main/java/org/bukkit/craftbukkit/CraftServer.java
-index 47df6f4268a63118da8187f4102c876bd37d1680..24a3c5228fe22683bc87c0c6251a9e49b9426ad7 100644
+index bfc4ee36befb925ab4eb6b96f5c1aa6c76bf711f..2ea3778ee1348e5d06b15a2c5dc5d9bd4136dbe3 100644
--- a/src/main/java/org/bukkit/craftbukkit/CraftServer.java
+++ b/src/main/java/org/bukkit/craftbukkit/CraftServer.java
@@ -261,7 +261,7 @@ import javax.annotation.Nullable; // Paper
@@ -63,12 +63,12 @@ index 47df6f4268a63118da8187f4102c876bd37d1680..24a3c5228fe22683bc87c0c6251a9e49
public final class CraftServer implements Server {
- private final String serverName = "Paper"; // Paper
-+ private final String serverName = "ForkTest"; // ForkTest // Paper
++ private final String serverName = "Folia"; // Folia // Paper
private final String serverVersion;
private final String bukkitVersion = Versioning.getBukkitVersion();
private final Logger logger = Logger.getLogger("Minecraft");
diff --git a/src/main/java/org/bukkit/craftbukkit/util/Versioning.java b/src/main/java/org/bukkit/craftbukkit/util/Versioning.java
-index 774556a62eb240da42e84db4502e2ed43495be17..21f39bd0c33ef2635249298e6a247afba8b05742 100644
+index 774556a62eb240da42e84db4502e2ed43495be17..e9b6ca3aa25e140467ae866d572483050ea3fa0e 100644
--- a/src/main/java/org/bukkit/craftbukkit/util/Versioning.java
+++ b/src/main/java/org/bukkit/craftbukkit/util/Versioning.java
@@ -11,7 +11,7 @@ public final class Versioning {
@@ -76,7 +76,7 @@ index 774556a62eb240da42e84db4502e2ed43495be17..21f39bd0c33ef2635249298e6a247afb
String result = "Unknown-Version";
- InputStream stream = Bukkit.class.getClassLoader().getResourceAsStream("META-INF/maven/io.papermc.paper/paper-api/pom.properties");
-+ InputStream stream = Bukkit.class.getClassLoader().getResourceAsStream("META-INF/maven/com.example.paperfork/forktest-api/pom.properties"); // ForkTest
++ InputStream stream = Bukkit.class.getClassLoader().getResourceAsStream("META-INF/maven/dev.folia/folia-api/pom.properties"); // Folia
Properties properties = new Properties();
if (stream != null) {
diff --git a/patches/server/0002-New-player-chunk-loader-system.patch b/patches/server/0002-New-player-chunk-loader-system.patch
new file mode 100644
index 0000000..ea73192
--- /dev/null
+++ b/patches/server/0002-New-player-chunk-loader-system.patch
@@ -0,0 +1,2281 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Spottedleaf <Spottedleaf@users.noreply.github.com>
+Date: Wed, 1 Feb 2023 21:06:31 -0800
+Subject: [PATCH] New player chunk loader system
+
+
+diff --git a/src/main/java/co/aikar/timings/TimingsExport.java b/src/main/java/co/aikar/timings/TimingsExport.java
+index 06bff37e4c1fddd3be6343049a66787c63fb420c..1be1fe766401221b6adb417175312007d29d347e 100644
+--- a/src/main/java/co/aikar/timings/TimingsExport.java
++++ b/src/main/java/co/aikar/timings/TimingsExport.java
+@@ -163,9 +163,9 @@ public class TimingsExport extends Thread {
+ return pair(rule, world.getWorld().getGameRuleValue(rule));
+ })),
+ // Paper start - replace chunk loader system
+- pair("ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance()),
+- pair("no-ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance()),
+- pair("sending-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance())
++ pair("ticking-distance", world.getWorld().getSimulationDistance()),
++ pair("no-ticking-distance", world.getWorld().getViewDistance()),
++ pair("sending-distance", world.getWorld().getSendViewDistance())
+ // Paper end - replace chunk loader system
+ ));
+ }));
+diff --git a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
+index 0e45a340ae534caf676b7f9d0adcbcee5829925e..6df1948b1204a7288ecb7238b6fc2a733f7d25b3 100644
+--- a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
++++ b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
+@@ -129,15 +129,15 @@ public final class ChunkSystem {
+ }
+
+ public static int getSendViewDistance(final ServerPlayer player) {
+- return io.papermc.paper.chunk.PlayerChunkLoader.getSendViewDistance(player);
++ return io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.getAPISendViewDistance(player);
+ }
+
+ public static int getLoadViewDistance(final ServerPlayer player) {
+- return io.papermc.paper.chunk.PlayerChunkLoader.getLoadViewDistance(player);
++ return io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.getLoadViewDistance(player);
+ }
+
+ public static int getTickViewDistance(final ServerPlayer player) {
+- return io.papermc.paper.chunk.PlayerChunkLoader.getTickViewDistance(player);
++ return io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.getAPITickViewDistance(player);
+ }
+
+ private ChunkSystem() {
+diff --git a/src/main/java/io/papermc/paper/chunk/system/RegionisedPlayerChunkLoader.java b/src/main/java/io/papermc/paper/chunk/system/RegionisedPlayerChunkLoader.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..a4d58352eebed11fafde8c381afe3572893b8f8f
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/chunk/system/RegionisedPlayerChunkLoader.java
+@@ -0,0 +1,1302 @@
++package io.papermc.paper.chunk.system;
++
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import io.papermc.paper.chunk.system.scheduling.ChunkHolderManager;
++import io.papermc.paper.configuration.GlobalConfiguration;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.IntegerUtil;
++import io.papermc.paper.util.IntervalledCounter;
++import io.papermc.paper.util.TickThread;
++import it.unimi.dsi.fastutil.longs.Long2ByteOpenHashMap;
++import it.unimi.dsi.fastutil.longs.LongArrayFIFOQueue;
++import it.unimi.dsi.fastutil.longs.LongArrayList;
++import it.unimi.dsi.fastutil.longs.LongComparator;
++import it.unimi.dsi.fastutil.longs.LongHeapPriorityQueue;
++import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
++import net.minecraft.network.protocol.Packet;
++import net.minecraft.network.protocol.game.ClientboundSetChunkCacheCenterPacket;
++import net.minecraft.network.protocol.game.ClientboundSetChunkCacheRadiusPacket;
++import net.minecraft.network.protocol.game.ClientboundSetSimulationDistancePacket;
++import net.minecraft.server.level.ChunkMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.ServerPlayer;
++import net.minecraft.server.level.TicketType;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.GameRules;
++import net.minecraft.world.level.chunk.ChunkAccess;
++import net.minecraft.world.level.chunk.ChunkStatus;
++import net.minecraft.world.level.chunk.LevelChunk;
++import net.minecraft.world.level.levelgen.BelowZeroRetrogen;
++import org.apache.commons.lang3.mutable.MutableObject;
++import org.bukkit.craftbukkit.entity.CraftPlayer;
++import org.bukkit.entity.Player;
++
++import java.util.ArrayDeque;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.atomic.AtomicLong;
++
++public class RegionisedPlayerChunkLoader {
++
++ public static final TicketType<Long> REGION_PLAYER_TICKET = TicketType.create("region_player_ticket", Long::compareTo);
++
++ public static final int MIN_VIEW_DISTANCE = 2;
++ public static final int MAX_VIEW_DISTANCE = 32;
++
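++ // ticket levels used to hold chunks at each loader stage: ticking, generated through
++ // FULL status, and merely loaded from disk (EMPTY status)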
++ public static final int TICK_TICKET_LEVEL = 31;
++ public static final int GENERATED_TICKET_LEVEL = 33 + ChunkStatus.getDistance(ChunkStatus.FULL);
++ public static final int LOADED_TICKET_LEVEL = 33 + ChunkStatus.getDistance(ChunkStatus.EMPTY);
++
++ public static final record ViewDistances(
++ int tickViewDistance,
++ int loadViewDistance,
++ int sendViewDistance
++ ) {
++ public ViewDistances setTickViewDistance(final int distance) {
++ return new ViewDistances(distance, this.loadViewDistance, this.sendViewDistance);
++ }
++
++ public ViewDistances setLoadViewDistance(final int distance) {
++ return new ViewDistances(this.tickViewDistance, distance, this.sendViewDistance);
++ }
++
++ public ViewDistances setSendViewDistance(final int distance) {
++ return new ViewDistances(this.tickViewDistance, this.loadViewDistance, distance);
++ }
++ }
++
++ public static int getAPITickViewDistance(final Player player) {
++ return getAPITickViewDistance(((CraftPlayer)player).getHandle());
++ }
++
++ public static int getAPITickViewDistance(final ServerPlayer player) {
++ final ServerLevel level = (ServerLevel)player.level;
++ final PlayerChunkLoaderData data = player.chunkLoader;
++ if (data == null) {
++ return level.playerChunkLoader.getAPITickDistance();
++ }
++ return data.lastTickDistance;
++ }
++
++ public static int getAPIViewDistance(final Player player) {
++ return getAPIViewDistance(((CraftPlayer)player).getHandle());
++ }
++
++ public static int getAPIViewDistance(final ServerPlayer player) {
++ final ServerLevel level = (ServerLevel)player.level;
++ final PlayerChunkLoaderData data = player.chunkLoader;
++ if (data == null) {
++ return level.playerChunkLoader.getAPIViewDistance();
++ }
++ // load distance = api view distance + 1
++ return data.lastLoadDistance - 1;
++ }
++
++ public static int getLoadViewDistance(final ServerPlayer player) {
++ final ServerLevel level = (ServerLevel)player.level;
++ final PlayerChunkLoaderData data = player.chunkLoader;
++ if (data == null) {
++ return level.playerChunkLoader.getAPIViewDistance();
++ }
++ // load distance = api view distance + 1
++ return data.lastLoadDistance - 1;
++ }
++
++ public static int getAPISendViewDistance(final Player player) {
++ return getAPISendViewDistance(((CraftPlayer)player).getHandle());
++ }
++
++ public static int getAPISendViewDistance(final ServerPlayer player) {
++ final ServerLevel level = (ServerLevel)player.level;
++ final PlayerChunkLoaderData data = player.chunkLoader;
++ if (data == null) {
++ return level.playerChunkLoader.getAPISendViewDistance();
++ }
++ return data.lastSendDistance;
++ }
++
++ private final ServerLevel world;
++
++ public RegionisedPlayerChunkLoader(final ServerLevel world) {
++ this.world = world;
++ }
++
++ public void addPlayer(final ServerPlayer player) {
++ TickThread.ensureTickThread(player, "Cannot add player to player chunk loader async");
++ if (!player.isRealPlayer) {
++ return;
++ }
++
++ if (player.chunkLoader != null) {
++ throw new IllegalStateException("Player is already added to player chunk loader");
++ }
++
++ final PlayerChunkLoaderData loader = new PlayerChunkLoaderData(this.world, player);
++
++ player.chunkLoader = loader;
++ loader.add();
++ }
++
++ public void updatePlayer(final ServerPlayer player) {
++ final PlayerChunkLoaderData loader = player.chunkLoader;
++ if (loader != null) {
++ loader.update();
++ }
++ }
++
++ public void removePlayer(final ServerPlayer player) {
++ TickThread.ensureTickThread(player, "Cannot remove player from player chunk loader async");
++ if (!player.isRealPlayer) {
++ return;
++ }
++
++ final PlayerChunkLoaderData loader = player.chunkLoader;
++
++ if (loader == null) {
++ throw new IllegalStateException("Player is already removed from player chunk loader");
++ }
++
++ loader.remove();
++ player.chunkLoader = null;
++ }
++
++ public void setSendDistance(final int distance) {
++ this.world.setSendViewDistance(distance);
++ }
++
++ public void setLoadDistance(final int distance) {
++ this.world.setLoadViewDistance(distance);
++ }
++
++ public void setTickDistance(final int distance) {
++ this.world.setTickViewDistance(distance);
++ }
++
++ // Note: follow the player chunk loader so everything stays consistent...
++ public int getAPITickDistance() {
++ final ViewDistances distances = this.world.getViewDistances();
++ final int tickViewDistance = PlayerChunkLoaderData.getTickDistance(-1, distances.tickViewDistance);
++ return tickViewDistance;
++ }
++
++ public int getAPIViewDistance() {
++ final ViewDistances distances = this.world.getViewDistances();
++ final int tickViewDistance = PlayerChunkLoaderData.getTickDistance(-1, distances.tickViewDistance);
++ final int loadDistance = PlayerChunkLoaderData.getLoadViewDistance(tickViewDistance, -1, distances.loadViewDistance);
++
++ // loadDistance = api view distance + 1
++ return loadDistance - 1;
++ }
++
++ public int getAPISendViewDistance() {
++ final ViewDistances distances = this.world.getViewDistances();
++ final int tickViewDistance = PlayerChunkLoaderData.getTickDistance(-1, distances.tickViewDistance);
++ final int loadDistance = PlayerChunkLoaderData.getLoadViewDistance(tickViewDistance, -1, distances.loadViewDistance);
++ final int sendViewDistance = PlayerChunkLoaderData.getSendViewDistance(
++ loadDistance, -1, -1, distances.sendViewDistance
++ );
++
++ return sendViewDistance;
++ }
++
++ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ, final boolean borderOnly) {
++ return borderOnly ? this.isChunkSentBorderOnly(player, chunkX, chunkZ) : this.isChunkSent(player, chunkX, chunkZ);
++ }
++
++ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ) {
++ final PlayerChunkLoaderData loader = player.chunkLoader;
++ if (loader == null) {
++ return false;
++ }
++
++ return loader.sentChunks.contains(CoordinateUtils.getChunkKey(chunkX, chunkZ));
++ }
++
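++ // returns whether the chunk is on the edge of the sent area, i.e. whether any chunk
++ // in its 3x3 neighbourhood (including itself) has not been sent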
++ public boolean isChunkSentBorderOnly(final ServerPlayer player, final int chunkX, final int chunkZ) {
++ final PlayerChunkLoaderData loader = player.chunkLoader;
++ if (loader == null) {
++ return false;
++ }
++
++ for (int dz = -1; dz <= 1; ++dz) {
++ for (int dx = -1; dx <= 1; ++dx) {
++ if (!loader.sentChunks.contains(CoordinateUtils.getChunkKey(dx + chunkX, dz + chunkZ))) {
++ return true;
++ }
++ }
++ }
++
++ return false;
++ }
++
++ public void tick() {
++ TickThread.ensureTickThread("Cannot tick player chunk loader async");
++ for (final ServerPlayer player : this.world.players()) {
++ player.chunkLoader.update();
++ }
++ }
++
++ public void tickMidTick() {
++ final long time = System.nanoTime();
++ for (final ServerPlayer player : this.world.players()) {
++ player.chunkLoader.midTickUpdate(time);
++ }
++ }
++
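++ // iterates outward using a BFS over the four cardinal neighbours, which visits chunks
++ // in non-decreasing manhattan distance from the origin - the order the loader wants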
++ private static long[] generateBFSOrder(final int radius) {
++ final LongArrayList chunks = new LongArrayList();
++ final LongArrayFIFOQueue queue = new LongArrayFIFOQueue();
++ final LongOpenHashSet seen = new LongOpenHashSet();
++
++ seen.add(CoordinateUtils.getChunkKey(0, 0));
++ queue.enqueue(CoordinateUtils.getChunkKey(0, 0));
++ while (!queue.isEmpty()) {
++ final long chunk = queue.dequeueLong();
++ final int chunkX = CoordinateUtils.getChunkX(chunk);
++ final int chunkZ = CoordinateUtils.getChunkZ(chunk);
++
++ // important that the addition to the list is here, rather than when enqueueing neighbours
++ // ensures the order is actually kept
++ chunks.add(chunk);
++
++ // -x
++ final long n1 = CoordinateUtils.getChunkKey(chunkX - 1, chunkZ);
++ // -z
++ final long n2 = CoordinateUtils.getChunkKey(chunkX, chunkZ - 1);
++ // +x
++ final long n3 = CoordinateUtils.getChunkKey(chunkX + 1, chunkZ);
++ // +z
++ final long n4 = CoordinateUtils.getChunkKey(chunkX, chunkZ + 1);
++
++ final long[] list = new long[] {n1, n2, n3, n4};
++
++ for (final long neighbour : list) {
++ final int neighbourX = CoordinateUtils.getChunkX(neighbour);
++ final int neighbourZ = CoordinateUtils.getChunkZ(neighbour);
++ if (Math.max(Math.abs(neighbourX), Math.abs(neighbourZ)) > radius) {
++ // don't enqueue out of range
++ continue;
++ }
++ if (!seen.add(neighbour)) {
++ continue;
++ }
++ queue.enqueue(neighbour);
++ }
++ }
++
++ return chunks.toLongArray();
++ }
++
++ public static final class PlayerChunkLoaderData {
++
++ private static final AtomicLong ID_GENERATOR = new AtomicLong();
++ private final long id = ID_GENERATOR.incrementAndGet();
++ private final Long idBoxed = Long.valueOf(this.id);
++
++ // expected that for a given radius, this list contains the set of chunks ordered
++ // by manhattan distance
++ private static final long[][] SEARCH_RADIUS_ITERATION_LIST = new long[65][];
++ static {
++ for (int i = 0; i < SEARCH_RADIUS_ITERATION_LIST.length; ++i) {
++ // a BFS around -x, -z, +x, +z will give increasing manhattan distance
++ SEARCH_RADIUS_ITERATION_LIST[i] = generateBFSOrder(i);
++ }
++ }
++
++ private final ServerPlayer player;
++ private final ServerLevel world;
++
++ private int lastChunkX = Integer.MIN_VALUE;
++ private int lastChunkZ = Integer.MIN_VALUE;
++
++ private int lastSendDistance = Integer.MIN_VALUE;
++ private int lastLoadDistance = Integer.MIN_VALUE;
++ private int lastTickDistance = Integer.MIN_VALUE;
++
++ private int lastSentChunkCenterX = Integer.MIN_VALUE;
++ private int lastSentChunkCenterZ = Integer.MIN_VALUE;
++
++ private int lastSentChunkRadius = Integer.MIN_VALUE;
++ private int lastSentSimulationDistance = Integer.MIN_VALUE;
++
++ private boolean canGenerateChunks = true;
++
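++ // ticket operations are batched into this queue and flushed in bulk to the
++ // chunk holder manager, see flushDelayedTicketOps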
++ private final ArrayDeque<ChunkHolderManager.TicketOperation<?, ?>> delayedTicketOps = new ArrayDeque<>();
++ private final LongOpenHashSet sentChunks = new LongOpenHashSet();
++
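++ // per-chunk ticket stages form a state machine: NONE -> LOADING -> LOADED -> GENERATING
++ // -> GENERATED -> TICK, with downgrades (TICK -> GENERATED, or full removal) driven by
++ // the distance map remove callbacks below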
++ private static final byte CHUNK_TICKET_STAGE_NONE = 0;
++ private static final byte CHUNK_TICKET_STAGE_LOADING = 1;
++ private static final byte CHUNK_TICKET_STAGE_LOADED = 2;
++ private static final byte CHUNK_TICKET_STAGE_GENERATING = 3;
++ private static final byte CHUNK_TICKET_STAGE_GENERATED = 4;
++ private static final byte CHUNK_TICKET_STAGE_TICK = 5;
++ private static final int[] TICKET_STAGE_TO_LEVEL = new int[] {
++ ChunkHolderManager.MAX_TICKET_LEVEL + 1,
++ LOADED_TICKET_LEVEL,
++ LOADED_TICKET_LEVEL,
++ GENERATED_TICKET_LEVEL,
++ GENERATED_TICKET_LEVEL,
++ TICK_TICKET_LEVEL
++ };
++ private final Long2ByteOpenHashMap chunkTicketStage = new Long2ByteOpenHashMap();
++ {
++ this.chunkTicketStage.defaultReturnValue(CHUNK_TICKET_STAGE_NONE);
++ }
++
++ // rate limiting
++ private final MultiIntervalledCounter chunkSendCounter = new MultiIntervalledCounter(
++ TimeUnit.MILLISECONDS.toNanos(50L), TimeUnit.MILLISECONDS.toNanos(250L), TimeUnit.SECONDS.toNanos(1L)
++ );
++ private final MultiIntervalledCounter chunkLoadTicketCounter = new MultiIntervalledCounter(
++ TimeUnit.MILLISECONDS.toNanos(50L), TimeUnit.MILLISECONDS.toNanos(250L), TimeUnit.SECONDS.toNanos(1L)
++ );
++ private final MultiIntervalledCounter chunkGenerateTicketCounter = new MultiIntervalledCounter(
++ TimeUnit.MILLISECONDS.toNanos(50L), TimeUnit.MILLISECONDS.toNanos(250L), TimeUnit.SECONDS.toNanos(1L)
++ );
++
++ // queues
++ private final LongComparator CLOSEST_MANHATTAN_DIST = (final long c1, final long c2) -> {
++ final int c1x = CoordinateUtils.getChunkX(c1);
++ final int c1z = CoordinateUtils.getChunkZ(c1);
++
++ final int c2x = CoordinateUtils.getChunkX(c2);
++ final int c2z = CoordinateUtils.getChunkZ(c2);
++
++ final int centerX = PlayerChunkLoaderData.this.lastChunkX;
++ final int centerZ = PlayerChunkLoaderData.this.lastChunkZ;
++
++ return Integer.compare(
++ Math.abs(c1x - centerX) + Math.abs(c1z - centerZ),
++ Math.abs(c2x - centerX) + Math.abs(c2z - centerZ)
++ );
++ };
++ private final LongHeapPriorityQueue sendQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST);
++ private final LongHeapPriorityQueue tickingQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST);
++ private final LongHeapPriorityQueue generatingQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST);
++ private final LongHeapPriorityQueue genQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST);
++ private final LongHeapPriorityQueue loadingQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST);
++ private final LongHeapPriorityQueue loadQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST);
++
++ public PlayerChunkLoaderData(final ServerLevel world, final ServerPlayer player) {
++ this.world = world;
++ this.player = player;
++ }
++
++ private void flushDelayedTicketOps() {
++ if (this.delayedTicketOps.isEmpty()) {
++ return;
++ }
++ this.world.chunkTaskScheduler.chunkHolderManager.pushDelayedTicketUpdates(this.delayedTicketOps);
++ this.delayedTicketOps.clear();
++ this.world.chunkTaskScheduler.chunkHolderManager.tryDrainTicketUpdates();
++ }
++
++ private void pushDelayedTicketOp(final ChunkHolderManager.TicketOperation<?, ?> op) {
++ this.delayedTicketOps.addLast(op);
++ }
++
++ private void sendChunk(final int chunkX, final int chunkZ) {
++ if (this.sentChunks.add(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
++ this.world.getChunkSource().chunkMap.updateChunkTracking(this.player,
++ new ChunkPos(chunkX, chunkZ), new MutableObject<>(), false, true); // unloaded, loaded
++ return;
++ }
++ throw new IllegalStateException();
++ }
++
++ private void sendUnloadChunk(final int chunkX, final int chunkZ) {
++ if (!this.sentChunks.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
++ return;
++ }
++ this.sendUnloadChunkRaw(chunkX, chunkZ);
++ }
++
++ private void sendUnloadChunkRaw(final int chunkX, final int chunkZ) {
++ this.player.getLevel().getChunkSource().chunkMap.updateChunkTracking(this.player,
++ new ChunkPos(chunkX, chunkZ), null, true, false); // unloaded, loaded
++ }
++
++ private final SingleUserAreaMap<PlayerChunkLoaderData> broadcastMap = new SingleUserAreaMap<>(this) {
++ @Override
++ protected void addCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) {
++ // do nothing, we only care about remove
++ }
++
++ @Override
++ protected void removeCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) {
++ parameter.sendUnloadChunk(chunkX, chunkZ);
++ }
++ };
++ private final SingleUserAreaMap<PlayerChunkLoaderData> loadTicketCleanup = new SingleUserAreaMap<>(this) {
++ @Override
++ protected void addCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) {
++ // do nothing, we only care about remove
++ }
++
++ @Override
++ protected void removeCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) {
++ final long chunk = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ final byte ticketStage = parameter.chunkTicketStage.remove(chunk);
++ final int level = TICKET_STAGE_TO_LEVEL[ticketStage];
++ if (level > ChunkHolderManager.MAX_TICKET_LEVEL) {
++ return;
++ }
++
++ parameter.pushDelayedTicketOp(ChunkHolderManager.TicketOperation.addAndRemove(
++ chunk,
++ TicketType.UNKNOWN, level, new ChunkPos(chunkX, chunkZ),
++ REGION_PLAYER_TICKET, level, parameter.idBoxed
++ ));
++ }
++ };
++ private final SingleUserAreaMap<PlayerChunkLoaderData> tickMap = new SingleUserAreaMap<>(this) {
++ @Override
++ protected void addCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) {
++ // do nothing, we will detect ticking chunks when we try to load them
++ }
++
++ @Override
++ protected void removeCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) {
++ final long chunk = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ // note: by the time this is called, the tick cleanup should have run - so, if the chunk is at
++ // the tick stage, then it was deemed in range for loading. Thus, we need to move it to generated
++ if (!parameter.chunkTicketStage.replace(chunk, CHUNK_TICKET_STAGE_TICK, CHUNK_TICKET_STAGE_GENERATED)) {
++ return;
++ }
++
++ // Since we are possibly downgrading the ticket level, we add an unknown ticket so that
++ // the level is kept until tick().
++ parameter.pushDelayedTicketOp(ChunkHolderManager.TicketOperation.addAndRemove(
++ chunk,
++ TicketType.UNKNOWN, TICK_TICKET_LEVEL, new ChunkPos(chunkX, chunkZ),
++ REGION_PLAYER_TICKET, TICK_TICKET_LEVEL, parameter.idBoxed
++ ));
++ // keep chunk at new generated level
++ parameter.pushDelayedTicketOp(ChunkHolderManager.TicketOperation.addOp(
++ chunk,
++ REGION_PLAYER_TICKET, GENERATED_TICKET_LEVEL, parameter.idBoxed
++ ));
++ }
++ };
++
++ private static boolean wantChunkLoaded(final int centerX, final int centerZ, final int chunkX, final int chunkZ,
++ final int sendRadius) {
++ // expect sendRadius to be = 1 + target viewable radius
++ return ChunkMap.isChunkInRange(chunkX, chunkZ, centerX, centerZ, sendRadius);
++ }
++
++ private static int getClientViewDistance(final ServerPlayer player) {
++ final Integer vd = player.clientViewDistance;
++ return vd == null ? -1 : Math.max(0, vd.intValue());
++ }
++
++ private static int getTickDistance(final int playerTickViewDistance, final int worldTickViewDistance) {
++ return playerTickViewDistance < 0 ? worldTickViewDistance : playerTickViewDistance;
++ }
++
++ private static int getLoadViewDistance(final int tickViewDistance, final int playerLoadViewDistance,
++ final int worldLoadViewDistance) {
++ return Math.max(tickViewDistance + 1, playerLoadViewDistance < 0 ? worldLoadViewDistance : playerLoadViewDistance);
++ }
++
++ private static int getSendViewDistance(final int loadViewDistance, final int clientViewDistance,
++ final int playerSendViewDistance, final int worldSendViewDistance) {
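++ // precedence: explicit per-player send distance, then (when auto-configured) the client's
++ // reported view distance + 1, then the world default; the result is always capped at the load distance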
++ return Math.min(
++ loadViewDistance,
++ playerSendViewDistance < 0 ? (!GlobalConfiguration.get().chunkLoadingAdvanced.autoConfigSendDistance || clientViewDistance < 0 ? (worldSendViewDistance < 0 ? loadViewDistance : worldSendViewDistance) : clientViewDistance + 1) : playerSendViewDistance
++ );
++ }
++
++ private Packet<?> updateClientChunkRadius(final int radius) {
++ this.lastSentChunkRadius = radius;
++ return new ClientboundSetChunkCacheRadiusPacket(radius);
++ }
++
++ private Packet<?> updateClientSimulationDistance(final int distance) {
++ this.lastSentSimulationDistance = distance;
++ return new ClientboundSetSimulationDistancePacket(distance);
++ }
++
++ private Packet<?> updateClientChunkCenter(final int chunkX, final int chunkZ) {
++ this.lastSentChunkCenterX = chunkX;
++ this.lastSentChunkCenterZ = chunkZ;
++ return new ClientboundSetChunkCacheCenterPacket(chunkX, chunkZ);
++ }
++
++ private boolean canPlayerGenerateChunks() {
++ return !this.player.isSpectator() || this.world.getGameRules().getBoolean(GameRules.RULE_SPECTATORSGENERATECHUNKS);
++ }
++
++ private int getMaxChunkLoads() {
++ final int radiusChunks = (2 * this.lastLoadDistance + 1) * (2 * this.lastLoadDistance + 1);
++ int configLimit = GlobalConfiguration.get().chunkLoadingAdvanced.playerMaxConcurrentChunkLoads;
++ if (configLimit == 0) {
++ // by default, only allow 1/10th of the chunks in the view distance to be concurrently active
++ configLimit = Math.max(5, radiusChunks / 10);
++ } else if (configLimit < 0) {
++ configLimit = Integer.MAX_VALUE;
++ } // else: use the value configured
++ configLimit = configLimit - this.loadingQueue.size();
++
++ int rateLimit;
++ double configRate = GlobalConfiguration.get().chunkLoadingBasic.playerMaxChunkLoadRate;
++ if (configRate < 0.0 || configRate > (1000.0 * (double)radiusChunks)) {
++ // getMaxCountBeforeViolation may not work with large config rates, so by checking against the load count we ensure
++ // there are no issues with the cast to integer
++ rateLimit = Integer.MAX_VALUE;
++ } else {
++ rateLimit = (int)this.chunkLoadTicketCounter.getMaxCountBeforeViolation(configRate);
++ }
++
++ return Math.min(configLimit, rateLimit);
++ }
++
++ private int getMaxChunkGenerates() {
++ final int radiusChunks = (2 * this.lastLoadDistance + 1) * (2 * this.lastLoadDistance + 1);
++ int configLimit = GlobalConfiguration.get().chunkLoadingAdvanced.playerMaxConcurrentChunkGenerates;
++ if (configLimit == 0) {
++ // by default, only allow 1/10th of the chunks in the view distance to be concurrently active
++ configLimit = Math.max(5, radiusChunks / 10);
++ } else if (configLimit < 0) {
++ configLimit = Integer.MAX_VALUE;
++ } // else: use the value configured
++ configLimit = configLimit - this.generatingQueue.size();
++
++ int rateLimit;
++ double configRate = GlobalConfiguration.get().chunkLoadingBasic.playerMaxChunkGenerateRate;
++ if (configRate < 0.0 || configRate > (1000.0 * (double)radiusChunks)) {
++ // getMaxCountBeforeViolation may not work with large config rates, so by checking against the load count we ensure
++ // there are no issues with the cast to integer
++ rateLimit = Integer.MAX_VALUE;
++ } else {
++ rateLimit = (int)this.chunkGenerateTicketCounter.getMaxCountBeforeViolation(configRate);
++ }
++
++ return Math.min(configLimit, rateLimit);
++ }
++
++ private int getMaxChunkSends() {
++ final int radiusChunks = (2 * this.lastSendDistance + 1) * (2 * this.lastSendDistance + 1);
++
++ int rateLimit;
++ double configRate = GlobalConfiguration.get().chunkLoadingBasic.playerMaxChunkSendRate;
++ if (configRate < 0.0 || configRate > (1000.0 * (double)radiusChunks)) {
++ // getMaxCountBeforeViolation may not work with large config rates, so by checking against the load count we ensure
++ // there are no issues with the cast to integer
++ rateLimit = Integer.MAX_VALUE;
++ } else {
++ rateLimit = (int)this.chunkSendCounter.getMaxCountBeforeViolation(configRate);
++ }
++
++ return rateLimit;
++ }
++
++ private boolean wantChunkSent(final int chunkX, final int chunkZ) {
++ final int dx = this.lastChunkX - chunkX;
++ final int dz = this.lastChunkZ - chunkZ;
++ return Math.max(Math.abs(dx), Math.abs(dz)) <= this.lastSendDistance && wantChunkLoaded(
++ this.lastChunkX, this.lastChunkZ, chunkX, chunkZ, this.lastSendDistance
++ );
++ }
++
++ private boolean wantChunkTicked(final int chunkX, final int chunkZ) {
++ final int dx = this.lastChunkX - chunkX;
++ final int dz = this.lastChunkZ - chunkZ;
++ return Math.max(Math.abs(dx), Math.abs(dz)) <= this.lastTickDistance;
++ }
++
++ void midTickUpdate(final long time) {
++ TickThread.ensureTickThread(this.player, "Cannot tick player chunk loader async");
++ // update rate limits
++ this.chunkSendCounter.update(time);
++ this.chunkGenerateTicketCounter.update(time);
++ this.chunkLoadTicketCounter.update(time);
++
++ // try to progress chunk loads
++ while (!this.loadingQueue.isEmpty()) {
++ final long pendingLoadChunk = this.loadingQueue.firstLong();
++ final int pendingChunkX = CoordinateUtils.getChunkX(pendingLoadChunk);
++ final int pendingChunkZ = CoordinateUtils.getChunkZ(pendingLoadChunk);
++ final ChunkAccess pending = this.world.chunkSource.getChunkAtImmediately(pendingChunkX, pendingChunkZ);
++ if (pending == null) {
++ // nothing to do here
++ break;
++ }
++ // chunk has loaded, so we can take it out of the queue
++ this.loadingQueue.dequeueLong();
++
++ // try to move to generate queue
++ final byte prev = this.chunkTicketStage.put(pendingLoadChunk, CHUNK_TICKET_STAGE_LOADED);
++ if (prev != CHUNK_TICKET_STAGE_LOADING) {
++ throw new IllegalStateException("Previous state should be " + CHUNK_TICKET_STAGE_LOADING + ", not " + prev);
++ }
++
++ if (this.canGenerateChunks || this.isLoadedChunkGeneratable(pending)) {
++ this.genQueue.enqueue(pendingLoadChunk);
++ } // else: don't want to generate, so just leave it loaded
++ }
++
++ // try to push more chunk loads
++ int loadSlots;
++ while ((loadSlots = Math.min(this.getMaxChunkLoads(), this.loadQueue.size())) > 0) {
++ final LongArrayList chunks = new LongArrayList(loadSlots);
++ int actualLoadsQueued = 0;
++ for (int i = 0; i < loadSlots; ++i) {
++ final long chunk = this.loadQueue.dequeueLong();
++ final byte prev = this.chunkTicketStage.put(chunk, CHUNK_TICKET_STAGE_LOADING);
++ if (prev != CHUNK_TICKET_STAGE_NONE) {
++ throw new IllegalStateException("Previous state should be " + CHUNK_TICKET_STAGE_NONE + ", not " + prev);
++ }
++ this.pushDelayedTicketOp(
++ ChunkHolderManager.TicketOperation.addOp(
++ chunk,
++ REGION_PLAYER_TICKET, LOADED_TICKET_LEVEL, this.idBoxed
++ )
++ );
++ chunks.add(chunk);
++ this.loadingQueue.enqueue(chunk);
++
++ if (this.world.chunkSource.getChunkAtImmediately(CoordinateUtils.getChunkX(chunk), CoordinateUtils.getChunkZ(chunk)) == null) {
++ // this is a good enough approximation for counting, but NOT for actual state management
++ ++actualLoadsQueued;
++ }
++ }
++ if (actualLoadsQueued > 0) {
++ this.chunkLoadTicketCounter.addTime(time, (long)actualLoadsQueued);
++ }
++
++ // here we need to flush tickets, as scheduleChunkLoad requires tickets to be propagated with addTicket = false
++ this.flushDelayedTicketOps();
++ // we only need to call scheduleChunkLoad because the loaded ticket level is not enough to start the
++ // chunk load on its own - only generation ticket levels start work, but those would start generation too
++ // propagate levels
++ // Note: this CAN call plugin logic, so it is VITAL that our bookkeeping logic is completely done by the time this is invoked
++ this.world.chunkTaskScheduler.chunkHolderManager.processTicketUpdates();
++
++ for (int i = 0; i < loadSlots; ++i) {
++ final long queuedLoadChunk = chunks.getLong(i);
++ final int queuedChunkX = CoordinateUtils.getChunkX(queuedLoadChunk);
++ final int queuedChunkZ = CoordinateUtils.getChunkZ(queuedLoadChunk);
++ this.world.chunkTaskScheduler.scheduleChunkLoad(
++ queuedChunkX, queuedChunkZ, ChunkStatus.EMPTY, false, PrioritisedExecutor.Priority.NORMAL, null
++ );
++ }
++ }
++
++ // try to progress chunk generations
++ while (!this.generatingQueue.isEmpty()) {
++ final long pendingGenChunk = this.generatingQueue.firstLong();
++ final int pendingChunkX = CoordinateUtils.getChunkX(pendingGenChunk);
++ final int pendingChunkZ = CoordinateUtils.getChunkZ(pendingGenChunk);
++ final LevelChunk pending = this.world.chunkSource.getChunkAtIfLoadedMainThreadNoCache(pendingChunkX, pendingChunkZ);
++ if (pending == null) {
++ // nothing to do here
++ break;
++ }
++
++ // chunk has generated, so we can take it out of queue
++ this.generatingQueue.dequeueLong();
++
++ final byte prev = this.chunkTicketStage.put(pendingGenChunk, CHUNK_TICKET_STAGE_GENERATED);
++ if (prev != CHUNK_TICKET_STAGE_GENERATING) {
++ throw new IllegalStateException("Previous state should be " + CHUNK_TICKET_STAGE_GENERATING + ", not " + prev);
++ }
++
++ // try to move to send queue
++ if (this.wantChunkSent(pendingChunkX, pendingChunkZ)) {
++ this.sendQueue.enqueue(pendingGenChunk);
++ }
++ // try to move to tick queue
++ if (this.wantChunkTicked(pendingChunkX, pendingChunkZ)) {
++ this.tickingQueue.enqueue(pendingGenChunk);
++ }
++ }
++
++ // try to push more chunk generations
++ int genSlots;
++ while ((genSlots = Math.min(this.getMaxChunkGenerates(), this.genQueue.size())) > 0) {
++ int actualGenerationsQueued = 0;
++ for (int i = 0; i < genSlots; ++i) {
++ final long chunk = this.genQueue.dequeueLong();
++ final byte prev = this.chunkTicketStage.put(chunk, CHUNK_TICKET_STAGE_GENERATING);
++ if (prev != CHUNK_TICKET_STAGE_LOADED) {
++ throw new IllegalStateException("Previous state should be " + CHUNK_TICKET_STAGE_LOADED + ", not " + prev);
++ }
++ this.pushDelayedTicketOp(
++ ChunkHolderManager.TicketOperation.addAndRemove(
++ chunk,
++ REGION_PLAYER_TICKET, GENERATED_TICKET_LEVEL, this.idBoxed,
++ REGION_PLAYER_TICKET, LOADED_TICKET_LEVEL, this.idBoxed
++ )
++ );
++ this.generatingQueue.enqueue(chunk);
++ final ChunkAccess existingChunk = this.world.chunkSource.getChunkAtImmediately(CoordinateUtils.getChunkX(chunk), CoordinateUtils.getChunkZ(chunk));
++ if (existingChunk == null || !existingChunk.getStatus().isOrAfter(ChunkStatus.FULL)) {
++ // this is a good enough approximation for counting, but NOT for actual state management
++ ++actualGenerationsQueued;
++ }
++ }
++ if (actualGenerationsQueued > 0) {
++ this.chunkGenerateTicketCounter.addTime(time, (long)actualGenerationsQueued);
++ }
++ }
++
++ // try to pull ticking chunks
++ tick_check_outer:
++ while (!this.tickingQueue.isEmpty()) {
++ final long pendingTicking = this.tickingQueue.firstLong();
++ final int pendingChunkX = CoordinateUtils.getChunkX(pendingTicking);
++ final int pendingChunkZ = CoordinateUtils.getChunkZ(pendingTicking);
++
++ final int tickingReq = 2;
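++ // only promote to ticking once every chunk in the 5x5 neighbourhood is generated or already ticking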
++ for (int dz = -tickingReq; dz <= tickingReq; ++dz) {
++ for (int dx = -tickingReq; dx <= tickingReq; ++dx) {
++ if ((dx | dz) == 0) {
++ continue;
++ }
++ final long neighbour = CoordinateUtils.getChunkKey(dx + pendingChunkX, dz + pendingChunkZ);
++ final byte stage = this.chunkTicketStage.get(neighbour);
++ if (stage != CHUNK_TICKET_STAGE_GENERATED && stage != CHUNK_TICKET_STAGE_TICK) {
++ break tick_check_outer;
++ }
++ }
++ }
++ // only gets here if all neighbours were marked as generated or ticking themselves
++ this.tickingQueue.dequeueLong();
++ this.pushDelayedTicketOp(
++ ChunkHolderManager.TicketOperation.addAndRemove(
++ pendingTicking,
++ REGION_PLAYER_TICKET, TICK_TICKET_LEVEL, this.idBoxed,
++ REGION_PLAYER_TICKET, GENERATED_TICKET_LEVEL, this.idBoxed
++ )
++ );
++ // there is no queue to add after ticking
++ final byte prev = this.chunkTicketStage.put(pendingTicking, CHUNK_TICKET_STAGE_TICK);
++ if (prev != CHUNK_TICKET_STAGE_GENERATED) {
++ throw new IllegalStateException("Previous state should be " + CHUNK_TICKET_STAGE_GENERATED + ", not " + prev);
++ }
++ }
++
++ // try to pull sending chunks
++ final int maxSends = this.getMaxChunkSends();
++ final int sendSlots = Math.min(maxSends, this.sendQueue.size());
++ for (int i = 0; i < sendSlots; ++i) {
++ final long pendingSend = this.sendQueue.firstLong();
++ final int pendingSendX = CoordinateUtils.getChunkX(pendingSend);
++ final int pendingSendZ = CoordinateUtils.getChunkZ(pendingSend);
++ final LevelChunk chunk = this.world.chunkSource.getChunkAtIfLoadedMainThreadNoCache(pendingSendX, pendingSendZ);
++ if (!chunk.areNeighboursLoaded(1)) {
++ // nothing to do
++ break;
++ }
++ this.sendQueue.dequeueLong();
++
++ this.sendChunk(pendingSendX, pendingSendZ);
++ }
++ if (sendSlots > 0) {
++ this.chunkSendCounter.addTime(time, sendSlots);
++ }
++
++ this.flushDelayedTicketOps();
++ // we assume ticket level propagation happens after this call
++ }
++
++ void add() {
++ final ViewDistances playerDistances = this.player.getViewDistances();
++ final ViewDistances worldDistances = this.world.getViewDistances();
++ final int chunkX = this.player.chunkPosition().x;
++ final int chunkZ = this.player.chunkPosition().z;
++
++ final int tickViewDistance = getTickDistance(playerDistances.tickViewDistance, worldDistances.tickViewDistance);
++ // load view cannot be less-than tick view + 1
++ final int loadViewDistance = getLoadViewDistance(tickViewDistance, playerDistances.loadViewDistance, worldDistances.loadViewDistance);
++ // send view cannot be greater-than load view
++ final int clientViewDistance = getClientViewDistance(this.player);
++ final int sendViewDistance = getSendViewDistance(loadViewDistance, clientViewDistance, playerDistances.sendViewDistance, worldDistances.sendViewDistance);
++
++ // send view distances
++ this.player.connection.send(this.updateClientChunkRadius(sendViewDistance));
++ this.player.connection.send(this.updateClientSimulationDistance(tickViewDistance));
++
++ // add to distance maps
++ this.broadcastMap.add(chunkX, chunkZ, sendViewDistance);
++ this.loadTicketCleanup.add(chunkX, chunkZ, loadViewDistance + 1);
++ this.tickMap.add(chunkX, chunkZ, tickViewDistance);
++
++ // update chunk center
++ this.player.connection.send(this.updateClientChunkCenter(chunkX, chunkZ));
++
++ // now we can update
++ this.update();
++ }
++
++ private boolean isLoadedChunkGeneratable(final int chunkX, final int chunkZ) {
++ return this.isLoadedChunkGeneratable(this.world.chunkSource.getChunkAtImmediately(chunkX, chunkZ));
++ }
++
++ private boolean isLoadedChunkGeneratable(final ChunkAccess chunkAccess) {
++ final BelowZeroRetrogen belowZeroRetrogen;
++ return chunkAccess != null && (
++ chunkAccess.getStatus() == ChunkStatus.FULL ||
++ ((belowZeroRetrogen = chunkAccess.getBelowZeroRetrogen()) != null && belowZeroRetrogen.targetStatus().isOrAfter(ChunkStatus.FULL))
++ );
++ }
++
++ void update() {
++ final ViewDistances playerDistances = this.player.getViewDistances();
++ final ViewDistances worldDistances = this.world.getViewDistances();
++
++ final int tickViewDistance = getTickDistance(playerDistances.tickViewDistance, worldDistances.tickViewDistance);
++ // load view cannot be less-than tick view + 1
++ final int loadViewDistance = getLoadViewDistance(tickViewDistance, playerDistances.loadViewDistance, worldDistances.loadViewDistance);
++ // send view cannot be greater-than load view
++ final int clientViewDistance = getClientViewDistance(this.player);
++ final int sendViewDistance = getSendViewDistance(loadViewDistance, clientViewDistance, playerDistances.sendViewDistance, worldDistances.sendViewDistance);
++
++ final ChunkPos playerPos = this.player.chunkPosition();
++ final boolean canGenerateChunks = this.canPlayerGenerateChunks();
++ final int currentChunkX = playerPos.x;
++ final int currentChunkZ = playerPos.z;
++
++ final int prevChunkX = this.lastChunkX;
++ final int prevChunkZ = this.lastChunkZ;
++
++ if (
++ // has view distance stayed the same?
++ sendViewDistance == this.lastSendDistance
++ && loadViewDistance == this.lastLoadDistance
++ && tickViewDistance == this.lastTickDistance
++
++ // has our chunk stayed the same?
++ && prevChunkX == currentChunkX
++ && prevChunkZ == currentChunkZ
++
++ // can we still generate chunks?
++ && this.canGenerateChunks == canGenerateChunks
++ ) {
++ // nothing we care about changed, so we're not re-calculating
++ return;
++ }
++
++ // update distance maps
++ this.broadcastMap.update(currentChunkX, currentChunkZ, sendViewDistance);
++ this.loadTicketCleanup.update(currentChunkX, currentChunkZ, loadViewDistance + 1);
++ this.tickMap.update(currentChunkX, currentChunkZ, tickViewDistance);
++ if (sendViewDistance > loadViewDistance || tickViewDistance > (loadViewDistance - 1)) {
++ throw new IllegalStateException();
++ }
++
++ // update VDs for client
++ // this should be after the distance map updates, as they will send unload packets
++ if (this.lastSentChunkRadius != sendViewDistance) {
++ this.player.connection.send(this.updateClientChunkRadius(sendViewDistance));
++ }
++ if (this.lastSentSimulationDistance != tickViewDistance) {
++ this.player.connection.send(this.updateClientSimulationDistance(tickViewDistance));
++ }
++
++ this.sendQueue.clear();
++ this.tickingQueue.clear();
++ this.generatingQueue.clear();
++ this.genQueue.clear();
++ this.loadingQueue.clear();
++ this.loadQueue.clear();
++
++ this.lastChunkX = currentChunkX;
++ this.lastChunkZ = currentChunkZ;
++ this.lastSendDistance = sendViewDistance;
++ this.lastLoadDistance = loadViewDistance;
++ this.lastTickDistance = tickViewDistance;
++ this.canGenerateChunks = canGenerateChunks;
++
++ // +1 since we need to load chunks +1 around the load view distance...
++ final long[] toIterate = SEARCH_RADIUS_ITERATION_LIST[loadViewDistance + 1];
++ // the iteration order is by increasing manhattan distance - so, we do NOT need to
++ // sort anything in the queue!
++ for (final long deltaChunk : toIterate) {
++ final int dx = CoordinateUtils.getChunkX(deltaChunk);
++ final int dz = CoordinateUtils.getChunkZ(deltaChunk);
++ final int chunkX = dx + currentChunkX;
++ final int chunkZ = dz + currentChunkZ;
++ final long chunk = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ final int squareDistance = Math.max(Math.abs(dx), Math.abs(dz));
++ final int manhattanDistance = Math.abs(dx) + Math.abs(dz);
++
++ // since chunk sending is not by radius alone, we need an extra check here to account for
++ // everything <= sendDistance
++ // Note: Vanilla may want to send chunks outside the send view distance, so we do need
++ // the dist <= view check
++ final boolean sendChunk = squareDistance <= sendViewDistance
++ && wantChunkLoaded(currentChunkX, currentChunkZ, chunkX, chunkZ, sendViewDistance);
++ final boolean sentChunk = sendChunk ? this.sentChunks.contains(chunk) : this.sentChunks.remove(chunk);
++
++ if (!sendChunk && sentChunk) {
++ // have sent the chunk, but don't want it anymore
++ // unload it now
++ this.sendUnloadChunkRaw(chunkX, chunkZ);
++ }
++
++ final byte stage = this.chunkTicketStage.get(chunk);
++ switch (stage) {
++ case CHUNK_TICKET_STAGE_NONE: {
++ // we want the chunk to be at least loaded
++ this.loadQueue.enqueue(chunk);
++ break;
++ }
++ case CHUNK_TICKET_STAGE_LOADING: {
++ this.loadingQueue.enqueue(chunk);
++ break;
++ }
++ case CHUNK_TICKET_STAGE_LOADED: {
++ if (canGenerateChunks || this.isLoadedChunkGeneratable(chunkX, chunkZ)) {
++ this.genQueue.enqueue(chunk);
++ }
++ break;
++ }
++ case CHUNK_TICKET_STAGE_GENERATING: {
++ this.generatingQueue.enqueue(chunk);
++ break;
++ }
++ case CHUNK_TICKET_STAGE_GENERATED: {
++ if (sendChunk && !sentChunk) {
++ this.sendQueue.enqueue(chunk);
++ }
++ if (squareDistance <= tickViewDistance) {
++ this.tickingQueue.enqueue(chunk);
++ }
++ break;
++ }
++ case CHUNK_TICKET_STAGE_TICK: {
++ if (sendChunk && !sentChunk) {
++ this.sendQueue.enqueue(chunk);
++ }
++ break;
++ }
++ default: {
++ throw new IllegalStateException("Unknown stage: " + stage);
++ }
++ }
++ }
++
++ // update the chunk center
++ // this must be done last so that the client does not ignore any of our unload chunk packets above
++ if (this.lastSentChunkCenterX != currentChunkX || this.lastSentChunkCenterZ != currentChunkZ) {
++ this.player.connection.send(this.updateClientChunkCenter(currentChunkX, currentChunkZ));
++ }
++
++ this.flushDelayedTicketOps();
++ }
++
++ void remove() {
++ // sends the chunk unload packets
++ this.broadcastMap.remove();
++ // cleans up loading/generating tickets
++ this.loadTicketCleanup.remove();
++ // cleans up ticking tickets
++ this.tickMap.remove();
++
++ // purge queues
++ this.sendQueue.clear();
++ this.tickingQueue.clear();
++ this.generatingQueue.clear();
++ this.genQueue.clear();
++ this.loadingQueue.clear();
++ this.loadQueue.clear();
++
++ // flush ticket changes
++ this.flushDelayedTicketOps();
++
++ // now all tickets should be removed, which is all of our external state
++ }
++ }
++
++ private static final class MultiIntervalledCounter {
++
++ private final IntervalledCounter[] counters;
++
++ public MultiIntervalledCounter(final long... intervals) {
++ final IntervalledCounter[] counters = new IntervalledCounter[intervals.length];
++ for (int i = 0; i < intervals.length; ++i) {
++ counters[i] = new IntervalledCounter(intervals[i]);
++ }
++ this.counters = counters;
++ }
++
++ public long getMaxCountBeforeViolation(final double rate) {
++ long count = Long.MAX_VALUE;
++ for (final IntervalledCounter counter : this.counters) {
++ final long sum = counter.getSum();
++ final long interval = counter.getInterval();
++ // rate = sum / interval
++ // so, sum = rate*interval
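++ // interval is in nanoseconds while rate is per second, hence the 1.0E-9 factor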
++ final long maxSum = (long)Math.floor(rate * (1.0E-9 * (double)interval));
++ final long diff = maxSum - sum;
++ if (diff < count) {
++ count = diff;
++ }
++ }
++
++ return Math.max(0L, count);
++ }
++
++ public void update(final long time) {
++ for (final IntervalledCounter counter : this.counters) {
++ counter.updateCurrentTime(time);
++ }
++ }
++
++ public void updateAndAdd(final long count, final long time) {
++ for (final IntervalledCounter counter : this.counters) {
++ counter.updateAndAdd(count, time);
++ }
++ }
++
++ public void addTime(final long time, final long count) {
++ for (final IntervalledCounter counter : this.counters) {
++ counter.addTime(time, count);
++ }
++ }
++
++ public double getMaxRate() {
++ double ret = 0.0;
++
++ for (final IntervalledCounter counter : this.counters) {
++ final double counterRate = counter.getRate();
++ if (counterRate > ret) {
++ ret = counterRate;
++ }
++ }
++
++ return ret;
++ }
++ }
++
++ // TODO rebase into util patch
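++ // maintains a single square of chunks around one centre, firing add/remove callbacks as
++ // the centre or radius changes; backs the broadcast, ticket cleanup and tick maps above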
++ public static abstract class SingleUserAreaMap<T> {
++
++ private static final int NOT_SET = Integer.MIN_VALUE;
++
++ private final T parameter;
++ private int lastChunkX = NOT_SET;
++ private int lastChunkZ = NOT_SET;
++ private int distance = NOT_SET;
++
++ public SingleUserAreaMap(final T parameter) {
++ this.parameter = parameter;
++ }
++
++ /* math sign function except 0 returns 1 */
++ protected static int sign(int val) {
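++ // val >> 31 is 0 for non-negative and -1 for negative values; OR with 1 maps those to +1 / -1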
++ return 1 | (val >> (Integer.SIZE - 1));
++ }
++
++ protected abstract void addCallback(final T parameter, final int chunkX, final int chunkZ);
++
++ protected abstract void removeCallback(final T parameter, final int chunkX, final int chunkZ);
++
++ private void addToNew(final T parameter, final int chunkX, final int chunkZ, final int distance) {
++ final int maxX = chunkX + distance;
++ final int maxZ = chunkZ + distance;
++
++ for (int cx = chunkX - distance; cx <= maxX; ++cx) {
++ for (int cz = chunkZ - distance; cz <= maxZ; ++cz) {
++ this.addCallback(parameter, cx, cz);
++ }
++ }
++ }
++
++ private void removeFromOld(final T parameter, final int chunkX, final int chunkZ, final int distance) {
++ final int maxX = chunkX + distance;
++ final int maxZ = chunkZ + distance;
++
++ for (int cx = chunkX - distance; cx <= maxX; ++cx) {
++ for (int cz = chunkZ - distance; cz <= maxZ; ++cz) {
++ this.removeCallback(parameter, cx, cz);
++ }
++ }
++ }
++
++ public final boolean add(final int chunkX, final int chunkZ, final int distance) {
++ if (distance < 0) {
++ throw new IllegalArgumentException(Integer.toString(distance));
++ }
++ if (this.lastChunkX != NOT_SET) {
++ return false;
++ }
++ this.lastChunkX = chunkX;
++ this.lastChunkZ = chunkZ;
++ this.distance = distance;
++
++ this.addToNew(this.parameter, chunkX, chunkZ, distance);
++
++ return true;
++ }
++
++ public final boolean update(final int toX, final int toZ, final int newViewDistance) {
++ if (newViewDistance < 0) {
++ throw new IllegalArgumentException(Integer.toString(newViewDistance));
++ }
++ final int fromX = this.lastChunkX;
++ final int fromZ = this.lastChunkZ;
++ final int oldViewDistance = this.distance;
++ if (fromX == NOT_SET) {
++ return false;
++ }
++
++ this.lastChunkX = toX;
++ this.lastChunkZ = toZ;
++
++ final T parameter = this.parameter;
++
++ final int dx = toX - fromX;
++ final int dz = toZ - fromZ;
++
++ final int totalX = IntegerUtil.branchlessAbs(fromX - toX);
++ final int totalZ = IntegerUtil.branchlessAbs(fromZ - toZ);
++
++ if (Math.max(totalX, totalZ) > (2 * Math.max(newViewDistance, oldViewDistance))) {
++ // teleported?
++ this.removeFromOld(parameter, fromX, fromZ, oldViewDistance);
++ this.addToNew(parameter, toX, toZ, newViewDistance);
++ return true;
++ }
++
++ if (oldViewDistance != newViewDistance) {
++ // remove loop
++
++ final int oldMinX = fromX - oldViewDistance;
++ final int oldMinZ = fromZ - oldViewDistance;
++ final int oldMaxX = fromX + oldViewDistance;
++ final int oldMaxZ = fromZ + oldViewDistance;
++ for (int currX = oldMinX; currX <= oldMaxX; ++currX) {
++ for (int currZ = oldMinZ; currZ <= oldMaxZ; ++currZ) {
++
++ // only remove if we're outside the new view distance...
++ if (Math.max(IntegerUtil.branchlessAbs(currX - toX), IntegerUtil.branchlessAbs(currZ - toZ)) > newViewDistance) {
++ this.removeCallback(parameter, currX, currZ);
++ }
++ }
++ }
++
++ // add loop
++
++ final int newMinX = toX - newViewDistance;
++ final int newMinZ = toZ - newViewDistance;
++ final int newMaxX = toX + newViewDistance;
++ final int newMaxZ = toZ + newViewDistance;
++ for (int currX = newMinX; currX <= newMaxX; ++currX) {
++ for (int currZ = newMinZ; currZ <= newMaxZ; ++currZ) {
++
++ // only add if we're outside the old view distance...
++ if (Math.max(IntegerUtil.branchlessAbs(currX - fromX), IntegerUtil.branchlessAbs(currZ - fromZ)) > oldViewDistance) {
++ this.addCallback(parameter, currX, currZ);
++ }
++ }
++ }
++
++ return true;
++ }
++
++ // x axis is width
++ // z axis is height
++ // right refers to the x axis of where we moved
++ // top refers to the z axis of where we moved
++
++ // same view distance
++
++ // used for relative positioning
++ final int up = sign(dz); // 1 if dz >= 0, -1 otherwise
++ final int right = sign(dx); // 1 if dx >= 0, -1 otherwise
++
++ // The area excluded by overlapping the two view distance squares creates four rectangles:
++ // Two on the left, and two on the right. The ones on the left we consider the "removed" section
++ // and on the right the "added" section.
++ // https://i.imgur.com/MrnOBgI.png is a reference image. Note that the outside borders are not actually
++ // exclusive to the regions they surround.
++
++ // 4 points of the rectangle
++ int maxX; // exclusive
++ int minX; // inclusive
++ int maxZ; // exclusive
++ int minZ; // inclusive
++
++ if (dx != 0) {
++ // handle right addition
++
++ maxX = toX + (oldViewDistance * right) + right; // exclusive
++ minX = fromX + (oldViewDistance * right) + right; // inclusive
++ maxZ = fromZ + (oldViewDistance * up) + up; // exclusive
++ minZ = toZ - (oldViewDistance * up); // inclusive
++
++ for (int currX = minX; currX != maxX; currX += right) {
++ for (int currZ = minZ; currZ != maxZ; currZ += up) {
++ this.addCallback(parameter, currX, currZ);
++ }
++ }
++ }
++
++ if (dz != 0) {
++ // handle up addition
++
++ maxX = toX + (oldViewDistance * right) + right; // exclusive
++ minX = toX - (oldViewDistance * right); // inclusive
++ maxZ = toZ + (oldViewDistance * up) + up; // exclusive
++ minZ = fromZ + (oldViewDistance * up) + up; // inclusive
++
++ for (int currX = minX; currX != maxX; currX += right) {
++ for (int currZ = minZ; currZ != maxZ; currZ += up) {
++ this.addCallback(parameter, currX, currZ);
++ }
++ }
++ }
++
++ if (dx != 0) {
++ // handle left removal
++
++ maxX = toX - (oldViewDistance * right); // exclusive
++ minX = fromX - (oldViewDistance * right); // inclusive
++ maxZ = fromZ + (oldViewDistance * up) + up; // exclusive
++ minZ = toZ - (oldViewDistance * up); // inclusive
++
++ for (int currX = minX; currX != maxX; currX += right) {
++ for (int currZ = minZ; currZ != maxZ; currZ += up) {
++ this.removeCallback(parameter, currX, currZ);
++ }
++ }
++ }
++
++ if (dz != 0) {
++ // handle down removal
++
++ maxX = fromX + (oldViewDistance * right) + right; // exclusive
++ minX = fromX - (oldViewDistance * right); // inclusive
++ maxZ = toZ - (oldViewDistance * up); // exclusive
++ minZ = fromZ - (oldViewDistance * up); // inclusive
++
++ for (int currX = minX; currX != maxX; currX += right) {
++ for (int currZ = minZ; currZ != maxZ; currZ += up) {
++ this.removeCallback(parameter, currX, currZ);
++ }
++ }
++ }
++
++ return true;
++ }
++
++ public final boolean remove() {
++ final int chunkX = this.lastChunkX;
++ final int chunkZ = this.lastChunkZ;
++ final int distance = this.distance;
++ if (chunkX == NOT_SET) {
++ return false;
++ }
++
++ this.lastChunkX = this.lastChunkZ = this.distance = NOT_SET;
++
++ this.removeFromOld(this.parameter, chunkX, chunkZ, distance);
++
++ return true;
++ }
++ }
++}
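
The comments above describe the moved-square update as a decomposition into axis-aligned rectangles. For readers following along, here is a minimal standalone sketch of the same idea under assumed, simplified names: sets stand in for the patch's add/remove callbacks, so the chunks needing callbacks after a move are exactly the set differences of the two squares.

```java
import java.util.HashSet;
import java.util.Set;

// Sketch only: sets replace the callbacks; the chunk key packing mirrors the
// usual ((long) x << 32) | (z & 0xFFFFFFFFL) convention.
public final class SquareDiffSketch {

    static Set<Long> square(final int cx, final int cz, final int r) {
        final Set<Long> out = new HashSet<>();
        for (int x = cx - r; x <= cx + r; ++x) {
            for (int z = cz - r; z <= cz + r; ++z) {
                out.add(((long) x << 32) | ((long) z & 0xFFFFFFFFL));
            }
        }
        return out;
    }

    public static void main(final String[] args) {
        final int r = 2; // view distance
        final Set<Long> oldArea = square(0, 0, r); // centred at (0, 0)
        final Set<Long> newArea = square(1, 0, r); // moved one chunk along x

        final Set<Long> added = new HashSet<>(newArea);
        added.removeAll(oldArea);   // chunks that need addCallback
        final Set<Long> removed = new HashSet<>(oldArea);
        removed.removeAll(newArea); // chunks that need removeCallback

        // A one-chunk move on one axis enters and leaves exactly one column
        // of 2r + 1 chunks - the rectangles the loops above walk directly.
        System.out.println("added=" + added.size() + " removed=" + removed.size()); // 5 and 5
    }
}
```
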
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
+index e5d9c6f2cbe11c2ded6d8ad111fa6a8b2086dfba..c6d20bc2f0eab737338db6b88dacb63f0decb66c 100644
+--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
+@@ -1,5 +1,6 @@
+ package io.papermc.paper.chunk.system.scheduling;
+
++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
+ import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+ import ca.spottedleaf.concurrentutil.map.SWMRLong2ObjectHashTable;
+ import co.aikar.timings.Timing;
+@@ -493,6 +494,21 @@ public final class ChunkHolderManager {
+ }
+ }
+
++ // atomic with respect to all add/remove/addandremove ticket calls for the given chunk
++ public <T, V> boolean addIfRemovedTicket(final long chunk, final TicketType<T> addType, final int addLevel, final T addIdentifier,
++ final TicketType<V> removeType, final int removeLevel, final V removeIdentifier) {
++ this.ticketLock.lock();
++ try {
++ if (this.removeTicketAtLevel(removeType, chunk, removeLevel, removeIdentifier)) {
++ this.addTicketAtLevel(addType, chunk, addLevel, addIdentifier);
++ return true;
++ }
++ return false;
++ } finally {
++ this.ticketLock.unlock();
++ }
++ }
++
+ public void removeAllTicketsFor(final TicketType ticketType, final int ticketLevel, final T ticketIdentifier) {
+ if (ticketLevel > MAX_TICKET_LEVEL) {
+ return;
+@@ -900,6 +916,142 @@ public final class ChunkHolderManager {
+ }
+ }
+
++ public enum TicketOperationType {
++ ADD, REMOVE, ADD_IF_REMOVED, ADD_AND_REMOVE
++ }
++
++ public static record TicketOperation<T, V> (
++ TicketOperationType op, long chunkCoord,
++ TicketType<T> ticketType, int ticketLevel, T identifier,
++ TicketType<V> ticketType2, int ticketLevel2, V identifier2
++ ) {
++
++ private TicketOperation(TicketOperationType op, long chunkCoord,
++ TicketType<T> ticketType, int ticketLevel, T identifier) {
++ this(op, chunkCoord, ticketType, ticketLevel, identifier, null, 0, null);
++ }
++
++ public static <T> TicketOperation<T, T> addOp(final ChunkPos chunk, final TicketType<T> type, final int ticketLevel, final T identifier) {
++ return addOp(CoordinateUtils.getChunkKey(chunk), type, ticketLevel, identifier);
++ }
++
++ public static <T> TicketOperation<T, T> addOp(final int chunkX, final int chunkZ, final TicketType<T> type, final int ticketLevel, final T identifier) {
++ return addOp(CoordinateUtils.getChunkKey(chunkX, chunkZ), type, ticketLevel, identifier);
++ }
++
++ public static <T> TicketOperation<T, T> addOp(final long chunk, final TicketType<T> type, final int ticketLevel, final T identifier) {
++ return new TicketOperation<>(TicketOperationType.ADD, chunk, type, ticketLevel, identifier);
++ }
++
++ public static <T> TicketOperation<T, T> removeOp(final ChunkPos chunk, final TicketType<T> type, final int ticketLevel, final T identifier) {
++ return removeOp(CoordinateUtils.getChunkKey(chunk), type, ticketLevel, identifier);
++ }
++
++ public static <T> TicketOperation<T, T> removeOp(final int chunkX, final int chunkZ, final TicketType<T> type, final int ticketLevel, final T identifier) {
++ return removeOp(CoordinateUtils.getChunkKey(chunkX, chunkZ), type, ticketLevel, identifier);
++ }
++
++ public static <T> TicketOperation<T, T> removeOp(final long chunk, final TicketType<T> type, final int ticketLevel, final T identifier) {
++ return new TicketOperation<>(TicketOperationType.REMOVE, chunk, type, ticketLevel, identifier);
++ }
++
++ public static <T, V> TicketOperation<T, V> addIfRemovedOp(final long chunk,
++ final TicketType<T> addType, final int addLevel, final T addIdentifier,
++ final TicketType<V> removeType, final int removeLevel, final V removeIdentifier) {
++ return new TicketOperation<>(
++ TicketOperationType.ADD_IF_REMOVED, chunk, addType, addLevel, addIdentifier,
++ removeType, removeLevel, removeIdentifier
++ );
++ }
++
++ public static <T, V> TicketOperation<T, V> addAndRemove(final long chunk,
++ final TicketType<T> addType, final int addLevel, final T addIdentifier,
++ final TicketType<V> removeType, final int removeLevel, final V removeIdentifier) {
++ return new TicketOperation<>(
++ TicketOperationType.ADD_AND_REMOVE, chunk, addType, addLevel, addIdentifier,
++ removeType, removeLevel, removeIdentifier
++ );
++ }
++ }
++
++ private final MultiThreadedQueue<TicketOperation<?, ?>> delayedTicketUpdates = new MultiThreadedQueue<>();
++
++ // note: MUST hold ticket lock, otherwise operation ordering is lost
++ private boolean drainTicketUpdates() {
++ boolean ret = false;
++
++ TicketOperation<?, ?> operation;
++ while ((operation = this.delayedTicketUpdates.poll()) != null) {
++ switch (operation.op) {
++ case ADD: {
++ ret |= this.addTicketAtLevel(operation.ticketType, operation.chunkCoord, operation.ticketLevel, operation.identifier);
++ break;
++ }
++ case REMOVE: {
++ ret |= this.removeTicketAtLevel(operation.ticketType, operation.chunkCoord, operation.ticketLevel, operation.identifier);
++ break;
++ }
++ case ADD_IF_REMOVED: {
++ ret |= this.addIfRemovedTicket(
++ operation.chunkCoord,
++ operation.ticketType, operation.ticketLevel, operation.identifier,
++ operation.ticketType2, operation.ticketLevel2, operation.identifier2
++ );
++ break;
++ }
++ case ADD_AND_REMOVE: {
++ ret = true;
++ this.addAndRemoveTickets(
++ operation.chunkCoord,
++ operation.ticketType, operation.ticketLevel, operation.identifier,
++ operation.ticketType2, operation.ticketLevel2, operation.identifier2
++ );
++ break;
++ }
++ }
++ }
++
++ return ret;
++ }
++
++ public Boolean tryDrainTicketUpdates() {
++ final boolean acquired = this.ticketLock.tryLock();
++ try {
++ if (!acquired) {
++ return null;
++ }
++
++ return Boolean.valueOf(this.drainTicketUpdates());
++ } finally {
++ if (acquired) {
++ this.ticketLock.unlock();
++ }
++ }
++ }
++
++ public void pushDelayedTicketUpdate(final TicketOperation<?, ?> operation) {
++ this.delayedTicketUpdates.add(operation);
++ }
++
++ public void pushDelayedTicketUpdates(final Collection<TicketOperation<?, ?>> operations) {
++ this.delayedTicketUpdates.addAll(operations);
++ }
++
++ public Boolean tryProcessTicketUpdates() {
++ final boolean acquired = this.ticketLock.tryLock();
++ try {
++ if (!acquired) {
++ return null;
++ }
++
++ return Boolean.valueOf(this.processTicketUpdates(false, true, null));
++ } finally {
++ if (acquired) {
++ this.ticketLock.unlock();
++ }
++ }
++ }
++
+ private final ThreadLocal<Boolean> BLOCK_TICKET_UPDATES = ThreadLocal.withInitial(() -> {
+ return Boolean.FALSE;
+ });
+@@ -948,6 +1100,8 @@ public final class ChunkHolderManager {
+
+ this.ticketLock.lock();
+ try {
++ this.drainTicketUpdates();
++
+ final boolean levelsUpdated = this.ticketLevelPropagator.propagateUpdates();
+ if (levelsUpdated) {
+ // Unlike CB, ticket level updates cannot happen recursively. Thank god.
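
The queue added above lets any thread publish ticket operations while only lock-holders apply them, preserving ordering relative to direct ticket calls. A minimal sketch of that pattern under assumed names (`ConcurrentLinkedQueue` standing in for `MultiThreadedQueue`, `Runnable` for `TicketOperation`):

```java
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.locks.ReentrantLock;

// Sketch only: names are illustrative, not the patch's API.
final class DelayedTicketSketch {

    private final ConcurrentLinkedQueue<Runnable> delayed = new ConcurrentLinkedQueue<>();
    private final ReentrantLock ticketLock = new ReentrantLock();

    // any thread may publish an operation without holding the lock
    void push(final Runnable operation) {
        this.delayed.add(operation);
    }

    // mirrors tryDrainTicketUpdates(): null means "lock contended, try later",
    // so callers holding other locks never block here
    Boolean tryDrain() {
        if (!this.ticketLock.tryLock()) {
            return null;
        }
        try {
            boolean ran = false;
            Runnable operation;
            while ((operation = this.delayed.poll()) != null) {
                operation.run(); // applied under the lock, so ordering is kept
                ran = true;
            }
            return Boolean.valueOf(ran);
        } finally {
            this.ticketLock.unlock();
        }
    }
}
```
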
+diff --git a/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java
+index 8d442c5a498ecf288a0cc0c54889c6e2fda849ce..9f5f0d8ddc8f480b48079c70e38c9c08eff403f6 100644
+--- a/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java
++++ b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java
+@@ -287,4 +287,43 @@ public class GlobalConfiguration extends ConfigurationPart {
+ public boolean useDimensionTypeForCustomSpawners = false;
+ public boolean strictAdvancementDimensionCheck = false;
+ }
++
++ public ChunkLoadingBasic chunkLoadingBasic;
++
++ public class ChunkLoadingBasic extends ConfigurationPart {
++ @Comment("The maximum rate in chunks per second that the server will send to any individual player. Set to -1 to disable this limit.")
++ public double playerMaxChunkSendRate = 75.0;
++
++ @Comment(
++ "The maximum rate at which chunks will load for any individual player. " +
++ "Note that this setting also affects chunk generations, since a chunk load is always first issued to test if a" +
++ "chunk is already generated. Set to -1 to disable this limit."
++ )
++ public double playerMaxChunkLoadRate = 100.0;
++
++ @Comment("The maximum rate at which chunks will generate for any individual player. Set to -1 to disable this limit.")
++ public double playerMaxChunkGenerateRate = -1.0;
++ }
++
++ public ChunkLoadingAdvanced chunkLoadingAdvanced;
++
++ public class ChunkLoadingAdvanced extends ConfigurationPart {
++ @Comment(
++ "Set to true if the server will match the chunk send radius that clients have configured" +
++ "in their view distance settings if the client is less-than the server's send distance."
++ )
++ public boolean autoConfigSendDistance = true;
++
++ @Comment(
++ "Specifies the maximum amount of concurrent chunk loads that an individual player can have." +
++ "Set to 0 to let the server configure it automatically per player, or set it to -1 to disable the limit."
++ )
++ public int playerMaxConcurrentChunkLoads = 0;
++
++ @Comment(
++ "Specifies the maximum amount of concurrent chunk generations that an individual player can have." +
++ "Set to 0 to let the server configure it automatically per player, or set it to -1 to disable the limit."
++ )
++ public int playerMaxConcurrentChunkGenerates = 0;
++ }
+ }
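
For reference, the new sections above would surface in the generated global configuration file roughly as follows. This layout is illustrative only: the exact file location and key names are derived from the field names by the configuration framework, and may differ.

```yaml
chunk-loading-basic:
  player-max-chunk-send-rate: 75.0
  player-max-chunk-load-rate: 100.0
  player-max-chunk-generate-rate: -1.0
chunk-loading-advanced:
  auto-config-send-distance: true
  player-max-concurrent-chunk-loads: 0
  player-max-concurrent-chunk-generates: 0
```
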
+diff --git a/src/main/java/io/papermc/paper/util/IntervalledCounter.java b/src/main/java/io/papermc/paper/util/IntervalledCounter.java
+index cea9c098ade00ee87b8efc8164ab72f5279758f0..197224e31175252d8438a8df585bbb65f2288d7f 100644
+--- a/src/main/java/io/papermc/paper/util/IntervalledCounter.java
++++ b/src/main/java/io/papermc/paper/util/IntervalledCounter.java
+@@ -2,6 +2,8 @@ package io.papermc.paper.util;
+
+ public final class IntervalledCounter {
+
++ private static final int INITIAL_SIZE = 8;
++
+ protected long[] times;
+ protected long[] counts;
+ protected final long interval;
+@@ -11,8 +13,8 @@ public final class IntervalledCounter {
+ protected int tail; // exclusive
+
+ public IntervalledCounter(final long interval) {
+- this.times = new long[8];
+- this.counts = new long[8];
++ this.times = new long[INITIAL_SIZE];
++ this.counts = new long[INITIAL_SIZE];
+ this.interval = interval;
+ }
+
+@@ -67,13 +69,13 @@ public final class IntervalledCounter {
+ this.tail = nextTail;
+ }
+
+- public void updateAndAdd(final int count) {
++ public void updateAndAdd(final long count) {
+ final long currTime = System.nanoTime();
+ this.updateCurrentTime(currTime);
+ this.addTime(currTime, count);
+ }
+
+- public void updateAndAdd(final int count, final long currTime) {
++ public void updateAndAdd(final long count, final long currTime) {
+ this.updateCurrentTime(currTime);
+ this.addTime(currTime, count);
+ }
+@@ -93,9 +95,13 @@ public final class IntervalledCounter {
+ this.tail = size;
+
+ if (tail >= head) {
++ // sequentially ordered from [head, tail)
+ System.arraycopy(oldElements, head, newElements, 0, size);
+ System.arraycopy(oldCounts, head, newCounts, 0, size);
+ } else {
++ // ordered from [head, length)
++ // then followed by [0, tail)
++
+ System.arraycopy(oldElements, head, newElements, 0, oldElements.length - head);
+ System.arraycopy(oldElements, 0, newElements, oldElements.length - head, tail);
+
+@@ -106,10 +112,18 @@ public final class IntervalledCounter {
+
+ // returns in units per second
+ public double getRate() {
+- return this.size() / (this.interval * 1.0e-9);
++ return (double)this.sum / ((double)this.interval * 1.0E-9);
++ }
++
++ public long getInterval() {
++ return this.interval;
+ }
+
+- public long size() {
++ public long getSum() {
+ return this.sum;
+ }
++
++ public int totalDataPoints() {
++ return this.tail >= this.head ? (this.tail - this.head) : (this.tail + (this.counts.length - this.head));
++ }
+ }
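
The `getRate()` change above switches from dividing the number of recorded data points by the interval to dividing the running sum of counts, which matters once a single sample can carry a count larger than one. A small worked sketch with assumed sample data:

```java
// Sketch only: fixed sample data; the real counter maintains a ring buffer of
// (time, count) pairs and expires entries older than the interval.
public final class RateSketch {
    public static void main(final String[] args) {
        final long intervalNanos = 5_000_000_000L; // 5 second window
        final long[] counts = {32, 48, 16};        // e.g. chunks sent per sample

        long sum = 0;
        for (final long c : counts) {
            sum += c;
        }

        // matches the corrected getRate(): sum / interval-in-seconds
        final double rate = (double) sum / ((double) intervalNanos * 1.0E-9);
        System.out.printf("%.1f units/s%n", rate); // 19.2 units/s

        // dividing the data-point total instead would report 0.6/s here,
        // under-counting whenever one sample carries a count above 1
        System.out.printf("%.1f data points/s%n", counts.length / (intervalNanos * 1.0E-9));
    }
}
```
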
+diff --git a/src/main/java/io/papermc/paper/util/MCUtil.java b/src/main/java/io/papermc/paper/util/MCUtil.java
+index d1a59c2af0557a816c094983ec60097fb4de060c..6898c704e60d89d53c8ed114e5e12f73ed63605a 100644
+--- a/src/main/java/io/papermc/paper/util/MCUtil.java
++++ b/src/main/java/io/papermc/paper/util/MCUtil.java
+@@ -602,8 +602,8 @@ public final class MCUtil {
+
+ worldData.addProperty("is-loaded", loadedWorlds.contains(bukkitWorld));
+ worldData.addProperty("name", world.getWorld().getName());
+- worldData.addProperty("view-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance()); // Paper - replace chunk loader system
+- worldData.addProperty("tick-view-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance()); // Paper - replace chunk loader system
++ worldData.addProperty("view-distance", world.getWorld().getViewDistance()); // Paper - replace chunk loader system
++ worldData.addProperty("tick-view-distance", world.getWorld().getSimulationDistance()); // Paper - replace chunk loader system
+ worldData.addProperty("keep-spawn-loaded", world.keepSpawnInMemory);
+ worldData.addProperty("keep-spawn-loaded-range", world.paperConfig().spawn.keepSpawnLoadedRange * 16);
+
+diff --git a/src/main/java/net/minecraft/server/level/ChunkHolder.java b/src/main/java/net/minecraft/server/level/ChunkHolder.java
+index bc46479fd0622a90fd98ac88f92b2840a22a2d04..0b9cb85c063f913ad9245bafb8587d2f06c0ac6e 100644
+--- a/src/main/java/net/minecraft/server/level/ChunkHolder.java
++++ b/src/main/java/net/minecraft/server/level/ChunkHolder.java
+@@ -128,6 +128,26 @@ public class ChunkHolder {
+ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> playersInChunkTickRange;
+ // Paper end - optimise anyPlayerCloseEnoughForSpawning
+
++ // Paper start - replace player chunk loader
++ private final com.destroystokyo.paper.util.maplist.ReferenceList<ServerPlayer> playersSentChunkTo = new com.destroystokyo.paper.util.maplist.ReferenceList<>();
++
++ public void addPlayer(ServerPlayer player) {
++ if (!this.playersSentChunkTo.add(player)) {
++ throw new IllegalStateException("Already sent chunk " + this.pos + " to player " + player);
++ }
++ }
++
++ public void removePlayer(ServerPlayer player) {
++ if (!this.playersSentChunkTo.remove(player)) {
++ throw new IllegalStateException("Have not sent chunk " + this.pos + " to player " + player);
++ }
++ }
++
++ public boolean hasChunkBeenSent() {
++ return this.playersSentChunkTo.size() != 0;
++ }
++ // Paper end - replace player chunk loader
++
+ public ChunkHolder(ChunkPos pos, LevelHeightAccessor world, LevelLightEngine lightingProvider, ChunkHolder.PlayerProvider playersWatchingChunkProvider, io.papermc.paper.chunk.system.scheduling.NewChunkHolder newChunkHolder) { // Paper - rewrite chunk system
+ this.newChunkHolder = newChunkHolder; // Paper - rewrite chunk system
+ this.chunkToSaveHistory = null;
+@@ -225,6 +245,11 @@ public class ChunkHolder {
+ // Paper - rewrite chunk system
+
+ public void blockChanged(BlockPos pos) {
++ // Paper start - replace player chunk loader
++ if (this.playersSentChunkTo.size() == 0) {
++ return;
++ }
++ // Paper end - replace player chunk loader
+ LevelChunk chunk = this.getSendingChunk(); // Paper - no-tick view distance
+
+ if (chunk != null) {
+@@ -251,7 +276,7 @@ public class ChunkHolder {
+ LevelChunk chunk = this.getSendingChunk();
+ // Paper end - no-tick view distance
+
+- if (chunk != null) {
++ if (this.playersSentChunkTo.size() != 0 && chunk != null) { // Paper - replace player chunk loader
+ int j = this.lightEngine.getMinLightSection();
+ int k = this.lightEngine.getMaxLightSection();
+
+@@ -351,27 +376,32 @@ public class ChunkHolder {
+
+ }
+
+- public void broadcast(Packet<?> packet, boolean onlyOnWatchDistanceEdge) {
+- // Paper start - per player view distance
+- // there can be potential desync with player's last mapped section and the view distance map, so use the
+- // view distance map here.
+- com.destroystokyo.paper.util.misc.PlayerAreaMap viewDistanceMap = this.chunkMap.playerChunkManager.broadcastMap; // Paper - replace old player chunk manager
+- com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> players = viewDistanceMap.getObjectsInRange(this.pos);
+- if (players == null) {
+- return;
+- }
++ // Paper start - rewrite player chunk loader
++ public List<ServerPlayer> getPlayers(boolean onlyOnWatchDistanceEdge) {
++ List<ServerPlayer> ret = new java.util.ArrayList<>();
+
+- Object[] backingSet = players.getBackingSet();
+- for (int i = 0, len = backingSet.length; i < len; ++i) {
+- if (!(backingSet[i] instanceof ServerPlayer player)) {
++ for (int i = 0, len = this.playersSentChunkTo.size(); i < len; ++i) {
++ ServerPlayer player = this.playersSentChunkTo.getUnchecked(i);
++ if (onlyOnWatchDistanceEdge && !this.chunkMap.level.playerChunkLoader.isChunkSent(player, this.pos.x, this.pos.z, onlyOnWatchDistanceEdge)) {
+ continue;
+ }
+- if (!this.chunkMap.playerChunkManager.isChunkSent(player, this.pos.x, this.pos.z, onlyOnWatchDistanceEdge)) {
++ ret.add(player);
++ }
++
++ return ret;
++ }
++ // Paper end - rewrite player chunk loader
++
++ public void broadcast(Packet<?> packet, boolean onlyOnWatchDistanceEdge) {
++ // Paper start - rewrite player chunk loader - modeled after the above
++ for (int i = 0, len = this.playersSentChunkTo.size(); i < len; ++i) {
++ ServerPlayer player = this.playersSentChunkTo.getUnchecked(i);
++ if (onlyOnWatchDistanceEdge && !this.chunkMap.level.playerChunkLoader.isChunkSent(player, this.pos.x, this.pos.z, onlyOnWatchDistanceEdge)) {
+ continue;
+ }
+ player.connection.send(packet);
+ }
+- // Paper end - per player view distance
++ // Paper end - rewrite player chunk loader
+ }
+
+ // Paper - rewrite chunk system
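
The rewritten `broadcast` above replaces a global range-map query with the holder's own `playersSentChunkTo` list. A simplified sketch of that bookkeeping, with a plain `ArrayList` standing in for the patch's `ReferenceList` and a hypothetical generic `P` in place of `ServerPlayer`:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

// Sketch only: the double-add / absent-remove checks mirror the
// IllegalStateException guards in the hunk above.
final class PerHolderPlayersSketch<P> {

    private final List<P> playersSentChunkTo = new ArrayList<>();

    void addPlayer(final P player) {
        if (this.playersSentChunkTo.contains(player)) {
            throw new IllegalStateException("Chunk already sent to " + player);
        }
        this.playersSentChunkTo.add(player);
    }

    void removePlayer(final P player) {
        if (!this.playersSentChunkTo.remove(player)) {
            throw new IllegalStateException("Chunk was not sent to " + player);
        }
    }

    void broadcast(final Consumer<P> send) {
        // O(players the chunk was actually sent to): no global range-map
        // query, and no desync window against a separate distance map
        for (int i = 0, len = this.playersSentChunkTo.size(); i < len; ++i) {
            send.accept(this.playersSentChunkTo.get(i));
        }
    }
}
```
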
+diff --git a/src/main/java/net/minecraft/server/level/ChunkMap.java b/src/main/java/net/minecraft/server/level/ChunkMap.java
+index 2212f9f48636357265d8e44aba415ea4f09f1fe7..870f4d6fae8c14502b4653f246a2df9e345ccca3 100644
+--- a/src/main/java/net/minecraft/server/level/ChunkMap.java
++++ b/src/main/java/net/minecraft/server/level/ChunkMap.java
+@@ -196,7 +196,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
+ // Paper end - use distance map to optimise tracker
+
+ void addPlayerToDistanceMaps(ServerPlayer player) {
+- this.playerChunkManager.addPlayer(player); // Paper - replace chunk loader
++ this.level.playerChunkLoader.addPlayer(player); // Paper - replace chunk loader
+ int chunkX = MCUtil.getChunkCoordinate(player.getX());
+ int chunkZ = MCUtil.getChunkCoordinate(player.getZ());
+ // Note: players need to be explicitly added to distance maps before they can be updated
+@@ -218,7 +218,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
+ }
+
+ void removePlayerFromDistanceMaps(ServerPlayer player) {
+- this.playerChunkManager.removePlayer(player); // Paper - replace chunk loader
++ this.level.playerChunkLoader.removePlayer(player); // Paper - replace chunk loader
+
+ // Paper start - optimise ChunkMap#anyPlayerCloseEnoughForSpawning
+ this.playerMobSpawnMap.remove(player);
+@@ -241,7 +241,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
+ int chunkX = MCUtil.getChunkCoordinate(player.getX());
+ int chunkZ = MCUtil.getChunkCoordinate(player.getZ());
+ // Note: players need to be explicitly added to distance maps before they can be updated
+- this.playerChunkManager.updatePlayer(player); // Paper - replace chunk loader
++ this.level.playerChunkLoader.updatePlayer(player); // Paper - replace chunk loader
+ this.playerChunkTickRangeMap.update(player, chunkX, chunkZ, DistanceManager.MOB_SPAWN_RANGE); // Paper - optimise ChunkMap#anyPlayerCloseEnoughForSpawning
+ // Paper start - per player mob spawning
+ if (this.playerMobDistanceMap != null) {
+@@ -813,7 +813,11 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
+
+ // Paper start - replace player loader system
+ public void setTickViewDistance(int distance) {
+- this.playerChunkManager.setTickDistance(distance);
++ this.level.playerChunkLoader.setTickDistance(distance);
++ }
++
++ public void setSendViewDistance(int distance) {
++ this.level.playerChunkLoader.setSendDistance(distance);
+ }
+ // Paper end - replace player loader system
+ public void setViewDistance(int watchDistance) {
+@@ -823,20 +827,22 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
+ int k = this.viewDistance;
+
+ this.viewDistance = j;
+- this.playerChunkManager.setLoadDistance(this.viewDistance); // Paper - replace player loader system
++ this.level.playerChunkLoader.setLoadDistance(this.viewDistance); // Paper - replace player loader system
+ }
+
+ }
+
+ public void updateChunkTracking(ServerPlayer player, ChunkPos pos, MutableObject<java.util.Map<Object, ClientboundLevelChunkWithLightPacket>> packet, boolean oldWithinViewDistance, boolean newWithinViewDistance) { // Paper - public // Paper - Anti-Xray - Bypass
+ if (player.level == this.level) {
++ ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos.toLong()); // Paper - replace chunk loader system - move up
+ if (newWithinViewDistance && !oldWithinViewDistance) {
+- ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos.toLong());
++ // Paper - replace chunk loader system - move up
+
+ if (playerchunk != null) {
+ LevelChunk chunk = playerchunk.getSendingChunk(); // Paper - replace chunk loader system
+
+ if (chunk != null) {
++ playerchunk.addPlayer(player); // Paper - replace chunk loader system
+ this.playerLoadedChunk(player, packet, chunk);
+ }
+
+@@ -845,6 +851,11 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
+ }
+
+ if (!newWithinViewDistance && oldWithinViewDistance) {
++ // Paper start - replace chunk loader system
++ if (playerchunk != null) {
++ playerchunk.removePlayer(player);
++ }
++ // Paper end - replace chunk loader system
+ player.untrackChunk(pos);
+ }
+
+@@ -1148,34 +1159,18 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
+ // Paper - replaced by PlayerChunkLoader
+
+ this.updateMaps(player); // Paper - distance maps
+- this.playerChunkManager.updatePlayer(player); // Paper - respond to movement immediately
+
+ }
+
+ @Override
+ public List<ServerPlayer> getPlayers(ChunkPos chunkPos, boolean onlyOnWatchDistanceEdge) {
+ // Paper start - per player view distance
+- // there can be potential desync with player's last mapped section and the view distance map, so use the
+- // view distance map here.
+- com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> players = this.playerChunkManager.broadcastMap.getObjectsInRange(chunkPos);
+- if (players == null) {
+- return java.util.Collections.emptyList();
+- }
+-
+- List<ServerPlayer> ret = new java.util.ArrayList<>(players.size());
+-
+- Object[] backingSet = players.getBackingSet();
+- for (int i = 0, len = backingSet.length; i < len; ++i) {
+- if (!(backingSet[i] instanceof ServerPlayer player)) {
+- continue;
+- }
+- if (!this.playerChunkManager.isChunkSent(player, chunkPos.x, chunkPos.z, onlyOnWatchDistanceEdge)) {
+- continue;
+- }
+- ret.add(player);
++ ChunkHolder holder = this.getVisibleChunkIfPresent(chunkPos.toLong());
++ if (holder == null) {
++ return new java.util.ArrayList<>();
++ } else {
++ return holder.getPlayers(onlyOnWatchDistanceEdge);
+ }
+-
+- return ret;
+ // Paper end - per player view distance
+ }
+
+@@ -1599,7 +1594,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
+ double vec3d_dx = player.getX() - this.entity.getX();
+ double vec3d_dz = player.getZ() - this.entity.getZ();
+ // Paper end - remove allocation of Vec3D here
+- double d0 = (double) Math.min(this.getEffectiveRange(), io.papermc.paper.chunk.PlayerChunkLoader.getSendViewDistance(player) * 16); // Paper - per player view distance
++ double d0 = (double) Math.min(this.getEffectiveRange(), io.papermc.paper.chunk.system.ChunkSystem.getSendViewDistance(player) * 16); // Paper - per player view distance
+ double d1 = vec3d_dx * vec3d_dx + vec3d_dz * vec3d_dz; // Paper
+ double d2 = d0 * d0;
+ boolean flag = d1 <= d2 && this.entity.broadcastToPlayer(player);
+diff --git a/src/main/java/net/minecraft/server/level/DistanceManager.java b/src/main/java/net/minecraft/server/level/DistanceManager.java
+index 52cba8f68d274cce106304aef1249a95474d3238..88fca8b160df6804f30ed2cf8cf1f645085434e2 100644
+--- a/src/main/java/net/minecraft/server/level/DistanceManager.java
++++ b/src/main/java/net/minecraft/server/level/DistanceManager.java
+@@ -184,17 +184,17 @@ public abstract class DistanceManager {
+ }
+
+ protected void updatePlayerTickets(int viewDistance) {
+- this.chunkMap.playerChunkManager.setTargetNoTickViewDistance(viewDistance); // Paper - route to player chunk manager
++ this.chunkMap.setViewDistance(viewDistance);// Paper - route to player chunk manager
+ }
+
+ // Paper start
+ public int getSimulationDistance() {
+- return this.chunkMap.playerChunkManager.getTargetTickViewDistance(); // Paper - route to player chunk manager
++ return this.chunkMap.level.playerChunkLoader.getAPITickDistance();
+ }
+ // Paper end
+
+ public void updateSimulationDistance(int simulationDistance) {
+- this.chunkMap.playerChunkManager.setTargetTickViewDistance(simulationDistance); // Paper - route to player chunk manager
++ this.chunkMap.level.playerChunkLoader.setTickDistance(simulationDistance); // Paper - route to player chunk manager
+ }
+
+ public int getNaturalSpawnChunkCount() {
+diff --git a/src/main/java/net/minecraft/server/level/ServerChunkCache.java b/src/main/java/net/minecraft/server/level/ServerChunkCache.java
+index ca84eddbdb1e198b899750e5f6b3eafd25ce970f..736f37979c882e41e7571202df38eb6a2923fcb0 100644
+--- a/src/main/java/net/minecraft/server/level/ServerChunkCache.java
++++ b/src/main/java/net/minecraft/server/level/ServerChunkCache.java
+@@ -645,7 +645,7 @@ public class ServerChunkCache extends ChunkSource {
+ this.level.getProfiler().popPush("chunks");
+ if (tickChunks) {
+ this.level.timings.chunks.startTiming(); // Paper - timings
+- this.chunkMap.playerChunkManager.tick(); // Paper - this is mostly is to account for view distance changes
++ this.chunkMap.level.playerChunkLoader.tick(); // Paper - replace player chunk loader - this is mostly required to account for view distance changes
+ this.tickChunks();
+ this.level.timings.chunks.stopTiming(); // Paper - timings
+ }
+@@ -1001,7 +1001,7 @@ public class ServerChunkCache extends ChunkSource {
+ @Override
+ // CraftBukkit start - process pending Chunk loadCallback() and unloadCallback() after each run task
+ public boolean pollTask() {
+- ServerChunkCache.this.chunkMap.playerChunkManager.tickMidTick();
++ ServerChunkCache.this.chunkMap.level.playerChunkLoader.tickMidTick(); // Paper - replace player chunk loader
+ if (ServerChunkCache.this.runDistanceManagerUpdates()) {
+ return true;
+ }
+diff --git a/src/main/java/net/minecraft/server/level/ServerLevel.java b/src/main/java/net/minecraft/server/level/ServerLevel.java
+index 54c2b7fba83d6f06dba95b1bb5b487a02048d6e6..714637cdd9dcdbffa344b19e77944fb3c7541ff7 100644
+--- a/src/main/java/net/minecraft/server/level/ServerLevel.java
++++ b/src/main/java/net/minecraft/server/level/ServerLevel.java
+@@ -523,6 +523,48 @@ public class ServerLevel extends Level implements WorldGenLevel {
+ }
+ // Paper end - optimise get nearest players for entity AI
+
++ public final io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader playerChunkLoader = new io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader(this);
++ private final java.util.concurrent.atomic.AtomicReference<io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances> viewDistances = new java.util.concurrent.atomic.AtomicReference<>(new io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances(-1, -1, -1));
++
++ public io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances getViewDistances() {
++ return this.viewDistances.get();
++ }
++
++ private void updateViewDistance(final java.util.function.Function<io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances, io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances> update) {
++ for (io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances curr = this.viewDistances.get();;) {
++ if (this.viewDistances.compareAndSet(curr, update.apply(curr))) {
++ return;
++ }
++ }
++ }
++
++ public void setTickViewDistance(final int distance) {
++ if ((distance < io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE)) {
++ throw new IllegalArgumentException("Tick view distance must be a number between " + io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE) + ", got: " + distance);
++ }
++ this.updateViewDistance((input) -> {
++ return input.setTickViewDistance(distance);
++ });
++ }
++
++ public void setLoadViewDistance(final int distance) {
++ if (distance != -1 && (distance < io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1)) {
++ throw new IllegalArgumentException("Load view distance must be a number between " + io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance);
++ }
++ this.updateViewDistance((input) -> {
++ return input.setLoadViewDistance(distance);
++ });
++ }
++
++ public void setSendViewDistance(final int distance) {
++ if (distance != -1 && (distance < io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1)) {
++ throw new IllegalArgumentException("Send view distance must be a number between " + io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance);
++ }
++ this.updateViewDistance((input) -> {
++ return input.setSendViewDistance(distance);
++ });
++ }
++
+ // Add env and gen to constructor, IWorldDataServer -> WorldDataServer
+ public ServerLevel(MinecraftServer minecraftserver, Executor executor, LevelStorageSource.LevelStorageAccess convertable_conversionsession, PrimaryLevelData iworlddataserver, ResourceKey<Level> resourcekey, LevelStem worlddimension, ChunkProgressListener worldloadlistener, boolean flag, long i, List<CustomSpawner> list, boolean flag1, org.bukkit.World.Environment env, org.bukkit.generator.ChunkGenerator gen, org.bukkit.generator.BiomeProvider biomeProvider) {
+ // Holder<DimensionType> holder = worlddimension.type(); // CraftBukkit - decompile error
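
The `updateViewDistance` helper above is a classic compare-and-set retry loop over an immutable value: all three distances are published together, so readers never observe a half-updated triple. A self-contained sketch with a hypothetical local record standing in for the chunk loader's `ViewDistances`:

```java
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.UnaryOperator;

// Sketch only: field names are assumptions for illustration.
final class ViewDistanceSketch {

    record ViewDistances(int tick, int load, int send) {}

    private final AtomicReference<ViewDistances> viewDistances =
        new AtomicReference<>(new ViewDistances(-1, -1, -1));

    private void update(final UnaryOperator<ViewDistances> updater) {
        for (;;) {
            final ViewDistances curr = this.viewDistances.get();
            if (this.viewDistances.compareAndSet(curr, updater.apply(curr))) {
                return; // if another writer won the race, loop and re-apply
            }
        }
    }

    void setTickViewDistance(final int distance) {
        this.update(curr -> new ViewDistances(distance, curr.load(), curr.send()));
    }

    ViewDistances get() {
        return this.viewDistances.get();
    }
}
```
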
+diff --git a/src/main/java/net/minecraft/server/level/ServerPlayer.java b/src/main/java/net/minecraft/server/level/ServerPlayer.java
+index 7d6d3c8556033d289fdadc489e73fba478fce41a..869daafbc236b3ff63f878e5fe28427fde75afe5 100644
+--- a/src/main/java/net/minecraft/server/level/ServerPlayer.java
++++ b/src/main/java/net/minecraft/server/level/ServerPlayer.java
+@@ -269,6 +269,48 @@ public class ServerPlayer extends Player {
+ public PlayerNaturallySpawnCreaturesEvent playerNaturallySpawnedEvent; // Paper
+ public org.bukkit.event.player.PlayerQuitEvent.QuitReason quitReason = null; // Paper - there are a lot of changes to do if we change all methods leading to the event
+
++ private final java.util.concurrent.atomic.AtomicReference<io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances> viewDistances = new java.util.concurrent.atomic.AtomicReference<>(new io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances(-1, -1, -1));
++ public io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.PlayerChunkLoaderData chunkLoader;
++
++ public io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances getViewDistances() {
++ return this.viewDistances.get();
++ }
++
++ private void updateViewDistance(final java.util.function.Function<io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances, io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances> update) {
++ for (io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.ViewDistances curr = this.viewDistances.get();;) {
++ if (this.viewDistances.compareAndSet(curr, update.apply(curr))) {
++ return;
++ }
++ }
++ }
++
++ public void setTickViewDistance(final int distance) {
++ if ((distance < io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE)) {
++ throw new IllegalArgumentException("Tick view distance must be a number between " + io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE) + ", got: " + distance);
++ }
++ this.updateViewDistance((input) -> {
++ return input.setTickViewDistance(distance);
++ });
++ }
++
++ public void setLoadViewDistance(final int distance) {
++ if (distance != -1 && (distance < io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1)) {
++ throw new IllegalArgumentException("Load view distance must be a number between " + io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance);
++ }
++ this.updateViewDistance((input) -> {
++ return input.setLoadViewDistance(distance);
++ });
++ }
++
++ public void setSendViewDistance(final int distance) {
++ if (distance != -1 && (distance < io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1)) {
++ throw new IllegalArgumentException("Send view distance must be a number between " + io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance);
++ }
++ this.updateViewDistance((input) -> {
++ return input.setSendViewDistance(distance);
++ });
++ }
++
+ public ServerPlayer(MinecraftServer server, ServerLevel world, GameProfile profile) {
+ super(world, world.getSharedSpawnPos(), world.getSharedSpawnAngle(), profile);
+ this.chatVisibility = ChatVisiblity.FULL;
+diff --git a/src/main/java/net/minecraft/server/players/PlayerList.java b/src/main/java/net/minecraft/server/players/PlayerList.java
+index 4b754f6eae683248d7fe11d6d6cb168d5dd696a2..3c9d08c37a44a60bc70387d8d0dbd0a39ea98a26 100644
+--- a/src/main/java/net/minecraft/server/players/PlayerList.java
++++ b/src/main/java/net/minecraft/server/players/PlayerList.java
+@@ -270,7 +270,7 @@ public abstract class PlayerList {
+ boolean flag1 = gamerules.getBoolean(GameRules.RULE_REDUCEDDEBUGINFO);
+
+ // Spigot - view distance
+- playerconnection.send(new ClientboundLoginPacket(player.getId(), worlddata.isHardcore(), player.gameMode.getGameModeForPlayer(), player.gameMode.getPreviousGameModeForPlayer(), this.server.levelKeys(), this.synchronizedRegistries, worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), this.getMaxPlayers(), worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance(), worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance(), flag1, !flag, worldserver1.isDebug(), worldserver1.isFlat(), player.getLastDeathLocation())); // Paper - replace old player chunk management
++ playerconnection.send(new ClientboundLoginPacket(player.getId(), worlddata.isHardcore(), player.gameMode.getGameModeForPlayer(), player.gameMode.getPreviousGameModeForPlayer(), this.server.levelKeys(), this.synchronizedRegistries, worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), this.getMaxPlayers(), worldserver1.getWorld().getSendViewDistance(), worldserver1.getWorld().getSimulationDistance(), flag1, !flag, worldserver1.isDebug(), worldserver1.isFlat(), player.getLastDeathLocation())); // Paper - replace old player chunk management
+ player.getBukkitEntity().sendSupportedChannels(); // CraftBukkit
+ playerconnection.send(new ClientboundUpdateEnabledFeaturesPacket(FeatureFlags.REGISTRY.toNames(worldserver1.enabledFeatures())));
+ playerconnection.send(new ClientboundCustomPayloadPacket(ClientboundCustomPayloadPacket.BRAND, (new FriendlyByteBuf(Unpooled.buffer())).writeUtf(this.getServer().getServerModName())));
+@@ -898,8 +898,8 @@ public abstract class PlayerList {
+ // CraftBukkit start
+ LevelData worlddata = worldserver1.getLevelData();
+ entityplayer1.connection.send(new ClientboundRespawnPacket(worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), entityplayer1.gameMode.getGameModeForPlayer(), entityplayer1.gameMode.getPreviousGameModeForPlayer(), worldserver1.isDebug(), worldserver1.isFlat(), (byte) i, entityplayer1.getLastDeathLocation()));
+- entityplayer1.connection.send(new ClientboundSetChunkCacheRadiusPacket(worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance())); // Spigot // Paper - replace old player chunk management
+- entityplayer1.connection.send(new ClientboundSetSimulationDistancePacket(worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance())); // Spigot // Paper - replace old player chunk management
++ entityplayer1.connection.send(new ClientboundSetChunkCacheRadiusPacket(worldserver1.getWorld().getSendViewDistance())); // Spigot // Paper - replace old player chunk management
++ entityplayer1.connection.send(new ClientboundSetSimulationDistancePacket(worldserver1.getWorld().getSimulationDistance())); // Spigot // Paper - replace old player chunk management
+ entityplayer1.spawnIn(worldserver1);
+ entityplayer1.unsetRemoved();
+ entityplayer1.connection.teleport(new Location(worldserver1.getWorld(), entityplayer1.getX(), entityplayer1.getY(), entityplayer1.getZ(), entityplayer1.getYRot(), entityplayer1.getXRot()));
+diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java
+index 3cbf801b2e5420c0e870f73788deb550e49ad54d..60003ff929f7ac6b34f9230c53ccbd54dc9e176b 100644
+--- a/src/main/java/net/minecraft/world/level/Level.java
++++ b/src/main/java/net/minecraft/world/level/Level.java
+@@ -627,7 +627,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable {
+ this.sendBlockUpdated(blockposition, iblockdata1, iblockdata, i);
+ // Paper start - per player view distance - allow block updates for non-ticking chunks in player view distance
+ // if copied from above
+- } else if ((i & 2) != 0 && (!this.isClientSide || (i & 4) == 0) && (this.isClientSide || chunk == null || ((ServerLevel)this).getChunkSource().chunkMap.playerChunkManager.broadcastMap.getObjectsInRange(io.papermc.paper.util.MCUtil.getCoordinateKey(blockposition)) != null)) { // Paper - replace old player chunk management
++ } else if ((i & 2) != 0 && (!this.isClientSide || (i & 4) == 0)) { // Paper - replace old player chunk management
+ ((ServerLevel)this).getChunkSource().blockChanged(blockposition);
+ // Paper end - per player view distance
+ }
+diff --git a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
+index 28e4b302284f955a73e75d0f4276d55fb51826f5..e776eb8afef978938da084f9ae29d611181b43fe 100644
+--- a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
++++ b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
+@@ -184,43 +184,6 @@ public class LevelChunk extends ChunkAccess {
+
+ protected void onNeighbourChange(final long bitsetBefore, final long bitsetAfter) {
+
+- // Paper start - no-tick view distance
+- ServerChunkCache chunkProviderServer = ((ServerLevel)this.level).getChunkSource();
+- net.minecraft.server.level.ChunkMap chunkMap = chunkProviderServer.chunkMap;
+- // this code handles the addition of ticking tickets - the distance map handles the removal
+- if (!areNeighboursLoaded(bitsetBefore, 2) && areNeighboursLoaded(bitsetAfter, 2)) {
+- if (chunkMap.playerChunkManager.tickMap.getObjectsInRange(this.coordinateKey) != null) { // Paper - replace old player chunk loading system
+- // now we're ready for entity ticking
+- chunkProviderServer.mainThreadProcessor.execute(() -> {
+- // double check that this condition still holds.
+- if (LevelChunk.this.areNeighboursLoaded(2) && chunkMap.playerChunkManager.tickMap.getObjectsInRange(LevelChunk.this.coordinateKey) != null) { // Paper - replace old player chunk loading system
+- chunkMap.playerChunkManager.onChunkPlayerTickReady(this.chunkPos.x, this.chunkPos.z); // Paper - replace old player chunk
+- chunkProviderServer.addTicketAtLevel(net.minecraft.server.level.TicketType.PLAYER, LevelChunk.this.chunkPos, 31, LevelChunk.this.chunkPos); // 31 -> entity ticking, TODO check on update
+- }
+- });
+- }
+- }
+-
+- // this code handles the chunk sending
+- if (!areNeighboursLoaded(bitsetBefore, 1) && areNeighboursLoaded(bitsetAfter, 1)) {
+- // Paper start - replace old player chunk loading system
+- if (chunkMap.playerChunkManager.isChunkNearPlayers(this.chunkPos.x, this.chunkPos.z)) {
+- // the post processing is expensive, so we don't want to run it unless we're actually near
+- // a player.
+- chunkProviderServer.mainThreadProcessor.execute(() -> {
+- if (!LevelChunk.this.areNeighboursLoaded(1)) {
+- return;
+- }
+- LevelChunk.this.postProcessGeneration();
+- if (!LevelChunk.this.areNeighboursLoaded(1)) {
+- return;
+- }
+- chunkMap.playerChunkManager.onChunkSendReady(this.chunkPos.x, this.chunkPos.z);
+- });
+- }
+- // Paper end - replace old player chunk loading system
+- }
+- // Paper end - no-tick view distance
+ }
+
+ public final boolean isAnyNeighborsLoaded() {
+@@ -906,7 +869,6 @@ public class LevelChunk extends ChunkAccess {
+ // Paper - rewrite chunk system - move into separate callback
+ org.bukkit.Server server = this.level.getCraftServer();
+ // Paper - rewrite chunk system - move into separate callback
+- ((ServerLevel)this.level).getChunkSource().chunkMap.playerChunkManager.onChunkLoad(this.chunkPos.x, this.chunkPos.z); // Paper - rewrite player chunk management
+ if (server != null) {
+ /*
+ * If it's a new world, the first few chunks are generated inside
+@@ -1074,6 +1036,7 @@ public class LevelChunk extends ChunkAccess {
+ BlockState iblockdata1 = Block.updateFromNeighbourShapes(iblockdata, this.level, blockposition);
+
+ this.level.setBlock(blockposition, iblockdata1, 20);
++ if (iblockdata1 != iblockdata) this.level.chunkSource.blockChanged(blockposition); // Paper - replace player chunk loader - notify since we send before processing full updates
+ }
+ }
+
+@@ -1093,7 +1056,6 @@ public class LevelChunk extends ChunkAccess {
+ this.upgradeData.upgrade(this);
+ } finally { // Paper start - replace chunk loader system
+ this.isPostProcessingDone = true;
+- this.level.getChunkSource().chunkMap.playerChunkManager.onChunkPostProcessing(this.chunkPos.x, this.chunkPos.z);
+ }
+ // Paper end - replace chunk loader system
+ }
+diff --git a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
+index 4cb0307935aa63d44aac55c80ee50be074d7913c..d33476ffa49d7f6388bb227f8a57cf115a74698f 100644
+--- a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
++++ b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
+@@ -2257,12 +2257,12 @@ public class CraftWorld extends CraftRegionAccessor implements World {
+ // Spigot start
+ @Override
+ public int getViewDistance() {
+- return getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance(); // Paper - replace old player chunk management
++ return this.getHandle().playerChunkLoader.getAPIViewDistance(); // Paper - replace player chunk loader
+ }
+
+ @Override
+ public int getSimulationDistance() {
+- return getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance(); // Paper - replace old player chunk management
++ return this.getHandle().playerChunkLoader.getAPITickDistance(); // Paper - replace player chunk loader
+ }
+ // Spigot end
+ // Paper start - view distance api
+@@ -2296,12 +2296,12 @@ public class CraftWorld extends CraftRegionAccessor implements World {
+
+ @Override
+ public int getSendViewDistance() {
+- return getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance();
++ return this.getHandle().playerChunkLoader.getAPISendViewDistance(); // Paper - replace player chunk loader
+ }
+
+ @Override
+ public void setSendViewDistance(int viewDistance) {
+- getHandle().getChunkSource().chunkMap.playerChunkManager.setSendDistance(viewDistance);
++ this.getHandle().chunkSource.chunkMap.setSendViewDistance(viewDistance); // Paper - replace player chunk loader
+ }
+ // Paper end - view distance api
+
+diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java
+index 7c43de6ad6bd7259c6bcb2a55e312e8abfcf546b..0351eb67bac6ce257f820af60aa3bba9f45da687 100644
+--- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java
++++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java
+@@ -188,44 +188,22 @@ public class CraftPlayer extends CraftHumanEntity implements Player {
+ // Paper start - implement view distances
+ @Override
+ public int getViewDistance() {
+- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+- if (data == null) {
+- return chunkMap.playerChunkManager.getTargetNoTickViewDistance();
+- }
+- return data.getTargetNoTickViewDistance();
++ return io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.getAPIViewDistance(this);
+ }
+
+ @Override
+ public void setViewDistance(int viewDistance) {
+- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+- if (data == null) {
+- throw new IllegalStateException("Player is not attached to world");
+- }
+-
+- data.setTargetNoTickViewDistance(viewDistance);
++ this.getHandle().setLoadViewDistance(viewDistance < 0 ? viewDistance : viewDistance + 1);
+ }
+
+ @Override
+ public int getSimulationDistance() {
+- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+- if (data == null) {
+- return chunkMap.playerChunkManager.getTargetTickViewDistance();
+- }
+- return data.getTargetTickViewDistance();
++ return io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.getAPITickViewDistance(this);
+ }
+
+ @Override
+ public void setSimulationDistance(int simulationDistance) {
+- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+- if (data == null) {
+- throw new IllegalStateException("Player is not attached to world");
+- }
+-
+- data.setTargetTickViewDistance(simulationDistance);
++ this.getHandle().setTickViewDistance(simulationDistance);
+ }
+
+ @Override
+@@ -240,23 +218,12 @@ public class CraftPlayer extends CraftHumanEntity implements Player {
+
+ @Override
+ public int getSendViewDistance() {
+- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+- if (data == null) {
+- return chunkMap.playerChunkManager.getTargetSendDistance();
+- }
+- return data.getTargetSendViewDistance();
++ return io.papermc.paper.chunk.system.RegionisedPlayerChunkLoader.getAPISendViewDistance(this);
+ }
+
+ @Override
+ public void setSendViewDistance(int viewDistance) {
+- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+- if (data == null) {
+- throw new IllegalStateException("Player is not attached to world");
+- }
+-
+- data.setTargetSendViewDistance(viewDistance);
++ this.getHandle().setSendViewDistance(viewDistance);
+ }
+ // Paper end - implement view distances
+
diff --git a/patches/server/0003-Make-ChunkStatus.EMPTY-not-rely-on-the-main-thread-f.patch b/patches/server/0003-Make-ChunkStatus.EMPTY-not-rely-on-the-main-thread-f.patch
new file mode 100644
index 0000000..0238da0
--- /dev/null
+++ b/patches/server/0003-Make-ChunkStatus.EMPTY-not-rely-on-the-main-thread-f.patch
@@ -0,0 +1,395 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Spottedleaf
+Date: Thu, 16 Feb 2023 16:50:05 -0800
+Subject: [PATCH] Make ChunkStatus.EMPTY not rely on the main thread for
+ completion
+
+In order to do this, we need to push the POI consistency checks
+to a later status. Since FULL is the only other status that
+uses the main thread, it can go there.
+
+The consistency checks are only really for when a desync occurs,
+so delaying the check only matters when the chunk data
+has desync'd. As long as the desync is sorted before the
+chunk is fully loaded (i.e. before setBlock can occur on
+a chunk), it should not matter.
+
+This change is primarily due to behavioural changes
+in the chunk task queue brought by region threading,
+which splits the queue into separate regions. As such,
+for a sync load to complete, the region owning the
+chunk must drain and execute the task while ticking.
+However, that is not always possible in region
+threading. Thus, removing the main thread reliance allows
+the chunk to progress without requiring a tick thread.
+Specifically, this allows far sync loads (outside of a
+specific region's bounds) to occur without issue - namely
+with structure searching.
+
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
+index fb42d776f15f735fb59e972e00e2b512c23a8387..300700477ee34bc22b31315825c0e40f61070cd5 100644
+--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
+@@ -2,6 +2,8 @@ package io.papermc.paper.chunk.system.scheduling;
+
+ import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+ import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.chunk.system.poi.PoiChunk;
+ import net.minecraft.server.level.ChunkMap;
+ import net.minecraft.server.level.ServerLevel;
+ import net.minecraft.world.level.chunk.ChunkAccess;
+@@ -9,10 +11,13 @@ import net.minecraft.world.level.chunk.ChunkStatus;
+ import net.minecraft.world.level.chunk.ImposterProtoChunk;
+ import net.minecraft.world.level.chunk.LevelChunk;
+ import net.minecraft.world.level.chunk.ProtoChunk;
++import org.slf4j.Logger;
+ import java.lang.invoke.VarHandle;
+
+ public final class ChunkFullTask extends ChunkProgressionTask implements Runnable {
+
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
+ protected final NewChunkHolder chunkHolder;
+ protected final ChunkAccess fromChunk;
+ protected final PrioritisedExecutor.PrioritisedTask convertToFullTask;
+@@ -35,6 +40,15 @@ public final class ChunkFullTask extends ChunkProgressionTask implements Runnabl
+ // See Vanilla protoChunkToFullChunk for what this function should be doing
+ final LevelChunk chunk;
+ try {
++ // moved from the load from nbt stage into here
++ final PoiChunk poiChunk = this.chunkHolder.getPoiChunk();
++ if (poiChunk == null) {
++ LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
++ } else {
++ poiChunk.load();
++ this.world.getPoiManager().checkConsistency(this.fromChunk);
++ }
++
+ if (this.fromChunk instanceof ImposterProtoChunk wrappedFull) {
+ chunk = wrappedFull.getWrapped();
+ } else {
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
+index 3df793f7e6bb67f40e7387a72fdafb912a7b1373..31657c387156f789d5c04ad3413d049bc32f1359 100644
+--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
+@@ -25,6 +25,7 @@ import org.slf4j.Logger;
+ import java.lang.invoke.VarHandle;
+ import java.util.Map;
+ import java.util.concurrent.atomic.AtomicInteger;
++import java.util.concurrent.atomic.AtomicLong;
+ import java.util.function.Consumer;
+
+ public final class ChunkLoadTask extends ChunkProgressionTask {
+@@ -34,9 +35,11 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
+ private final NewChunkHolder chunkHolder;
+ private final ChunkDataLoadTask loadTask;
+
+- private boolean cancelled;
++ private volatile boolean cancelled;
+ private NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask;
+ private NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask;
++ private GenericDataLoadTask.TaskResult<ChunkAccess, Throwable> loadResult;
++ private final AtomicInteger taskCountToComplete = new AtomicInteger(3); // one for poi, one for entity, and one for chunk data
+
+ protected ChunkLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ,
+ final NewChunkHolder chunkHolder, final PrioritisedExecutor.Priority priority) {
+@@ -44,10 +47,18 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
+ this.chunkHolder = chunkHolder;
+ this.loadTask = new ChunkDataLoadTask(scheduler, world, chunkX, chunkZ, priority);
+ this.loadTask.addCallback((final GenericDataLoadTask.TaskResult<ChunkAccess, Throwable> result) -> {
+- ChunkLoadTask.this.complete(result == null ? null : result.left(), result == null ? null : result.right());
++ ChunkLoadTask.this.loadResult = result; // must be written before the decrement in tryCompleteLoad()
++ ChunkLoadTask.this.tryCompleteLoad();
+ });
+ }
+
++ private void tryCompleteLoad() {
++ if (this.taskCountToComplete.decrementAndGet() == 0) {
++ final GenericDataLoadTask.TaskResult<ChunkAccess, Throwable> result = this.cancelled ? null : this.loadResult; // read only after the decrement
++ ChunkLoadTask.this.complete(result == null ? null : result.left(), result == null ? null : result.right());
++ }
++ }
++
+ @Override
+ public ChunkStatus getTargetStatus() {
+ return ChunkStatus.EMPTY;
+@@ -65,11 +76,8 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
+ final NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask;
+ final NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask;
+
+- final AtomicInteger count = new AtomicInteger();
+ final Consumer<GenericDataLoadTask.TaskResult<?, ?>> scheduleLoadTask = (final GenericDataLoadTask.TaskResult<?, ?> result) -> {
+- if (count.decrementAndGet() == 0) {
+- ChunkLoadTask.this.loadTask.schedule(false);
+- }
++ ChunkLoadTask.this.tryCompleteLoad();
+ };
+
+ // NOTE: it is IMPOSSIBLE for getOrLoadEntityData/getOrLoadPoiData to complete synchronously, because
+@@ -85,16 +93,16 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
+ }
+ if (!this.chunkHolder.isEntityChunkNBTLoaded()) {
+ entityLoadTask = this.chunkHolder.getOrLoadEntityData((Consumer)scheduleLoadTask);
+- count.setPlain(count.getPlain() + 1);
+ } else {
+ entityLoadTask = null;
++ this.taskCountToComplete.getAndDecrement(); // we know the chunk load is not done here, as it is not scheduled
+ }
+
+ if (!this.chunkHolder.isPoiChunkLoaded()) {
+ poiLoadTask = this.chunkHolder.getOrLoadPoiData((Consumer)scheduleLoadTask);
+- count.setPlain(count.getPlain() + 1);
+ } else {
+ poiLoadTask = null;
++ this.taskCountToComplete.getAndDecrement(); // we know the chunk load is not done here, as it is not scheduled
+ }
+
+ this.entityLoadTask = entityLoadTask;
+@@ -107,14 +115,11 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
+ entityLoadTask.schedule();
+ }
+
+ if (poiLoadTask != null) {
+ poiLoadTask.schedule();
+ }
+
+- if (entityLoadTask == null && poiLoadTask == null) {
+- // no need to wait on those, we can schedule now
+- this.loadTask.schedule(false);
+- }
++ this.loadTask.schedule(false);
+ }
+
+ @Override
+@@ -129,15 +134,20 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
+
+ /*
+ Note: The entityLoadTask/poiLoadTask do not complete when cancelled,
+- but this is fine because if they are successfully cancelled then
+- we will successfully cancel the load task, which will complete when cancelled
++ so we need to manually try to complete in those cases
++ It is also important to note that we set the cancelled field first, just in case
++ the chunk load task attempts to complete with a non-null value
+ */
+
+ if (this.entityLoadTask != null) {
+- this.entityLoadTask.cancel();
++ if (this.entityLoadTask.cancel()) {
++ this.tryCompleteLoad();
++ }
+ }
+ if (this.poiLoadTask != null) {
+- this.poiLoadTask.cancel();
++ if (this.poiLoadTask.cancel()) {
++ this.tryCompleteLoad();
++ }
+ }
+ this.loadTask.cancel();
+ }
+@@ -249,7 +259,7 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
+ }
+ }
+
+- public final class ChunkDataLoadTask extends CallbackDataLoadTask<ChunkSerializer.InProgressChunkHolder, ChunkAccess> {
++ public static final class ChunkDataLoadTask extends CallbackDataLoadTask<ChunkAccess, ChunkAccess> {
+ protected ChunkDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
+ final int chunkZ, final PrioritisedExecutor.Priority priority) {
+ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.CHUNK_DATA, priority);
+@@ -262,7 +272,7 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
+
+ @Override
+ protected boolean hasOnMain() {
+- return true;
++ return false;
+ }
+
+ @Override
+@@ -272,35 +282,30 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
+
+ @Override
+ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
+- return this.scheduler.createChunkTask(this.chunkX, this.chunkZ, run, priority);
++ throw new UnsupportedOperationException();
+ }
+
+ @Override
+- protected TaskResult<ChunkAccess, Throwable> completeOnMainOffMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) {
+- if (data != null) {
+- return null;
+- }
+-
+- final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk();
+- if (poiChunk == null) {
+- LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
+- } else if (!poiChunk.isLoaded()) {
+- // need to call poiChunk.load() on main
+- return null;
+- }
++ protected TaskResult<ChunkAccess, Throwable> completeOnMainOffMain(final ChunkAccess data, final Throwable throwable) {
++ throw new UnsupportedOperationException();
++ }
+
+- return new TaskResult<>(this.getEmptyChunk(), null);
++ private ProtoChunk getEmptyChunk() {
++ return new ProtoChunk(
++ new ChunkPos(this.chunkX, this.chunkZ), UpgradeData.EMPTY, this.world,
++ this.world.registryAccess().registryOrThrow(Registries.BIOME), (BlendingData)null
++ );
+ }
+
+ @Override
+- protected TaskResult<ChunkSerializer.InProgressChunkHolder, Throwable> runOffMain(final CompoundTag data, final Throwable throwable) {
++ protected TaskResult<ChunkAccess, Throwable> runOffMain(final CompoundTag data, final Throwable throwable) {
+ if (throwable != null) {
+ LOGGER.error("Failed to load chunk data for task: " + this.toString() + ", chunk data will be lost", throwable);
+- return new TaskResult<>(null, null);
++ return new TaskResult<>(this.getEmptyChunk(), null);
+ }
+
+ if (data == null) {
+- return new TaskResult<>(null, null);
++ return new TaskResult<>(this.getEmptyChunk(), null);
+ }
+
+ // need to convert data, and then deserialize it
+@@ -319,53 +324,18 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
+ this.world, chunkMap.getPoiManager(), chunkPos, converted, true
+ );
+
+- return new TaskResult<>(chunkHolder, null);
++ return new TaskResult<>(chunkHolder.protoChunk, null);
+ } catch (final ThreadDeath death) {
+ throw death;
+ } catch (final Throwable thr2) {
+ LOGGER.error("Failed to parse chunk data for task: " + this.toString() + ", chunk data will be lost", thr2);
+- return new TaskResult<>(null, thr2);
++ return new TaskResult<>(this.getEmptyChunk(), thr2);
+ }
+ }
+
+- private ProtoChunk getEmptyChunk() {
+- return new ProtoChunk(
+- new ChunkPos(this.chunkX, this.chunkZ), UpgradeData.EMPTY, this.world,
+- this.world.registryAccess().registryOrThrow(Registries.BIOME), (BlendingData)null
+- );
+- }
+-
+ @Override
+- protected TaskResult<ChunkAccess, Throwable> runOnMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) {
+- final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk();
+- if (poiChunk == null) {
+- LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
+- } else {
+- poiChunk.load();
+- }
+-
+- if (data == null || data.protoChunk == null) {
+- // throwable could be non-null, but the off-main task will print its exceptions - so we don't need to care,
+- // it's handled already
+-
+- return new TaskResult<>(this.getEmptyChunk(), null);
+- }
+-
+- // have tasks to run (at this point, it's just the POI consistency checking)
+- try {
+- if (data.tasks != null) {
+- for (int i = 0, len = data.tasks.size(); i < len; ++i) {
+- data.tasks.poll().run();
+- }
+- }
+-
+- return new TaskResult<>(data.protoChunk, null);
+- } catch (final ThreadDeath death) {
+- throw death;
+- } catch (final Throwable thr2) {
+- LOGGER.error("Failed to parse main tasks for task " + this.toString() + ", chunk data will be lost", thr2);
+- return new TaskResult<>(this.getEmptyChunk(), null);
+- }
++ protected TaskResult<ChunkAccess, Throwable> runOnMain(final ChunkAccess data, final Throwable throwable) {
++ throw new UnsupportedOperationException();
+ }
+ }
+
+diff --git a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java
+index 8950b220b9a3512cd4667beb7bdec0e82e07edc6..9be85eb0abec02bc0e0eded71c34ab1c565c63e7 100644
+--- a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java
++++ b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java
+@@ -328,6 +328,12 @@ public class PoiManager extends SectionStorage<PoiSection> {
+ }
+ }
+ }
++
++ public void checkConsistency(net.minecraft.world.level.chunk.ChunkAccess chunk) {
++ for (LevelChunkSection section : chunk.getSections()) {
++ this.checkConsistencyWithBlocks(chunk.getPos(), section);
++ }
++ }
+ // Paper end - rewrite chunk system
+
+ public void checkConsistencyWithBlocks(ChunkPos chunkPos, LevelChunkSection chunkSection) {
+diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
+index d4c4d37bcef14e392739d9aae9e20b7d69b05c12..256642f2e2aa66f7e8c00cae91a75060a8817c9c 100644
+--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
++++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
+@@ -122,13 +122,11 @@ public class ChunkSerializer {
+ public static final class InProgressChunkHolder {
+
+ public final ProtoChunk protoChunk;
+- public final java.util.ArrayDeque<Runnable> tasks;
+
+ public CompoundTag poiData;
+
+- public InProgressChunkHolder(final ProtoChunk protoChunk, final java.util.ArrayDeque<Runnable> tasks) {
++ public InProgressChunkHolder(final ProtoChunk protoChunk) {
+ this.protoChunk = protoChunk;
+- this.tasks = tasks;
+ }
+ }
+ // Paper end
+@@ -136,7 +134,6 @@ public class ChunkSerializer {
+ public static ProtoChunk read(ServerLevel world, PoiManager poiStorage, ChunkPos chunkPos, CompoundTag nbt) {
+ // Paper start - add variant for async calls
+ InProgressChunkHolder holder = loadChunk(world, poiStorage, chunkPos, nbt, true);
+- holder.tasks.forEach(Runnable::run);
+ return holder.protoChunk;
+ }
+
+@@ -145,7 +142,6 @@ public class ChunkSerializer {
+ private static final boolean JUST_CORRUPT_IT = Boolean.getBoolean("Paper.ignoreWorldDataVersion");
+ // Paper end
+ public static InProgressChunkHolder loadChunk(ServerLevel world, PoiManager poiStorage, ChunkPos chunkPos, CompoundTag nbt, boolean distinguish) {
+- java.util.ArrayDeque<Runnable> tasksToExecuteOnMain = new java.util.ArrayDeque<>();
+ // Paper end
+ // Paper start - Do NOT attempt to load chunks saved with newer versions
+ if (nbt.contains("DataVersion", 99)) {
+@@ -223,9 +219,7 @@ public class ChunkSerializer {
+ LevelChunkSection chunksection = new LevelChunkSection(b0, datapaletteblock, (PalettedContainer) object); // CraftBukkit - read/write
+
+ achunksection[k] = chunksection;
+- tasksToExecuteOnMain.add(() -> { // Paper - delay this task since we're executing off-main
+- poiStorage.checkConsistencyWithBlocks(chunkPos, chunksection);
+- }); // Paper - delay this task since we're executing off-main
++ // Paper - rewrite chunk system - moved to final load stage
+ }
+
+ boolean flag3 = nbttagcompound1.contains("BlockLight", 7);
+@@ -403,7 +397,7 @@ public class ChunkSerializer {
+ }
+
+ if (chunkstatus_type == ChunkStatus.ChunkType.LEVELCHUNK) {
+- return new InProgressChunkHolder(new ImposterProtoChunk((LevelChunk) object1, false), tasksToExecuteOnMain); // Paper - Async chunk loading
++ return new InProgressChunkHolder(new ImposterProtoChunk((LevelChunk) object1, false)); // Paper - Async chunk loading
+ } else {
+ ProtoChunk protochunk1 = (ProtoChunk) object1;
+
+@@ -446,7 +440,7 @@ public class ChunkSerializer {
+ protochunk1.setCarvingMask(worldgenstage_features, new CarvingMask(nbttagcompound4.getLongArray(s1), ((ChunkAccess) object1).getMinBuildHeight()));
+ }
+
+- return new InProgressChunkHolder(protochunk1, tasksToExecuteOnMain); // Paper - Async chunk loading
++ return new InProgressChunkHolder(protochunk1); // Paper - Async chunk loading
+ }
+ }
+
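
The rework above replaces ChunkLoadTask's old schedule-time counter with a single join counter: it starts at three (POI, entity, chunk data), each sub-load decrements it exactly once, and only the decrement that reaches zero publishes the result. A minimal standalone sketch of that pattern, using hypothetical names rather than the Folia types:

    import java.util.concurrent.atomic.AtomicInteger;

    // Join-counter sketch: three sub-loads each decrement once, and only the
    // thread whose decrement reaches zero completes the load. cancel() stands
    // in for a successfully cancelled sub-load that will never decrement itself.
    final class JoinCounterSketch {
        private final AtomicInteger remaining = new AtomicInteger(3);
        private volatile boolean cancelled;
        private volatile String result; // written before the final decrement

        void completeChunkData(final String data) {
            this.result = data; // must happen-before the decrement in tryComplete()
            this.tryComplete();
        }

        void completePoi() { this.tryComplete(); }

        void completeEntities() { this.tryComplete(); }

        void cancel() {
            this.cancelled = true; // set before decrementing, as in cancel() above
            this.tryComplete();
        }

        private void tryComplete() {
            if (this.remaining.decrementAndGet() == 0) {
                // exactly one caller observes zero, so completion runs once
                System.out.println(this.cancelled ? "load cancelled" : this.result);
            }
        }
    }
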
diff --git a/patches/server/0004-Threaded-Regions.patch b/patches/server/0004-Threaded-Regions.patch
new file mode 100644
index 0000000..6d27f0f
--- /dev/null
+++ b/patches/server/0004-Threaded-Regions.patch
@@ -0,0 +1,20522 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Spottedleaf
+Date: Sun, 2 Oct 2022 21:28:53 -0700
+Subject: [PATCH] Threaded Regions
+
+Connection thread-safety fixes
+
+- send packet
+- pending addition
+
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/collection/MultiThreadedQueue.java b/src/main/java/ca/spottedleaf/concurrentutil/collection/MultiThreadedQueue.java
+index f4415f782b32fed25da98e44b172f717c4d46e34..ba7c24b3627a1827721d2462add15fdd4adbed90 100644
+--- a/src/main/java/ca/spottedleaf/concurrentutil/collection/MultiThreadedQueue.java
++++ b/src/main/java/ca/spottedleaf/concurrentutil/collection/MultiThreadedQueue.java
+@@ -392,6 +392,24 @@ public class MultiThreadedQueue<E> implements Queue<E> {
+ }
+ }
+
++ /**
++ * Returns whether this queue is currently add-blocked. That is, whether {@link #add(Object)} and friends will return {@code false}.
++ */
++ public boolean isAddBlocked() {
++ for (LinkedNode<E> tail = this.getTailOpaque();;) {
++ LinkedNode<E> next = tail.getNextVolatile();
++ if (next == null) {
++ return false;
++ }
++
++ if (next == tail) {
++ return true;
++ }
++
++ tail = next;
++ }
++ }
++
+ /**
+ * Atomically removes the head from this queue if it exists, otherwise prevents additions to this queue if no
+ * head is removed.
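
The new isAddBlocked() walks the node chain using the queue's existing sentinel convention: a node whose next field points to itself marks the queue as closed to additions. A reduced sketch of that check, using hypothetical standalone types rather than the classes above:

    // Self-linked-tail sketch: null next means the chain is still open,
    // a node linked to itself means the queue is add-blocked, and anything
    // else means the tail snapshot was stale and must be chased forward.
    final class NodeSketch {
        volatile NodeSketch next;

        static boolean isAddBlocked(final NodeSketch tail) {
            for (NodeSketch curr = tail;;) {
                final NodeSketch next = curr.next;
                if (next == null) {
                    return false; // true end of the chain: still accepting adds
                }
                if (next == curr) {
                    return true; // self-linked sentinel: add-blocked
                }
                curr = next;
            }
        }
    }
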
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/lock/ImproveReentrantLock.java b/src/main/java/ca/spottedleaf/concurrentutil/lock/ImproveReentrantLock.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..9df9881396f4a69b51acaae562b12b8ce0a48443
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/concurrentutil/lock/ImproveReentrantLock.java
+@@ -0,0 +1,139 @@
++package ca.spottedleaf.concurrentutil.lock;
++
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import java.lang.invoke.VarHandle;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.locks.AbstractQueuedSynchronizer;
++import java.util.concurrent.locks.Condition;
++import java.util.concurrent.locks.Lock;
++
++/**
++ * Implementation of {@link Lock} that should outperform {@link java.util.concurrent.locks.ReentrantLock}.
++ * The lock is considered a non-fair lock, as specified by {@link java.util.concurrent.locks.ReentrantLock},
++ * and additionally does not support the creation of Conditions.
++ *
++ *
++ * Specifically, this implementation is careful to avoid synchronisation penalties when multi-acquiring and
++ * multi-releasing locks from the same thread, and additionally avoids unnecessary synchronisation penalties
++ * when releasing the lock.
++ *
++ */
++public class ImproveReentrantLock implements Lock {
++
++ private final InternalLock lock = new InternalLock();
++
++ private static final class InternalLock extends AbstractQueuedSynchronizer {
++
++ private volatile Thread owner;
++ private static final VarHandle OWNER_HANDLE = ConcurrentUtil.getVarHandle(InternalLock.class, "owner", Thread.class);
++ private int count;
++
++ private Thread getOwnerPlain() {
++ return (Thread)OWNER_HANDLE.get(this);
++ }
++
++ private Thread getOwnerVolatile() {
++ return (Thread)OWNER_HANDLE.getVolatile(this);
++ }
++
++ private void setOwnerRelease(final Thread to) {
++ OWNER_HANDLE.setRelease(this, to);
++ }
++
++ private void setOwnerVolatile(final Thread to) {
++ OWNER_HANDLE.setVolatile(this, to);
++ }
++
++ private Thread compareAndExchangeOwnerVolatile(final Thread expect, final Thread update) {
++ return (Thread)OWNER_HANDLE.compareAndExchange(this, expect, update);
++ }
++
++ @Override
++ protected final boolean tryAcquire(int acquires) {
++ final Thread current = Thread.currentThread();
++ final Thread owner = this.getOwnerVolatile();
++
++ // When trying to blind acquire the lock, using just compare and exchange is faster
++ // than reading the owner field first - but comes at the cost of performing the compare and exchange
++ // even if the current thread owns the lock
++ if ((owner == null && null == this.compareAndExchangeOwnerVolatile(null, current)) || owner == current) {
++ this.count += acquires;
++ return true;
++ }
++
++ return false;
++ }
++
++ @Override
++ protected final boolean tryRelease(int releases) {
++ if (this.getOwnerPlain() == Thread.currentThread()) {
++ final int newCount = this.count -= releases;
++ if (newCount == 0) {
++ // When the caller, which is release(), attempts to signal the next node, it will use volatile
++ // to retrieve the node and status.
++ // Let's say that we have written this field null as release, and then checked for a next node
++ // using volatile and then determined there are no waiters.
++ // While a call to tryAcquire() can fail for another thread since the write may not
++ // publish yet, once the thread adds itself to the waiters list it will synchronise with
++ // the write to the field, since the volatile write to put the thread on the waiter list
++ // will synchronise with the volatile read we did earlier to check for any
++ // waiters.
++ this.setOwnerRelease(null);
++ return true;
++ }
++ return false;
++ }
++ throw new IllegalMonitorStateException();
++ }
++ }
++
++ /**
++ * Returns the thread that owns the lock, or returns {@code null} if there is no such thread.
++ */
++ public Thread getLockOwner() {
++ return this.lock.getOwnerVolatile();
++ }
++
++ /**
++ * Returns whether the current thread owns the lock.
++ */
++ public boolean isHeldByCurrentThread() {
++ return this.lock.getOwnerPlain() == Thread.currentThread();
++ }
++
++ @Override
++ public void lock() {
++ this.lock.acquire(1);
++ }
++
++ @Override
++ public void lockInterruptibly() throws InterruptedException {
++ if (Thread.interrupted()) {
++ throw new InterruptedException();
++ }
++ this.lock.acquireInterruptibly(1);
++ }
++
++ @Override
++ public boolean tryLock() {
++ return this.lock.tryAcquire(1);
++ }
++
++ @Override
++ public boolean tryLock(final long time, final TimeUnit unit) throws InterruptedException {
++ if (Thread.interrupted()) {
++ throw new InterruptedException();
++ }
++ return this.lock.tryAcquire(1) || this.lock.tryAcquireNanos(1, unit.toNanos(time));
++ }
++
++ @Override
++ public void unlock() {
++ this.lock.release(1);
++ }
++
++ @Override
++ public Condition newCondition() {
++ throw new UnsupportedOperationException();
++ }
++}
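
Since ImproveReentrantLock implements java.util.concurrent.locks.Lock (minus conditions), callers use the standard try/finally idiom. A brief usage sketch with a hypothetical guarded counter:

    import java.util.concurrent.locks.Lock;

    // Standard Lock idiom; the ImproveReentrantLock above is used like any
    // other Lock implementation, and reentry from the owning thread is allowed.
    final class GuardedCounter {
        private final Lock lock; // e.g. new ImproveReentrantLock()
        private int value;

        GuardedCounter(final Lock lock) {
            this.lock = lock;
        }

        int increment() {
            this.lock.lock();
            try {
                return ++this.value;
            } finally {
                this.lock.unlock(); // always release, even on exception
            }
        }
    }
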
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/lock/RBLock.java b/src/main/java/ca/spottedleaf/concurrentutil/lock/RBLock.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..793a7326141b7d83395585b3d32b0a7e8a6238a7
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/concurrentutil/lock/RBLock.java
+@@ -0,0 +1,303 @@
++package ca.spottedleaf.concurrentutil.lock;
++
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import java.lang.invoke.VarHandle;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.locks.Condition;
++import java.util.concurrent.locks.Lock;
++import java.util.concurrent.locks.LockSupport;
++
++// ReentrantBiasedLock
++public final class RBLock implements Lock {
++
++ private volatile LockWaiter owner;
++ private static final VarHandle OWNER_HANDLE = ConcurrentUtil.getVarHandle(RBLock.class, "owner", LockWaiter.class);
++
++ private volatile LockWaiter tail;
++ private static final VarHandle TAIL_HANDLE = ConcurrentUtil.getVarHandle(RBLock.class, "tail", LockWaiter.class);
++
++ public RBLock() {
++ // we can have the initial state as if it was locked by this thread, then unlocked
++ final LockWaiter dummy = new LockWaiter(null, LockWaiter.STATE_BIASED, null);
++ this.setOwnerPlain(dummy);
++ // release ensures correct publishing
++ this.setTailRelease(dummy);
++ }
++
++ private LockWaiter getOwnerVolatile() {
++ return (LockWaiter)OWNER_HANDLE.getVolatile(this);
++ }
++
++ private void setOwnerPlain(final LockWaiter value) {
++ OWNER_HANDLE.set(this, value);
++ }
++
++ private void setOwnerRelease(final LockWaiter value) {
++ OWNER_HANDLE.setRelease(this, value);
++ }
++
++
++
++ private void setTailOpaque(final LockWaiter newTail) {
++ TAIL_HANDLE.setOpaque(this, newTail);
++ }
++
++ private void setTailRelease(final LockWaiter newTail) {
++ TAIL_HANDLE.setRelease(this, newTail);
++ }
++
++ private LockWaiter getTailOpaque() {
++ return (LockWaiter)TAIL_HANDLE.getOpaque(this);
++ }
++
++
++ private void appendWaiter(final LockWaiter waiter) {
++ // Similar to MultiThreadedQueue#appendList
++ int failures = 0;
++
++ for (LockWaiter currTail = this.getTailOpaque(), curr = currTail;;) {
++ /* It has been experimentally shown that placing the read before the backoff results in significantly greater performance */
++ /* It is likely due to a cache miss caused by another write to the next field */
++ final LockWaiter next = curr.getNextVolatile();
++
++ for (int i = 0; i < failures; ++i) {
++ Thread.onSpinWait();
++ }
++
++ if (next == null) {
++ final LockWaiter compared = curr.compareAndExchangeNextVolatile(null, waiter);
++
++ if (compared == null) {
++ /* Added */
++ /* Avoid CASing on tail more than we need to */
++ /* CAS to avoid setting an out-of-date tail */
++ if (this.getTailOpaque() == currTail) {
++ this.setTailOpaque(waiter);
++ }
++ return;
++ }
++
++ ++failures;
++ curr = compared;
++ continue;
++ }
++
++ if (curr == currTail) {
++ /* Tail is likely not up-to-date */
++ curr = next;
++ } else {
++ /* Try to update to tail */
++ if (currTail == (currTail = this.getTailOpaque())) {
++ curr = next;
++ } else {
++ curr = currTail;
++ }
++ }
++ }
++ }
++
++ // required that expected is already appended to the wait chain
++ private boolean tryAcquireBiased(final LockWaiter expected) {
++ final LockWaiter owner = this.getOwnerVolatile();
++ if (owner.getNextVolatile() == expected && owner.getStateVolatile() == LockWaiter.STATE_BIASED) {
++ this.setOwnerRelease(expected);
++ return true;
++ }
++ return false;
++ }
++
++ @Override
++ public void lock() {
++ final Thread currThread = Thread.currentThread();
++ final LockWaiter owner = this.getOwnerVolatile();
++
++ // try to fast acquire
++
++ final LockWaiter acquireObj;
++ boolean needAppend = true;
++
++ if (owner.getNextVolatile() != null) {
++ // unlikely we are able to fast acquire
++ acquireObj = new LockWaiter(currThread, 1, null);
++ } else {
++ // may be able to fast acquire the lock
++ if (owner.owner == currThread) {
++ final int oldState = owner.incrementState();
++ if (oldState == LockWaiter.STATE_BIASED) {
++ // in this case, we may not have the lock.
++ final LockWaiter next = owner.getNextVolatile();
++ if (next == null) {
++ // we win the lock
++ return;
++ } else {
++ // we have incremented the state, which means any tryAcquireBiased() will fail.
++ // The next waiter may be waiting for us, so we need to re-set our state and then
++ // try to push the lock to them.
++ // We cannot simply claim ownership of the lock, since we don't know if the next waiter saw
++ // the biased state
++ owner.setStateRelease(LockWaiter.STATE_BIASED);
++ LockSupport.unpark(next.owner);
++
++ acquireObj = new LockWaiter(currThread, 1, null);
++ // fall through to slower lock logic
++ }
++ } else {
++ // we already have the lock
++ return;
++ }
++ } else {
++ acquireObj = new LockWaiter(currThread, 1, null);
++ if (owner.getStateVolatile() == LockWaiter.STATE_BIASED) {
++ // we may be able to quickly acquire the lock
++ if (owner.getNextVolatile() == null && null == owner.compareAndExchangeNextVolatile(null, acquireObj)) {
++ if (owner.getStateVolatile() == LockWaiter.STATE_BIASED) {
++ this.setOwnerRelease(acquireObj);
++ return;
++ } else {
++ needAppend = false;
++ // we failed to acquire, but we can block instead - we did CAS to the next immediate owner
++ }
++ }
++ } // else: fall through to append and wait code
++ }
++ }
++
++ if (needAppend) {
++ this.appendWaiter(acquireObj); // append to end of waiters
++ }
++
++ // failed to fast acquire, so now we may need to block
++ final int spinAttempts = 10;
++ for (int i = 0; i < spinAttempts; ++i) {
++ for (int k = 0; k <= i; ++k) {
++ Thread.onSpinWait();
++ }
++ if (this.tryAcquireBiased(acquireObj)) {
++ // acquired
++ return;
++ }
++ }
++
++ // slow acquire
++ while (!this.tryAcquireBiased(acquireObj)) {
++ LockSupport.park(this);
++ }
++ }
++
++ /**
++ * {@inheritDoc}
++ * @throws IllegalMonitorStateException If the current thread does not own the lock.
++ */
++ @Override
++ public void unlock() {
++ final LockWaiter owner = this.getOwnerVolatile();
++
++ final int oldState;
++ if (owner.owner != Thread.currentThread() || (oldState = owner.getStatePlain()) <= 0) {
++ throw new IllegalMonitorStateException();
++ }
++
++ owner.setStateRelease(oldState - 1);
++
++ if (oldState != 1) {
++ return;
++ }
++
++ final LockWaiter next = owner.getNextVolatile();
++
++ if (next == null) {
++ // we can leave the lock in biased state, which will save a CAS
++ return;
++ }
++
++ // we have TWO cases:
++ // waiter saw the lock in biased state
++ // waiter did not see the lock in biased state
++ // the problem is that if the waiter saw the lock in the biased state, then it now owns the lock. but if it did not,
++ // then we still own the lock.
++
++ // However, by unparking always, the waiter will try to acquire the biased lock from us.
++ LockSupport.unpark(next.owner);
++ }
++
++ @Override
++ public void lockInterruptibly() throws InterruptedException {
++ throw new UnsupportedOperationException();
++ }
++
++ @Override
++ public boolean tryLock() {
++ throw new UnsupportedOperationException();
++ }
++
++ @Override
++ public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
++ throw new UnsupportedOperationException();
++ }
++
++ @Override
++ public Condition newCondition() {
++ throw new UnsupportedOperationException();
++ }
++
++ static final class LockWaiter {
++
++ static final int STATE_BIASED = 0;
++
++ private volatile LockWaiter next;
++ private volatile int state;
++ private Thread owner;
++
++ private static final VarHandle NEXT_HANDLE = ConcurrentUtil.getVarHandle(LockWaiter.class, "next", LockWaiter.class);
++ private static final VarHandle STATE_HANDLE = ConcurrentUtil.getVarHandle(LockWaiter.class, "state", int.class);
++
++
++ private LockWaiter compareAndExchangeNextVolatile(final LockWaiter expect, final LockWaiter update) {
++ return (LockWaiter)NEXT_HANDLE.compareAndExchange((LockWaiter)this, expect, update);
++ }
++
++ private void setNextPlain(final LockWaiter next) {
++ NEXT_HANDLE.set((LockWaiter)this, next);
++ }
++
++ private LockWaiter getNextOpaque() {
++ return (LockWaiter)NEXT_HANDLE.getOpaque((LockWaiter)this);
++ }
++
++ private LockWaiter getNextVolatile() {
++ return (LockWaiter)NEXT_HANDLE.getVolatile((LockWaiter)this);
++ }
++
++
++
++ private int getStatePlain() {
++ return (int)STATE_HANDLE.get((LockWaiter)this);
++ }
++
++ private int getStateVolatile() {
++ return (int)STATE_HANDLE.getVolatile((LockWaiter)this);
++ }
++
++ private void setStatePlain(final int value) {
++ STATE_HANDLE.set((LockWaiter)this, value);
++ }
++
++ private void setStateRelease(final int value) {
++ STATE_HANDLE.setRelease((LockWaiter)this, value);
++ }
++
++ public LockWaiter(final Thread owner, final int initialState, final LockWaiter next) {
++ this.owner = owner;
++ this.setStatePlain(initialState);
++ this.setNextPlain(next);
++ }
++
++ public int incrementState() {
++ final int old = this.getStatePlain();
++ // Technically, we DO NOT need release for old != BIASED. But we care about optimising only for x86,
++ // which is a simple MOV for everything but volatile.
++ this.setStateRelease(old + 1);
++ return old;
++ }
++ }
++}
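
The unlock path above always unparks the next waiter because the releasing thread cannot tell whether that waiter saw the lock in the biased state; the waiter compensates by re-testing its acquire condition in a loop around park. A heavily simplified sketch of that park/unpark discipline (illustrative only, not the RBLock internals):

    import java.util.concurrent.ConcurrentLinkedQueue;
    import java.util.concurrent.atomic.AtomicReference;
    import java.util.concurrent.locks.LockSupport;

    // Park/unpark handoff sketch: release always wakes the head waiter, and
    // waiters loop because an unpark may be delivered before the matching park.
    final class HandoffLockSketch {
        private final AtomicReference<Thread> owner = new AtomicReference<>();
        private final ConcurrentLinkedQueue<Thread> waiters = new ConcurrentLinkedQueue<>();

        void lock() {
            final Thread current = Thread.currentThread();
            if (this.owner.compareAndSet(null, current)) {
                return; // fast path: uncontended acquire
            }
            this.waiters.add(current);
            while (!this.owner.compareAndSet(null, current)) {
                LockSupport.park(this); // spurious wakeups are fine: we re-test
            }
            this.waiters.remove(current);
        }

        void unlock() {
            this.owner.set(null);
            final Thread next = this.waiters.peek();
            if (next != null) {
                LockSupport.unpark(next); // the waiter re-tests its condition
            }
        }
    }
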
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRInt2IntHashTable.java b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRInt2IntHashTable.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..7869cc177c95e26dd9e1d3db5b50e996956edb24
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRInt2IntHashTable.java
+@@ -0,0 +1,664 @@
++package ca.spottedleaf.concurrentutil.map;
++
++import ca.spottedleaf.concurrentutil.util.ArrayUtil;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import ca.spottedleaf.concurrentutil.util.Validate;
++import io.papermc.paper.util.IntegerUtil;
++import java.lang.invoke.VarHandle;
++import java.util.Arrays;
++import java.util.function.Consumer;
++import java.util.function.IntConsumer;
++
++public class SWMRInt2IntHashTable {
++
++ protected int size;
++
++ protected TableEntry[] table;
++
++ protected final float loadFactor;
++
++ protected static final VarHandle SIZE_HANDLE = ConcurrentUtil.getVarHandle(SWMRInt2IntHashTable.class, "size", int.class);
++ protected static final VarHandle TABLE_HANDLE = ConcurrentUtil.getVarHandle(SWMRInt2IntHashTable.class, "table", TableEntry[].class);
++
++ /* size */
++
++ protected final int getSizePlain() {
++ return (int)SIZE_HANDLE.get(this);
++ }
++
++ protected final int getSizeOpaque() {
++ return (int)SIZE_HANDLE.getOpaque(this);
++ }
++
++ protected final int getSizeAcquire() {
++ return (int)SIZE_HANDLE.getAcquire(this);
++ }
++
++ protected final void setSizePlain(final int value) {
++ SIZE_HANDLE.set(this, value);
++ }
++
++ protected final void setSizeOpaque(final int value) {
++ SIZE_HANDLE.setOpaque(this, value);
++ }
++
++ protected final void setSizeRelease(final int value) {
++ SIZE_HANDLE.setRelease(this, value);
++ }
++
++ /* table */
++
++ protected final TableEntry[] getTablePlain() {
++ //noinspection unchecked
++ return (TableEntry[])TABLE_HANDLE.get(this);
++ }
++
++ protected final TableEntry[] getTableAcquire() {
++ //noinspection unchecked
++ return (TableEntry[])TABLE_HANDLE.getAcquire(this);
++ }
++
++ protected final void setTablePlain(final TableEntry[] table) {
++ TABLE_HANDLE.set(this, table);
++ }
++
++ protected final void setTableRelease(final TableEntry[] table) {
++ TABLE_HANDLE.setRelease(this, table);
++ }
++
++ protected static final int DEFAULT_CAPACITY = 16;
++ protected static final float DEFAULT_LOAD_FACTOR = 0.75f;
++ protected static final int MAXIMUM_CAPACITY = Integer.MIN_VALUE >>> 1;
++
++ /**
++ * Constructs this map with a capacity of {@code 16} and load factor of {@code 0.75f}.
++ */
++ public SWMRInt2IntHashTable() {
++ this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR);
++ }
++
++ /**
++ * Constructs this map with the specified capacity and load factor of {@code 0.75f}.
++ * @param capacity specified initial capacity, > 0
++ */
++ public SWMRInt2IntHashTable(final int capacity) {
++ this(capacity, DEFAULT_LOAD_FACTOR);
++ }
++
++ /**
++ * Constructs this map with the specified capacity and load factor.
++ * @param capacity specified capacity, > 0
++ * @param loadFactor specified load factor, > 0 && finite
++ */
++ public SWMRInt2IntHashTable(final int capacity, final float loadFactor) {
++ final int tableSize = getCapacityFor(capacity);
++
++ if (loadFactor <= 0.0 || !Float.isFinite(loadFactor)) {
++ throw new IllegalArgumentException("Invalid load factor: " + loadFactor);
++ }
++
++ //noinspection unchecked
++ final TableEntry[] table = new TableEntry[tableSize];
++ this.setTablePlain(table);
++
++ if (tableSize == MAXIMUM_CAPACITY) {
++ this.threshold = -1;
++ } else {
++ this.threshold = getTargetCapacity(tableSize, loadFactor);
++ }
++
++ this.loadFactor = loadFactor;
++ }
++
++ /**
++ * Constructs this map with a capacity of {@code 16} or the specified map's size, whichever is larger, and
++ * with a load factor of {@code 0.75f}.
++ * All of the specified map's entries are copied into this map.
++ * @param other The specified map.
++ */
++ public SWMRInt2IntHashTable(final SWMRInt2IntHashTable other) {
++ this(DEFAULT_CAPACITY, DEFAULT_LOAD_FACTOR, other);
++ }
++
++ /**
++ * Constructs this map with a minimum capacity of the specified capacity or the specified map's size, whichever is larger, and
++ * with a load factor of {@code 0.75f}.
++ * All of the specified map's entries are copied into this map.
++ * @param capacity specified capacity, > 0
++ * @param other The specified map.
++ */
++ public SWMRInt2IntHashTable(final int capacity, final SWMRInt2IntHashTable other) {
++ this(capacity, DEFAULT_LOAD_FACTOR, other);
++ }
++
++ /**
++ * Constructs this map with a min capacity of the specified capacity or the specified map's size, whichever is larger, and
++ * with the specified load factor.
++ * All of the specified map's entries are copied into this map.
++ * @param capacity specified capacity, > 0
++ * @param loadFactor specified load factor, > 0 && finite
++ * @param other The specified map.
++ */
++ public SWMRInt2IntHashTable(final int capacity, final float loadFactor, final SWMRInt2IntHashTable other) {
++ this(Math.max(Validate.notNull(other, "Null map").size(), capacity), loadFactor);
++ this.putAll(other);
++ }
++
++ public final float getLoadFactor() {
++ return this.loadFactor;
++ }
++
++ protected static int getCapacityFor(final int capacity) {
++ if (capacity <= 0) {
++ throw new IllegalArgumentException("Invalid capacity: " + capacity);
++ }
++ if (capacity >= MAXIMUM_CAPACITY) {
++ return MAXIMUM_CAPACITY;
++ }
++ return IntegerUtil.roundCeilLog2(capacity);
++ }
++
++ /** Callers must still use acquire when reading the value of the entry. */
++ protected final TableEntry getEntryForOpaque(final int key) {
++ final int hash = SWMRInt2IntHashTable.getHash(key);
++ final TableEntry[] table = this.getTableAcquire();
++
++ for (TableEntry curr = ArrayUtil.getOpaque(table, hash & (table.length - 1)); curr != null; curr = curr.getNextOpaque()) {
++ if (key == curr.key) {
++ return curr;
++ }
++ }
++
++ return null;
++ }
++
++ protected final TableEntry getEntryForPlain(final int key) {
++ final int hash = SWMRInt2IntHashTable.getHash(key);
++ final TableEntry[] table = this.getTablePlain();
++
++ for (TableEntry curr = table[hash & (table.length - 1)]; curr != null; curr = curr.getNextPlain()) {
++ if (key == curr.key) {
++ return curr;
++ }
++ }
++
++ return null;
++ }
++
++ /* MT-Safe */
++
++ /** must be deterministic given a key */
++ protected static int getHash(final int key) {
++ return it.unimi.dsi.fastutil.HashCommon.mix(key);
++ }
++
++ // returns -1 if capacity*loadFactor is too large
++ protected static int getTargetCapacity(final int capacity, final float loadFactor) {
++ final double ret = (double)capacity * (double)loadFactor;
++ if (Double.isInfinite(ret) || ret >= ((double)Integer.MAX_VALUE)) {
++ return -1;
++ }
++
++ return (int)ret;
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ @Override
++ public boolean equals(final Object obj) {
++ if (this == obj) {
++ return true;
++ }
++ /* Make no attempt to deal with concurrent modifications */
++ if (!(obj instanceof SWMRInt2IntHashTable)) {
++ return false;
++ }
++ final SWMRInt2IntHashTable other = (SWMRInt2IntHashTable)obj;
++
++ if (this.size() != other.size()) {
++ return false;
++ }
++
++ final TableEntry[] table = this.getTableAcquire();
++
++ for (int i = 0, len = table.length; i < len; ++i) {
++ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ final int value = curr.getValueAcquire();
++
++ final int otherValue = other.get(curr.key);
++ if (value != otherValue) {
++ return false;
++ }
++ }
++ }
++
++ return true;
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ @Override
++ public int hashCode() {
++ /* Make no attempt to deal with concurrent modifications */
++ int hash = 0;
++ final TableEntry[] table = this.getTableAcquire();
++
++ for (int i = 0, len = table.length; i < len; ++i) {
++ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ hash += curr.hashCode();
++ }
++ }
++
++ return hash;
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ @Override
++ public String toString() {
++ final StringBuilder builder = new StringBuilder(64);
++ builder.append("SingleWriterMultiReaderHashMap:{");
++
++ this.forEach((final int key, final int value) -> {
++ builder.append("{key: \"").append(key).append("\", value: \"").append(value).append("\"}");
++ });
++
++ return builder.append('}').toString();
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ @Override
++ public SWMRInt2IntHashTable clone() {
++ return new SWMRInt2IntHashTable(this.getTableAcquire().length, this.loadFactor, this);
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public void forEach(final Consumer<? super SWMRInt2IntHashTable.TableEntry> action) {
++ Validate.notNull(action, "Null action");
++
++ final TableEntry[] table = this.getTableAcquire();
++ for (int i = 0, len = table.length; i < len; ++i) {
++ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ action.accept(curr);
++ }
++ }
++ }
++
++ @FunctionalInterface
++ public static interface BiIntIntConsumer {
++ public void accept(final int key, final int value);
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public void forEach(final BiIntIntConsumer action) {
++ Validate.notNull(action, "Null action");
++
++ final TableEntry[] table = this.getTableAcquire();
++ for (int i = 0, len = table.length; i < len; ++i) {
++ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ final int value = curr.getValueAcquire();
++
++ action.accept(curr.key, value);
++ }
++ }
++ }
++
++ /**
++ * Provides the specified consumer with all keys contained within this map.
++ * @param action The specified consumer.
++ */
++ public void forEachKey(final IntConsumer action) {
++ Validate.notNull(action, "Null action");
++
++ final TableEntry[] table = this.getTableAcquire();
++ for (int i = 0, len = table.length; i < len; ++i) {
++ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ action.accept(curr.key);
++ }
++ }
++ }
++
++ /**
++ * Provides the specified consumer with all values contained within this map. Equivalent to {@code map.values().forEach(Consumer)}.
++ * @param action The specified consumer.
++ */
++ public void forEachValue(final IntConsumer action) {
++ Validate.notNull(action, "Null action");
++
++ final TableEntry[] table = this.getTableAcquire();
++ for (int i = 0, len = table.length; i < len; ++i) {
++ for (TableEntry curr = ArrayUtil.getOpaque(table, i); curr != null; curr = curr.getNextOpaque()) {
++ final int value = curr.getValueAcquire();
++
++ action.accept(value);
++ }
++ }
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public int get(final int key) {
++ final TableEntry entry = this.getEntryForOpaque(key);
++ return entry == null ? 0 : entry.getValueAcquire();
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public boolean containsKey(final int key) {
++ final TableEntry entry = this.getEntryForOpaque(key);
++ return entry != null;
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public int getOrDefault(final int key, final int defaultValue) {
++ final TableEntry entry = this.getEntryForOpaque(key);
++
++ return entry == null ? defaultValue : entry.getValueAcquire();
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public int size() {
++ return this.getSizeAcquire();
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public boolean isEmpty() {
++ return this.getSizeAcquire() == 0;
++ }
++
++ /* Non-MT-Safe */
++
++ protected int threshold;
++
++ protected final void checkResize(final int minCapacity) {
++ if (minCapacity <= this.threshold || this.threshold < 0) {
++ return;
++ }
++
++ final TableEntry[] table = this.getTablePlain();
++ int newCapacity = minCapacity >= MAXIMUM_CAPACITY ? MAXIMUM_CAPACITY : IntegerUtil.roundCeilLog2(minCapacity);
++ if (newCapacity < 0) {
++ newCapacity = MAXIMUM_CAPACITY;
++ }
++ if (newCapacity <= table.length) {
++ if (newCapacity == MAXIMUM_CAPACITY) {
++ return;
++ }
++ newCapacity = table.length << 1;
++ }
++
++ //noinspection unchecked
++ final TableEntry[] newTable = new TableEntry[newCapacity];
++ final int indexMask = newCapacity - 1;
++
++ for (int i = 0, len = table.length; i < len; ++i) {
++ for (TableEntry entry = table[i]; entry != null; entry = entry.getNextPlain()) {
++ final int key = entry.key;
++ final int hash = SWMRInt2IntHashTable.getHash(key);
++ final int index = hash & indexMask;
++
++ /* we need to create a new entry since there could be reading threads */
++ final TableEntry insert = new TableEntry(key, entry.getValuePlain());
++
++ final TableEntry prev = newTable[index];
++
++ newTable[index] = insert;
++ insert.setNextPlain(prev);
++ }
++ }
++
++ if (newCapacity == MAXIMUM_CAPACITY) {
++ this.threshold = -1; /* No more resizing */
++ } else {
++ this.threshold = getTargetCapacity(newCapacity, this.loadFactor);
++ }
++ this.setTableRelease(newTable); /* use release to publish entries in table */
++ }
++
++ protected final int addToSize(final int num) {
++ final int newSize = this.getSizePlain() + num;
++
++ this.setSizeOpaque(newSize);
++ this.checkResize(newSize);
++
++ return newSize;
++ }
++
++ protected final int removeFromSize(final int num) {
++ final int newSize = this.getSizePlain() - num;
++
++ this.setSizeOpaque(newSize);
++
++ return newSize;
++ }
++
++ protected final int put(final int key, final int value, final boolean onlyIfAbsent) {
++ final TableEntry[] table = this.getTablePlain();
++ final int hash = SWMRInt2IntHashTable.getHash(key);
++ final int index = hash & (table.length - 1);
++
++ final TableEntry head = table[index];
++ if (head == null) {
++ final TableEntry insert = new TableEntry(key, value);
++ ArrayUtil.setRelease(table, index, insert);
++ this.addToSize(1);
++ return 0;
++ }
++
++ for (TableEntry curr = head;;) {
++ if (key == curr.key) {
++ if (onlyIfAbsent) {
++ return curr.getValuePlain();
++ }
++
++ final int currVal = curr.getValuePlain();
++ curr.setValueRelease(value);
++ return currVal;
++ }
++
++ final TableEntry next = curr.getNextPlain();
++ if (next != null) {
++ curr = next;
++ continue;
++ }
++
++ final TableEntry insert = new TableEntry(key, value);
++
++ curr.setNextRelease(insert);
++ this.addToSize(1);
++ return 0;
++ }
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public int put(final int key, final int value) {
++ return this.put(key, value, false);
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public int putIfAbsent(final int key, final int value) {
++ return this.put(key, value, true);
++ }
++
++ protected final int remove(final int key, final int hash) {
++ final TableEntry[] table = this.getTablePlain();
++ final int index = (table.length - 1) & hash;
++
++ final TableEntry head = table[index];
++ if (head == null) {
++ return 0;
++ }
++
++ if (head.key == key) {
++ ArrayUtil.setRelease(table, index, head.getNextPlain());
++ this.removeFromSize(1);
++
++ return head.getValuePlain();
++ }
++
++ for (TableEntry curr = head.getNextPlain(), prev = head; curr != null; prev = curr, curr = curr.getNextPlain()) {
++ if (key == curr.key) {
++ prev.setNextRelease(curr.getNextPlain());
++ this.removeFromSize(1);
++
++ return curr.getValuePlain();
++ }
++ }
++
++ return 0;
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public int remove(final int key) {
++ return this.remove(key, SWMRInt2IntHashTable.getHash(key));
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public void putAll(final SWMRInt2IntHashTable map) {
++ Validate.notNull(map, "Null map");
++
++ final int size = map.size();
++ this.checkResize(Math.max(this.getSizePlain() + size/2, size)); /* preemptively resize */
++ map.forEach(this::put);
++ }
++
++ /**
++ * {@inheritDoc}
++ *
++ * This call is non-atomic and the order in which entries are removed is undefined. The clear operation itself
++ * is release ordered, that is, a release fence is issued after the clear operation is performed.
++ *
++ */
++ public void clear() {
++ Arrays.fill(this.getTablePlain(), null);
++ this.setSizeRelease(0);
++ }
++
++ public static final class TableEntry {
++
++ protected final int key;
++ protected int value;
++
++ protected TableEntry next;
++
++ protected static final VarHandle VALUE_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "value", int.class);
++ protected static final VarHandle NEXT_HANDLE = ConcurrentUtil.getVarHandle(TableEntry.class, "next", TableEntry.class);
++
++ /* value */
++
++ protected final int getValuePlain() {
++ //noinspection unchecked
++ return (int)VALUE_HANDLE.get(this);
++ }
++
++ protected final int getValueAcquire() {
++ //noinspection unchecked
++ return (int)VALUE_HANDLE.getAcquire(this);
++ }
++
++ protected final void setValueRelease(final int to) {
++ VALUE_HANDLE.setRelease(this, to);
++ }
++
++ /* next */
++
++ protected final TableEntry getNextPlain() {
++ //noinspection unchecked
++ return (TableEntry)NEXT_HANDLE.get(this);
++ }
++
++ protected final TableEntry getNextOpaque() {
++ //noinspection unchecked
++ return (TableEntry)NEXT_HANDLE.getOpaque(this);
++ }
++
++ protected final void setNextPlain(final TableEntry next) {
++ NEXT_HANDLE.set(this, next);
++ }
++
++ protected final void setNextRelease(final TableEntry next) {
++ NEXT_HANDLE.setRelease(this, next);
++ }
++
++ protected TableEntry(final int key, final int value) {
++ this.key = key;
++ this.value = value;
++ }
++
++ public int getKey() {
++ return this.key;
++ }
++
++ public int getValue() {
++ return this.getValueAcquire();
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ public int setValue(final int value) {
++ final int curr = this.getValuePlain();
++
++ this.setValueRelease(value);
++ return curr;
++ }
++
++ protected static int hash(final int key, final int value) {
++ return SWMRInt2IntHashTable.getHash(key) ^ SWMRInt2IntHashTable.getHash(value);
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ @Override
++ public int hashCode() {
++ return hash(this.key, this.getValueAcquire());
++ }
++
++ /**
++ * {@inheritDoc}
++ */
++ @Override
++ public boolean equals(final Object obj) {
++ if (this == obj) {
++ return true;
++ }
++
++ if (!(obj instanceof TableEntry)) {
++ return false;
++ }
++ final TableEntry other = (TableEntry)obj;
++ final int otherKey = other.getKey();
++ final int thisKey = this.getKey();
++ final int otherValue = other.getValueAcquire();
++ final int thisVal = this.getValueAcquire();
++ return (thisKey == otherKey) && (thisVal == otherValue);
++ }
++ }
++
++}
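
Like the other SWMR collections in this package, this table assumes exactly one writer: all mutations must come from a single thread (or be externally serialized), while reads may run concurrently from any number of threads. A usage sketch under that assumption:

    import ca.spottedleaf.concurrentutil.map.SWMRInt2IntHashTable;

    // One writer thread mutates; a reader polls concurrently without locking.
    final class SwmrUsageExample {
        public static void main(final String[] args) throws InterruptedException {
            final SWMRInt2IntHashTable table = new SWMRInt2IntHashTable();

            final Thread reader = new Thread(() -> {
                for (int i = 0; i < 1_000; ++i) {
                    table.getOrDefault(42, -1); // safe concurrently with the writer
                }
            });
            reader.start();

            // the main thread is the single writer
            for (int i = 0; i < 1_000; ++i) {
                table.put(42, i);
            }

            reader.join();
            System.out.println(table.get(42)); // 999
        }
    }
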
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRLong2ObjectHashTable.java b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRLong2ObjectHashTable.java
+index 1e98f778ffa0a7bb00ebccaaa8bde075183e41f0..aebe82cbe8bc20e5f4260a871d7b620e5092b2c9 100644
+--- a/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRLong2ObjectHashTable.java
++++ b/src/main/java/ca/spottedleaf/concurrentutil/map/SWMRLong2ObjectHashTable.java
+@@ -534,6 +534,44 @@ public class SWMRLong2ObjectHashTable<V> {
+ return null;
+ }
+
++ protected final V remove(final long key, final int hash, final V expect) {
++ final TableEntry<V>[] table = this.getTablePlain();
++ final int index = (table.length - 1) & hash;
++
++ final TableEntry<V> head = table[index];
++ if (head == null) {
++ return null;
++ }
++
++ if (head.key == key) {
++ final V val = head.value;
++ if (val == expect || val.equals(expect)) {
++ ArrayUtil.setRelease(table, index, head.getNextPlain());
++ this.removeFromSize(1);
++
++ return head.getValuePlain();
++ } else {
++ return null;
++ }
++ }
++
++ for (TableEntry<V> curr = head.getNextPlain(), prev = head; curr != null; prev = curr, curr = curr.getNextPlain()) {
++ if (key == curr.key) {
++ final V val = curr.value;
++ if (val == expect || val.equals(expect)) {
++ prev.setNextRelease(curr.getNextPlain());
++ this.removeFromSize(1);
++
++ return curr.getValuePlain();
++ } else {
++ return null;
++ }
++ }
++ }
++
++ return null;
++ }
++
+ /**
+ * {@inheritDoc}
+ */
+@@ -541,6 +579,10 @@ public class SWMRLong2ObjectHashTable {
+ return this.remove(key, SWMRLong2ObjectHashTable.getHash(key));
+ }
+
++ public boolean remove(final long key, final V expect) {
++ return this.remove(key, SWMRLong2ObjectHashTable.getHash(key), expect) != null;
++ }
++
+ /**
+ * {@inheritDoc}
+ */
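
The new remove(key, expect) overload mirrors ConcurrentMap#remove(Object, Object): the mapping is removed only if it still holds the expected value, letting the writer retire an entry without clobbering a replacement. A short sketch of the intended use:

    import ca.spottedleaf.concurrentutil.map.SWMRLong2ObjectHashTable;

    // Conditional remove: the mapping goes away only when the stored value
    // still matches the expected one.
    final class ConditionalRemoveExample {
        public static void main(final String[] args) {
            final SWMRLong2ObjectHashTable<String> table = new SWMRLong2ObjectHashTable<>();
            table.put(10L, "a");

            System.out.println(table.remove(10L, "b")); // false: value mismatch, entry kept
            System.out.println(table.remove(10L, "a")); // true: entry removed
            System.out.println(table.get(10L));         // null
        }
    }
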
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/scheduler/SchedulerThreadPool.java b/src/main/java/ca/spottedleaf/concurrentutil/scheduler/SchedulerThreadPool.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..f579ad58ea7db20d6d7b89abbab3a4dfadaaeaee
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/concurrentutil/scheduler/SchedulerThreadPool.java
+@@ -0,0 +1,534 @@
++package ca.spottedleaf.concurrentutil.scheduler;
++
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import ca.spottedleaf.concurrentutil.util.TimeUtil;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.util.set.LinkedSortedSet;
++import org.slf4j.Logger;
++import java.lang.invoke.VarHandle;
++import java.util.BitSet;
++import java.util.Comparator;
++import java.util.PriorityQueue;
++import java.util.concurrent.ThreadFactory;
++import java.util.concurrent.atomic.AtomicInteger;
++import java.util.concurrent.atomic.AtomicLong;
++import java.util.concurrent.locks.LockSupport;
++import java.util.function.BooleanSupplier;
++
++public class SchedulerThreadPool {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ public static final long DEADLINE_NOT_SET = Long.MIN_VALUE;
++
++ private static final Comparator<SchedulableTick> TICK_COMPARATOR_BY_TIME = (final SchedulableTick t1, final SchedulableTick t2) -> {
++ final int timeCompare = TimeUtil.compareTimes(t1.scheduledStart, t2.scheduledStart);
++ if (timeCompare != 0) {
++ return timeCompare;
++ }
++
++ return Long.compare(t1.id, t2.id);
++ };
++
++ private final TickThreadRunner[] runners;
++ private final Thread[] threads;
++ private final LinkedSortedSet<SchedulableTick> awaiting = new LinkedSortedSet<>(TICK_COMPARATOR_BY_TIME);
++ private final PriorityQueue<SchedulableTick> queued = new PriorityQueue<>(TICK_COMPARATOR_BY_TIME);
++ private final BitSet idleThreads;
++
++ private final Object scheduleLock = new Object();
++
++ private volatile boolean halted;
++
++ public SchedulerThreadPool(final int threads, final ThreadFactory threadFactory) {
++ final BitSet idleThreads = new BitSet(threads);
++ for (int i = 0; i < threads; ++i) {
++ idleThreads.set(i);
++ }
++ this.idleThreads = idleThreads;
++
++ final TickThreadRunner[] runners = new TickThreadRunner[threads];
++ final Thread[] t = new Thread[threads];
++ for (int i = 0; i < threads; ++i) {
++ runners[i] = new TickThreadRunner(i, this);
++ t[i] = threadFactory.newThread(runners[i]);
++ }
++
++ this.threads = t;
++ this.runners = runners;
++ }
++
++ /**
++ * Starts the threads in this pool.
++ */
++ public void start() {
++ for (final Thread thread : this.threads) {
++ thread.start();
++ }
++ }
++
++ /**
++ * Attempts to prevent further execution of tasks, optionally waiting for the scheduler threads to die.
++ *
++ * @param sync Whether to wait for the scheduler threads to die.
++ * @param maxWaitNS The maximum time, in ns, to wait for the scheduler threads to die.
++ * @return {@code true} if sync was false, or if sync was true and the scheduler threads died before the timeout.
++ * Otherwise, returns {@code false} if the time elapsed exceeded the maximum wait time.
++ */
++ public boolean halt(final boolean sync, final long maxWaitNS) {
++ this.halted = true;
++ for (final Thread thread : this.threads) {
++ // force response to halt
++ LockSupport.unpark(thread);
++ }
++ final long time = System.nanoTime();
++ if (sync) {
++ // start at 10 * 0.5ms -> 5ms
++ for (long failures = 9L;; failures = ConcurrentUtil.linearLongBackoff(failures, 500_000L, 50_000_000L)) {
++ boolean allDead = true;
++ for (final Thread thread : this.threads) {
++ if (thread.isAlive()) {
++ allDead = false;
++ break;
++ }
++ }
++ if (allDead) {
++ return true;
++ }
++ if ((System.nanoTime() - time) >= maxWaitNS) {
++ return false;
++ }
++ }
++ }
++
++ return true;
++ }
++
++ /**
++ * Returns an array of the underlying scheduling threads.
++ */
++ public Thread[] getThreads() {
++ return this.threads.clone();
++ }
++
++ private void insertFresh(final SchedulableTick task) {
++ final TickThreadRunner[] runners = this.runners;
++
++ final int firstIdleThread = this.idleThreads.nextSetBit(0);
++
++ if (firstIdleThread != -1) {
++ // push to idle thread
++ this.idleThreads.clear(firstIdleThread);
++ final TickThreadRunner runner = runners[firstIdleThread];
++ task.awaitingLink = this.awaiting.addLast(task);
++ runner.acceptTask(task);
++ return;
++ }
++
++ // try to replace the last awaiting task
++ final SchedulableTick last = this.awaiting.last();
++
++ if (last != null && TICK_COMPARATOR_BY_TIME.compare(task, last) < 0) {
++ // need to replace the last task
++ this.awaiting.pollLast();
++ last.awaitingLink = null;
++ task.awaitingLink = this.awaiting.addLast(task);
++ // need to add task to queue to be picked up later
++ this.queued.add(last);
++
++ final TickThreadRunner runner = last.ownedBy;
++ runner.replaceTask(task);
++
++ return;
++ }
++
++ // add to queue, will be picked up later
++ this.queued.add(task);
++ }
++
++ private void takeTask(final TickThreadRunner runner, final SchedulableTick tick) {
++ if (!this.awaiting.remove(tick.awaitingLink)) {
++ throw new IllegalStateException("Task is not in awaiting");
++ }
++ tick.awaitingLink = null;
++ }
++
++ private SchedulableTick returnTask(final TickThreadRunner runner, final SchedulableTick reschedule) {
++ if (reschedule != null) {
++ this.queued.add(reschedule);
++ }
++ final SchedulableTick ret = this.queued.poll();
++ if (ret == null) {
++ this.idleThreads.set(runner.id);
++ } else {
++ ret.awaitingLink = this.awaiting.addLast(ret);
++ }
++
++ return ret;
++ }
++
++ public void schedule(final SchedulableTick task) {
++ synchronized (this.scheduleLock) {
++ if (!task.tryMarkScheduled()) {
++ throw new IllegalStateException("Task " + task + " is already scheduled or cancelled");
++ }
++
++ task.schedulerOwnedBy = this;
++
++ this.insertFresh(task);
++ }
++ }
++
++ public boolean updateTickStartToMax(final SchedulableTick task, final long newStart) {
++ synchronized (this.scheduleLock) {
++ if (TimeUtil.compareTimes(newStart, task.getScheduledStart()) <= 0) {
++ return false;
++ }
++ if (this.queued.remove(task)) {
++ task.setScheduledStart(newStart);
++ this.queued.add(task);
++ return true;
++ }
++ if (task.awaitingLink != null) {
++ this.awaiting.remove(task.awaitingLink);
++ task.awaitingLink = null;
++
++ // re-queue task
++ task.setScheduledStart(newStart);
++ this.queued.add(task);
++
++ // now we need to replace the task the runner was waiting for
++ final TickThreadRunner runner = task.ownedBy;
++ final SchedulableTick replace = this.queued.poll();
++
++ // replace cannot be null, since we have added a task to queued
++ if (replace != task) {
++ runner.replaceTask(replace);
++ }
++
++ return true;
++ }
++
++ return false;
++ }
++ }
++
++ /**
++ * Returns {@code null} if the task is not scheduled, returns {@code TRUE} if the task was cancelled
++ * and was queued to execute, returns {@code FALSE} if the task was cancelled but was executing.
++ */
++ public Boolean tryRetire(final SchedulableTick task) {
++ if (task.schedulerOwnedBy != this) {
++ return null;
++ }
++
++ synchronized (this.scheduleLock) {
++ if (this.queued.remove(task)) {
++ // cancelled, and no runner owns it - so return
++ return Boolean.TRUE;
++ }
++ if (task.awaitingLink != null) {
++ this.awaiting.remove(task.awaitingLink);
++ task.awaitingLink = null;
++ // here we need to replace the task the runner was waiting for
++ final TickThreadRunner runner = task.ownedBy;
++ final SchedulableTick replace = this.queued.poll();
++
++ if (replace == null) {
++ // nothing to replace with, set to idle
++ this.idleThreads.set(runner.id);
++ runner.forceIdle();
++ } else {
++ runner.replaceTask(replace);
++ }
++
++ return Boolean.TRUE;
++ }
++
++ // could not find it in queue
++ return task.tryMarkCancelled() ? Boolean.FALSE : null;
++ }
++ }
++
++ public void notifyTasks(final SchedulableTick task) {
++ // Not implemented
++ }
++
++ /**
++ * Represents a tickable task that can be scheduled into a {@link SchedulerThreadPool}.
++ *
++ * A tickable task is expected to run on a fixed interval, which is determined by
++ * the {@link SchedulerThreadPool}.
++ *
++ *
++ * A tickable task can have intermediate tasks that can be executed before its tick method is run. Instead of
++ * parking in-between ticks, the {@link SchedulerThreadPool} drains
++ * intermediate tasks from scheduled tasks. Draining intermediate tasks during this downtime reduces the
++ * intermediate task load on tasks once they begin ticking.
++ *
++ *
++ * It is guaranteed that {@link #runTick()} and {@link #runTasks(BooleanSupplier)} are never
++ * invoked in parallel.
++ * It is required that when intermediate tasks are scheduled, that {@link SchedulerThreadPool#notifyTasks(SchedulableTick)}
++ * is invoked for any scheduled task - otherwise, {@link #runTasks(BooleanSupplier)} may not be invoked to
++ * parse intermediate tasks.
++ *
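++ * A minimal sketch of a subclass (illustrative only; the names and the 50ms interval are hypothetical):
++ * <pre>{@code
++ * final class ExampleTick extends SchedulableTick {
++ *     ExampleTick(final long firstStartNanos) {
++ *         this.setScheduledStart(firstStartNanos);
++ *     }
++ *
++ *     public boolean runTick() {
++ *         // tick logic would go here; then schedule the next tick 50ms after the last start
++ *         this.setScheduledStart(this.getScheduledStart() + TimeUnit.MILLISECONDS.toNanos(50L));
++ *         return true; // keep this task scheduled
++ *     }
++ *
++ *     public boolean hasTasks() {
++ *         return false; // this example has no intermediate tasks
++ *     }
++ *
++ *     public Boolean runTasks(final BooleanSupplier canContinue) {
++ *         return Boolean.FALSE; // no intermediate tasks to drain
++ *     }
++ * }
++ * }</pre>
++ *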
++ */
++ public static abstract class SchedulableTick {
++ private static final AtomicLong ID_GENERATOR = new AtomicLong();
++ public final long id = ID_GENERATOR.getAndIncrement();
++
++ private static final int SCHEDULE_STATE_NOT_SCHEDULED = 0;
++ private static final int SCHEDULE_STATE_SCHEDULED = 1;
++ private static final int SCHEDULE_STATE_CANCELLED = 2;
++
++ private final AtomicInteger scheduled = new AtomicInteger();
++ private SchedulerThreadPool schedulerOwnedBy;
++ private long scheduledStart = DEADLINE_NOT_SET;
++ private TickThreadRunner ownedBy;
++
++ private LinkedSortedSet.Link awaitingLink;
++
++ private boolean tryMarkScheduled() {
++ return this.scheduled.compareAndSet(SCHEDULE_STATE_NOT_SCHEDULED, SCHEDULE_STATE_SCHEDULED);
++ }
++
++ private boolean tryMarkCancelled() {
++ return this.scheduled.compareAndSet(SCHEDULE_STATE_SCHEDULED, SCHEDULE_STATE_CANCELLED);
++ }
++
++ private boolean isScheduled() {
++ return this.scheduled.get() == SCHEDULE_STATE_SCHEDULED;
++ }
++
++ protected final long getScheduledStart() {
++ return this.scheduledStart;
++ }
++
++ /**
++ * If this task is scheduled, then this may only be invoked during {@link #runTick()}
++ * or {@link #runTasks(BooleanSupplier)}.
++ */
++ protected final void setScheduledStart(final long value) {
++ this.scheduledStart = value;
++ }
++
++ /**
++ * Executes the tick.
++ *
++ * It is the callee's responsibility to invoke {@link #setScheduledStart(long)} to adjust the start of
++ * the next tick.
++ *
++ * @return {@code true} if the task should continue to be scheduled, {@code false} otherwise.
++ */
++ public abstract boolean runTick();
++
++ /**
++ * Returns whether this task has any intermediate tasks that can be executed.
++ */
++ public abstract boolean hasTasks();
++
++ /**
++ * Returns {@code null} if this task should not be scheduled, otherwise returns
++ * {@code Boolean.TRUE} if there are more intermediate tasks to execute and
++ * {@code Boolean.FALSE} if there are no more intermediate tasks to execute.
++ */
++ public abstract Boolean runTasks(final BooleanSupplier canContinue);
++
++ @Override
++ public String toString() {
++ return "SchedulableTick:{" +
++ "class=" + this.getClass().getName() + "," +
++ "scheduled_state=" + this.scheduled.get() + ","
++ + "}";
++ }
++ }
++
++ private static final class TickThreadRunner implements Runnable {
++
++ /**
++ * There are no tasks in this thread's runqueue, so it is parked.
++ *
++ * stateTarget = null
++ *
++ */
++ private static final int STATE_IDLE = 0;
++
++ /**
++ * The runner is waiting to tick a task, as it has no intermediate tasks to execute.
++ *
++ * stateTarget = the task awaiting tick
++ *
++ */
++ private static final int STATE_AWAITING_TICK = 1;
++
++ /**
++ * The runner is executing a tick for one of the tasks that was in its runqueue.
++ *
++ * stateTarget = the task being ticked
++ *
++ */
++ private static final int STATE_EXECUTING_TICK = 2;
++
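++ /*
++ * Summary of the state transitions implemented below (scheduler-initiated transitions
++ * occur while holding the scheduler's scheduleLock):
++ * IDLE -> AWAITING_TICK: acceptTask(), when the scheduler assigns a task to an idle runner
++ * AWAITING_TICK -> AWAITING_TICK: replaceTask(), when the awaited task is retired or re-queued
++ * AWAITING_TICK -> IDLE: forceIdle(), when the awaited task is retired with no replacement
++ * AWAITING_TICK -> EXECUTING_TICK: takeTask(), performed by the runner thread at the tick deadline
++ * EXECUTING_TICK -> AWAITING_TICK or IDLE: returnTask(), performed by the runner thread after the tick
++ */
++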
++ public final int id;
++ public final SchedulerThreadPool scheduler;
++
++ private volatile Thread thread;
++ private volatile TickThreadRunnerState state = new TickThreadRunnerState(null, STATE_IDLE);
++ private static final VarHandle STATE_HANDLE = ConcurrentUtil.getVarHandle(TickThreadRunner.class, "state", TickThreadRunnerState.class);
++
++ private void setStatePlain(final TickThreadRunnerState state) {
++ STATE_HANDLE.set(this, state);
++ }
++
++ private void setStateOpaque(final TickThreadRunnerState state) {
++ STATE_HANDLE.setOpaque(this, state);
++ }
++
++ private void setStateVolatile(final TickThreadRunnerState state) {
++ STATE_HANDLE.setVolatile(this, state);
++ }
++
++ private static record TickThreadRunnerState(SchedulableTick stateTarget, int state) {}
++
++ public TickThreadRunner(final int id, final SchedulerThreadPool scheduler) {
++ this.id = id;
++ this.scheduler = scheduler;
++ }
++
++ private Thread getRunnerThread() {
++ return this.thread;
++ }
++
++ private void acceptTask(final SchedulableTick task) {
++ if (task.ownedBy != null) {
++ throw new IllegalStateException("Already owned by another runner");
++ }
++ task.ownedBy = this;
++ final TickThreadRunnerState state = this.state;
++ if (state.state != STATE_IDLE) {
++ throw new IllegalStateException("Cannot accept task in state " + state);
++ }
++ this.setStateVolatile(new TickThreadRunnerState(task, STATE_AWAITING_TICK));
++ LockSupport.unpark(this.getRunnerThread());
++ }
++
++ private void replaceTask(final SchedulableTick task) {
++ final TickThreadRunnerState state = this.state;
++ if (state.state != STATE_AWAITING_TICK) {
++ throw new IllegalStateException("Cannot replace task in state " + state);
++ }
++ if (task.ownedBy != null) {
++ throw new IllegalStateException("Already owned by another runner");
++ }
++ task.ownedBy = this;
++
++ state.stateTarget.ownedBy = null;
++
++ this.setStateVolatile(new TickThreadRunnerState(task, STATE_AWAITING_TICK));
++ LockSupport.unpark(this.getRunnerThread());
++ }
++
++ private void forceIdle() {
++ final TickThreadRunnerState state = this.state;
++ if (state.state != STATE_AWAITING_TICK) {
++ throw new IllegalStateException("Cannot replace task in state " + state);
++ }
++ state.stateTarget.ownedBy = null;
++ this.setStateOpaque(new TickThreadRunnerState(null, STATE_IDLE));
++ // no need to unpark
++ }
++
++ private boolean takeTask(final TickThreadRunnerState state, final SchedulableTick task) {
++ synchronized (this.scheduler.scheduleLock) {
++ if (this.state != state) {
++ return false;
++ }
++ this.setStatePlain(new TickThreadRunnerState(task, STATE_EXECUTING_TICK));
++ this.scheduler.takeTask(this, task);
++ return true;
++ }
++ }
++
++ private void returnTask(final SchedulableTick task, final boolean reschedule) {
++ synchronized (this.scheduler.scheduleLock) {
++ task.ownedBy = null;
++
++ final SchedulableTick newWait = this.scheduler.returnTask(this, reschedule && task.isScheduled() ? task : null);
++ if (newWait == null) {
++ this.setStatePlain(new TickThreadRunnerState(null, STATE_IDLE));
++ } else {
++ if (newWait.ownedBy != null) {
++ throw new IllegalStateException("Already owned by another runner");
++ }
++ newWait.ownedBy = this;
++ this.setStatePlain(new TickThreadRunnerState(newWait, STATE_AWAITING_TICK));
++ }
++ }
++ }
++
++ @Override
++ public void run() {
++ this.thread = Thread.currentThread();
++
++ main_state_loop:
++ for (;;) {
++ final TickThreadRunnerState startState = this.state;
++ final int startStateType = startState.state;
++ final SchedulableTick startStateTask = startState.stateTarget;
++
++ if (this.scheduler.halted) {
++ return;
++ }
++
++ switch (startStateType) {
++ case STATE_IDLE: {
++ while (this.state.state == STATE_IDLE) {
++ LockSupport.park();
++ if (this.scheduler.halted) {
++ return;
++ }
++ }
++ continue main_state_loop;
++ }
++
++ case STATE_AWAITING_TICK: {
++ final long deadline = startStateTask.getScheduledStart();
++ for (;;) {
++ if (this.state != startState) {
++ continue main_state_loop;
++ }
++ final long diff = deadline - System.nanoTime();
++ if (diff <= 0L) {
++ break;
++ }
++ LockSupport.parkNanos(startState, diff);
++ if (this.scheduler.halted) {
++ return;
++ }
++ }
++
++ if (!this.takeTask(startState, startStateTask)) {
++ continue main_state_loop;
++ }
++
++ // TODO exception handling
++ final boolean reschedule = startStateTask.runTick();
++
++ this.returnTask(startStateTask, reschedule);
++
++ continue main_state_loop;
++ }
++
++ case STATE_EXECUTING_TICK: {
++ throw new IllegalStateException("Tick execution must be set by runner thread, not by any other thread");
++ }
++
++ default: {
++ throw new IllegalStateException("Unknown state: " + startState);
++ }
++ }
++ }
++ }
++ }
++}
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/util/TimeUtil.java b/src/main/java/ca/spottedleaf/concurrentutil/util/TimeUtil.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..63688716244066581d5b505703576e3340e3baf3
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/concurrentutil/util/TimeUtil.java
+@@ -0,0 +1,60 @@
++package ca.spottedleaf.concurrentutil.util;
++
++public final class TimeUtil {
++
++ /*
++ * The comparator is not a valid comparator for every long value. To prove where it is valid, see below.
++ *
++ * For reflexivity, we have that x - x = 0. We then have that for any long value x that
++ * compareTimes(x, x) == 0, as expected.
++ *
++ * For symmetry, we have that x - y = -(y - x) except for when y - x = Long.MIN_VALUE.
++ * So, the difference between any times x and y must not be equal to Long.MIN_VALUE.
++ *
++ * As for the transitive relation, consider we have x,y such that x - y = a > 0 and z such that
++ * y - z = b > 0. Then, we will have that the x - z > 0 is equivalent to a + b > 0. For long values,
++ * this holds as long as a + b <= Long.MAX_VALUE.
++ *
++ * Also consider we have x, y such that x - y = a < 0 and z such that y - z = b < 0. Then, we will have
++ * that x - z < 0 is equivalent to a + b < 0. For long values, this holds as long as a + b >= -Long.MAX_VALUE.
++ *
++ * Thus, the comparator is only valid for timestamps such that abs(c - d) <= Long.MAX_VALUE for all timestamps
++ * c and d.
++ */
++
++ /**
++ * This function is appropriate to be used as a {@link java.util.Comparator} between two timestamps: it
++ * indicates whether the timestamp t1 is before, equal to, or after the timestamp t2.
++ */
++ public static int compareTimes(final long t1, final long t2) {
++ final long diff = t1 - t2;
++
++ // HD (Hacker's Delight), Section 2-7: branch-free signum of the difference
++ return (int) ((diff >> 63) | (-diff >>> 63));
++ }
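++
++ // Example: the ordering is preserved even across overflow. With t1 = Long.MAX_VALUE and
++ // t2 = Long.MAX_VALUE + 2 (which wraps negative), t1 - t2 == -2, so compareTimes(t1, t2)
++ // returns -1: t1 is still correctly ordered before t2.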
++
++ public static long getGreatestTime(final long t1, final long t2) {
++ final long diff = t1 - t2;
++ return diff < 0L ? t2 : t1;
++ }
++
++ public static long getLeastTime(final long t1, final long t2) {
++ final long diff = t1 - t2;
++ return diff > 0L ? t2 : t1;
++ }
++
++ public static long clampTime(final long value, final long min, final long max) {
++ final long diffMax = value - max;
++ final long diffMin = value - min;
++
++ if (diffMax > 0L) {
++ return max;
++ }
++ if (diffMin < 0L) {
++ return min;
++ }
++ return value;
++ }
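++
++ // Example: clamping also survives overflow; with min = Long.MAX_VALUE - 10 and
++ // max = Long.MAX_VALUE + 10 (wrapped), clampTime(Long.MAX_VALUE, min, max) returns
++ // Long.MAX_VALUE, since only the differences are inspected, never the raw magnitudes.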
++
++ private TimeUtil() {}
++}
+diff --git a/src/main/java/ca/spottedleaf/leafprofiler/LProfileGraph.java b/src/main/java/ca/spottedleaf/leafprofiler/LProfileGraph.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..14a4778f7913b849fabbd772f9cb8a0bc5a6ed6c
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/leafprofiler/LProfileGraph.java
+@@ -0,0 +1,58 @@
++package ca.spottedleaf.leafprofiler;
++
++import ca.spottedleaf.concurrentutil.map.SWMRInt2IntHashTable;
++import java.util.Arrays;
++
++public final class LProfileGraph {
++
++ public static final int ROOT_NODE = 0;
++
++ // volatile required for correct publishing after resizing
++ private volatile SWMRInt2IntHashTable[] nodes = new SWMRInt2IntHashTable[16];
++ private int nodeCount;
++
++ public LProfileGraph() {
++ this.nodes[ROOT_NODE] = new SWMRInt2IntHashTable();
++ this.nodeCount = 1;
++ }
++
++ private int createNode(final int parent, final int type) {
++ synchronized (this) {
++ SWMRInt2IntHashTable[] nodes = this.nodes;
++
++ final SWMRInt2IntHashTable node = nodes[parent];
++
++ final int newNode = this.nodeCount;
++ final int prev = node.putIfAbsent(type, newNode);
++
++ if (prev != 0) {
++ // already exists
++ return prev;
++ }
++
++ // insert new node
++ ++this.nodeCount;
++
++ if (newNode >= nodes.length) {
++ this.nodes = nodes = Arrays.copyOf(nodes, nodes.length * 2);
++ }
++
++ nodes[newNode] = new SWMRInt2IntHashTable();
++
++ return newNode;
++ }
++ }
++
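++ // Illustrative usage (the type ids here are hypothetical profiler-registry ids):
++ // building the call path ROOT -> "tick" -> "chunk tick" yields a stable node id per path:
++ // final int tickNode = graph.getOrCreateNode(LProfileGraph.ROOT_NODE, tickTypeId);
++ // final int chunkTickNode = graph.getOrCreateNode(tickNode, chunkTickTypeId);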
++ public int getOrCreateNode(final int parent, final int type) {
++ // note: requires parent node to exist
++ final SWMRInt2IntHashTable[] nodes = this.nodes;
++
++ final int mapping = nodes[parent].get(type);
++
++ if (mapping != 0) {
++ return mapping;
++ }
++
++ return this.createNode(parent, type);
++ }
++}
+diff --git a/src/main/java/ca/spottedleaf/leafprofiler/LProfilerRegistry.java b/src/main/java/ca/spottedleaf/leafprofiler/LProfilerRegistry.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..ffa32c1eae22bda371dd1d0318cc7c587f8e5a5c
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/leafprofiler/LProfilerRegistry.java
+@@ -0,0 +1,59 @@
++package ca.spottedleaf.leafprofiler;
++
++import java.util.Arrays;
++import java.util.concurrent.ConcurrentHashMap;
++
++public final class LProfilerRegistry {
++
++ // volatile required to ensure correct publishing when resizing
++ private volatile ProfilerEntry[] typesById = new ProfilerEntry[16];
++ private int totalEntries;
++ private final ConcurrentHashMap<String, ProfilerEntry> nameToEntry = new ConcurrentHashMap<>();
++
++ public LProfilerRegistry() {
++
++ }
++
++ public ProfilerEntry getById(final int id) {
++ final ProfilerEntry[] entries = this.typesById;
++
++ return id < 0 || id >= entries.length ? null : entries[id];
++ }
++
++ public ProfilerEntry getByName(final String name) {
++ return this.nameToEntry.get(name);
++ }
++
++ public int createType(final ProfileType type, final String name) {
++ synchronized (this) {
++ final int id = this.totalEntries;
++
++ final ProfilerEntry ret = new ProfilerEntry(type, name, id);
++
++ final ProfilerEntry prev = this.nameToEntry.putIfAbsent(name, ret);
++
++ if (prev != null) {
++ throw new IllegalStateException("Entry already exists: " + prev);
++ }
++
++ ++this.totalEntries;
++
++ ProfilerEntry[] entries = this.typesById;
++
++ if (id >= entries.length) {
++ this.typesById = entries = Arrays.copyOf(entries, entries.length * 2);
++ }
++
++ // should be opaque, but I don't think that matters here.
++ entries[id] = ret;
++
++ return id;
++ }
++ }
++
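++ // Illustrative usage (the names are hypothetical):
++ // final LProfilerRegistry registry = new LProfilerRegistry();
++ // final int tickTimerId = registry.createType(LProfilerRegistry.ProfileType.TIMER, "tick");
++ // final LProfilerRegistry.ProfilerEntry entry = registry.getById(tickTimerId); // entry.name() == "tick"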
++ public static enum ProfileType {
++ TIMER, COUNTER
++ }
++
++ public static record ProfilerEntry(ProfileType type, String name, int id) {}
++}
+diff --git a/src/main/java/ca/spottedleaf/leafprofiler/LeafProfiler.java b/src/main/java/ca/spottedleaf/leafprofiler/LeafProfiler.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..ad8c590fe7479fcb3c7ff5dc3ac3a4d6f33c5938
+--- /dev/null
++++ b/src/main/java/ca/spottedleaf/leafprofiler/LeafProfiler.java
+@@ -0,0 +1,61 @@
++package ca.spottedleaf.leafprofiler;
++
++import it.unimi.dsi.fastutil.ints.IntArrayFIFOQueue;
++import it.unimi.dsi.fastutil.longs.LongArrayFIFOQueue;
++import java.util.Arrays;
++
++public final class LeafProfiler {
++
++ public final LProfilerRegistry registry;
++ public final LProfileGraph graph;
++
++ private long[] data;
++ private final IntArrayFIFOQueue callStack = new IntArrayFIFOQueue();
++ private int topOfStack = LProfileGraph.ROOT_NODE;
++ private final LongArrayFIFOQueue timerStack = new LongArrayFIFOQueue();
++ private long lastTimerStart = 0L;
++
++ public LeafProfiler(final LProfilerRegistry registry, final LProfileGraph graph) {
++ this.registry = registry;
++ this.graph = graph;
++ }
++
++ private long[] resizeData(final long[] old, final int least) {
++ return this.data = Arrays.copyOf(old, Math.max(old.length * 2, least * 2));
++ }
++
++ private void incrementDirect(final int nodeId, final long count) {
++ final long[] data = this.data;
++ if (nodeId >= data.length) {
++ this.resizeData(data, nodeId)[nodeId] += count;
++ } else {
++ data[nodeId] += count;
++ }
++ }
++
++ public void incrementCounter(final int type, final long count) {
++ // this is supposed to be an optimised version of startTimer then stopTimer
++ final int node = this.graph.getOrCreateNode(this.topOfStack, type);
++ this.incrementDirect(node, count);
++ }
++
++ public void startTimer(final int type, final long startTime) {
++ final int parentNode = this.topOfStack;
++ final int newNode = this.graph.getOrCreateNode(parentNode, type);
++ this.callStack.enqueue(parentNode);
++ this.topOfStack = newNode;
++
++ this.timerStack.enqueue(this.lastTimerStart);
++ this.lastTimerStart = startTime;
++ }
++
++ public void stopTimer(final int type, final long endTime) {
++ final int currentNode = this.topOfStack;
++ this.topOfStack = this.callStack.dequeueLastInt();
++
++ final long lastStart = this.lastTimerStart;
++ this.lastTimerStart = this.timerStack.dequeueLastLong();
++
++ this.incrementDirect(currentNode, endTime - lastStart);
++ }
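++
++ // Illustrative usage (the timer id is a hypothetical registry id). Timers must be
++ // strictly nested, as the profiler maintains a stack of open timers:
++ // profiler.startTimer(tickTimerId, System.nanoTime());
++ // try {
++ // // ... tick work ...
++ // } finally {
++ // profiler.stopTimer(tickTimerId, System.nanoTime());
++ // }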
++}
+diff --git a/src/main/java/com/destroystokyo/paper/Metrics.java b/src/main/java/com/destroystokyo/paper/Metrics.java
+index 4b002e8b75d117b726b0de274a76d3596fce015b..897cb94abf7b53da8ba7cda5135b6580aa2d9824 100644
+--- a/src/main/java/com/destroystokyo/paper/Metrics.java
++++ b/src/main/java/com/destroystokyo/paper/Metrics.java
+@@ -593,7 +593,7 @@ public class Metrics {
+ boolean logFailedRequests = config.getBoolean("logFailedRequests", false);
+ // Only start Metrics, if it's enabled in the config
+ if (config.getBoolean("enabled", true)) {
+- Metrics metrics = new Metrics("Paper", serverUUID, logFailedRequests, Bukkit.getLogger());
++ Metrics metrics = new Metrics("Tuinity", serverUUID, logFailedRequests, Bukkit.getLogger()); // Tuinity - we have our own bstats page
+
+ metrics.addCustomChart(new Metrics.SimplePie("minecraft_version", () -> {
+ String minecraftVersion = Bukkit.getVersion();
+@@ -611,7 +611,7 @@ public class Metrics {
+ } else {
+ paperVersion = "unknown";
+ }
+- metrics.addCustomChart(new Metrics.SimplePie("paper_version", () -> paperVersion));
++ metrics.addCustomChart(new Metrics.SimplePie("tuinity_version", () -> paperVersion)); // Tuinity - we have our own bstats page
+
+ metrics.addCustomChart(new Metrics.DrilldownPie("java_version", () -> {
+ Map<String, Map<String, Integer>> map = new HashMap<>();
+diff --git a/src/main/java/com/destroystokyo/paper/antixray/ChunkPacketBlockControllerAntiXray.java b/src/main/java/com/destroystokyo/paper/antixray/ChunkPacketBlockControllerAntiXray.java
+index 4f3670b2bdb8b1b252e9f074a6af56a018a8c465..bb3df6a4d8b87219c3c0406c56428c28d5f9ab4e 100644
+--- a/src/main/java/com/destroystokyo/paper/antixray/ChunkPacketBlockControllerAntiXray.java
++++ b/src/main/java/com/destroystokyo/paper/antixray/ChunkPacketBlockControllerAntiXray.java
+@@ -179,11 +179,7 @@ public final class ChunkPacketBlockControllerAntiXray extends ChunkPacketBlockCo
+ return;
+ }
+
+- if (!Bukkit.isPrimaryThread()) {
+- // Plugins?
+- MinecraftServer.getServer().scheduleOnMain(() -> modifyBlocks(chunkPacket, chunkPacketInfo));
+- return;
+- }
++ // Paper - region threading
+
+ LevelChunk chunk = chunkPacketInfo.getChunk();
+ int x = chunk.getPos().x;
+diff --git a/src/main/java/io/papermc/paper/adventure/ChatProcessor.java b/src/main/java/io/papermc/paper/adventure/ChatProcessor.java
+index 309fe1162db195c7c3c94d785d6aa2700e42b08a..70db79d37257dedada9f55b3cf1127451c151072 100644
+--- a/src/main/java/io/papermc/paper/adventure/ChatProcessor.java
++++ b/src/main/java/io/papermc/paper/adventure/ChatProcessor.java
+@@ -97,7 +97,7 @@ public final class ChatProcessor {
+ final CraftPlayer player = this.player.getBukkitEntity();
+ final AsyncPlayerChatEvent ae = new AsyncPlayerChatEvent(this.async, player, this.craftbukkit$originalMessage, new LazyPlayerSet(this.server));
+ this.post(ae);
+- if (listenersOnSyncEvent) {
++ if (false && listenersOnSyncEvent) { // Paper - region threading
+ final PlayerChatEvent se = new PlayerChatEvent(player, ae.getMessage(), ae.getFormat(), ae.getRecipients());
+ se.setCancelled(ae.isCancelled()); // propagate cancelled state
+ this.queueIfAsyncOrRunImmediately(new Waitable<Void>() {
+@@ -177,7 +177,7 @@ public final class ChatProcessor {
+ ae.setCancelled(cancelled); // propagate cancelled state
+ this.post(ae);
+ final boolean listenersOnSyncEvent = canYouHearMe(ChatEvent.getHandlerList());
+- if (listenersOnSyncEvent) {
++ if (false && listenersOnSyncEvent) { // Paper - region threading
+ this.queueIfAsyncOrRunImmediately(new Waitable<Void>() {
+ @Override
+ protected Void evaluate() {
+diff --git a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
+index 6df1948b1204a7288ecb7238b6fc2a733f7d25b3..7ac1ca0e358a38cf5d1e7f7cdc7383ca9c7df6f2 100644
+--- a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
++++ b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
+@@ -91,6 +91,9 @@ public final class ChunkSystem {
+ for (int index = 0, len = chunkMap.regionManagers.size(); index < len; ++index) {
+ chunkMap.regionManagers.get(index).addChunk(holder.pos.x, holder.pos.z);
+ }
++ // Paper start - threaded regions
++ level.regioniser.addChunk(holder.pos.x, holder.pos.z);
++ // Paper end - threaded regions
+ }
+
+ public static void onChunkHolderDelete(final ServerLevel level, final ChunkHolder holder) {
+@@ -98,6 +101,9 @@ public final class ChunkSystem {
+ for (int index = 0, len = chunkMap.regionManagers.size(); index < len; ++index) {
+ chunkMap.regionManagers.get(index).removeChunk(holder.pos.x, holder.pos.z);
+ }
++ // Paper start - threaded regions
++ level.regioniser.removeChunk(holder.pos.x, holder.pos.z);
++ // Paper end - threaded regions
+ }
+
+ public static void onChunkBorder(final LevelChunk chunk, final ChunkHolder holder) {
+@@ -109,19 +115,19 @@ public final class ChunkSystem {
+ }
+
+ public static void onChunkTicking(final LevelChunk chunk, final ChunkHolder holder) {
+- chunk.level.getChunkSource().tickingChunks.add(chunk);
++ // Paper - region threading
+ }
+
+ public static void onChunkNotTicking(final LevelChunk chunk, final ChunkHolder holder) {
+- chunk.level.getChunkSource().tickingChunks.remove(chunk);
++ // Paper - region threading
+ }
+
+ public static void onChunkEntityTicking(final LevelChunk chunk, final ChunkHolder holder) {
+- chunk.level.getChunkSource().entityTickingChunks.add(chunk);
++ chunk.level.getCurrentWorldData().addEntityTickingChunks(chunk); // Paper - region threading
+ }
+
+ public static void onChunkNotEntityTicking(final LevelChunk chunk, final ChunkHolder holder) {
+- chunk.level.getChunkSource().entityTickingChunks.remove(chunk);
++ chunk.level.getCurrentWorldData().removeEntityTickingChunk(chunk); // Paper - region threading
+ }
+
+ public static ChunkHolder getUnloadingChunkHolder(final ServerLevel level, final int chunkX, final int chunkZ) {
+diff --git a/src/main/java/io/papermc/paper/chunk/system/RegionisedPlayerChunkLoader.java b/src/main/java/io/papermc/paper/chunk/system/RegionisedPlayerChunkLoader.java
+index a4d58352eebed11fafde8c381afe3572893b8f8f..b9b3ab0bcddbbe485bb138bfd4882a21067f8bde 100644
+--- a/src/main/java/io/papermc/paper/chunk/system/RegionisedPlayerChunkLoader.java
++++ b/src/main/java/io/papermc/paper/chunk/system/RegionisedPlayerChunkLoader.java
+@@ -231,14 +231,14 @@ public class RegionisedPlayerChunkLoader {
+
+ public void tick() {
+ TickThread.ensureTickThread("Cannot tick player chunk loader async");
+- for (final ServerPlayer player : this.world.players()) {
++ for (final ServerPlayer player : this.world.getLocalPlayers()) { // Paper - region threading
+ player.chunkLoader.update();
+ }
+ }
+
+ public void tickMidTick() {
+ final long time = System.nanoTime();
+- for (final ServerPlayer player : this.world.players()) {
++ for (final ServerPlayer player : this.world.getLocalPlayers()) { // Paper - region threading
+ player.chunkLoader.midTickUpdate(time);
+ }
+ }
+diff --git a/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java b/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java
+index 61c170555c8854b102c640b0b6a615f9f732edbf..576e48f68861b817bcd94252e1fd587e31008458 100644
+--- a/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java
++++ b/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java
+@@ -187,7 +187,12 @@ public final class EntityLookup implements LevelEntityGetter<Entity> {
+
+ @Override
+ public Iterable<Entity> getAll() {
+- return new ArrayIterable<>(this.accessibleEntities.getRawData(), 0, this.accessibleEntities.size());
++ // Paper start - region threading
++ synchronized (this.accessibleEntities) {
++ Entity[] iterate = java.util.Arrays.copyOf(this.accessibleEntities.getRawData(), this.accessibleEntities.size());
++ return new ArrayIterable<>(iterate, 0, iterate.length);
++ }
++ // Paper end - region threading
+ }
+
+ @Override
+@@ -261,7 +266,9 @@ public final class EntityLookup implements LevelEntityGetter<Entity> {
+ if (newVisibility.ordinal() > oldVisibility.ordinal()) {
+ // status upgrade
+ if (!oldVisibility.isAccessible() && newVisibility.isAccessible()) {
++ synchronized (this.accessibleEntities) { // Paper - region threading
+ this.accessibleEntities.add(entity);
++ } // Paper - region threading
+ EntityLookup.this.worldCallback.onTrackingStart(entity);
+ }
+
+@@ -275,7 +282,9 @@ public final class EntityLookup implements LevelEntityGetter<Entity> {
+ }
+
+ if (oldVisibility.isAccessible() && !newVisibility.isAccessible()) {
++ synchronized (this.accessibleEntities) { // Paper - region threading
+ this.accessibleEntities.remove(entity);
++ } // Paper - region threading
+ EntityLookup.this.worldCallback.onTrackingEnd(entity);
+ }
+ }
+@@ -385,6 +394,8 @@ public final class EntityLookup implements LevelEntityGetter<Entity> {
+
+ entity.setLevelCallback(new EntityCallback(entity));
+
++ this.world.getCurrentWorldData().addEntity(entity); // Paper - region threading
++
+ this.entityStatusChange(entity, slices, Visibility.HIDDEN, getEntityStatus(entity), false, !fromDisk, false);
+
+ return true;
+@@ -407,6 +418,7 @@ public final class EntityLookup implements LevelEntityGetter<Entity> {
+ LOGGER.warn("Failed to remove entity " + entity + " from entity slices (" + sectionX + "," + sectionZ + ")");
+ }
+ }
++
+ entity.sectionX = entity.sectionY = entity.sectionZ = Integer.MIN_VALUE;
+
+ this.entityByLock.writeLock();
+@@ -823,6 +835,9 @@ public final class EntityLookup implements LevelEntityGetter<Entity> {
+ EntityLookup.this.entityStatusChange(entity, null, tickingState, Visibility.HIDDEN, false, false, reason.shouldDestroy());
+
+ this.entity.setLevelCallback(NoOpCallback.INSTANCE);
++
++ // only AFTER full removal callbacks, so that thread checking will work. // Paper - region threading
++ EntityLookup.this.world.getCurrentWorldData().removeEntity(entity); // Paper - region threading
+ }
+ }
+
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
+index c6d20bc2f0eab737338db6b88dacb63f0decb66c..8f44efc736055aaf85e4f9068618e911bae0c30c 100644
+--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
+@@ -3,7 +3,6 @@ package io.papermc.paper.chunk.system.scheduling;
+ import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
+ import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+ import ca.spottedleaf.concurrentutil.map.SWMRLong2ObjectHashTable;
+-import co.aikar.timings.Timing;
+ import com.google.common.collect.ImmutableList;
+ import com.google.gson.JsonArray;
+ import com.google.gson.JsonObject;
+@@ -19,10 +18,12 @@ import it.unimi.dsi.fastutil.longs.Long2IntMap;
+ import it.unimi.dsi.fastutil.longs.Long2IntOpenHashMap;
+ import it.unimi.dsi.fastutil.longs.Long2ObjectMap;
+ import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
++import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap;
+ import it.unimi.dsi.fastutil.longs.LongArrayList;
+ import it.unimi.dsi.fastutil.longs.LongIterator;
+ import it.unimi.dsi.fastutil.objects.ObjectRBTreeSet;
+ import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet;
++import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet;
+ import net.minecraft.nbt.CompoundTag;
+ import io.papermc.paper.chunk.system.ChunkSystem;
+ import net.minecraft.server.MinecraftServer;
+@@ -34,8 +35,6 @@ import net.minecraft.server.level.TicketType;
+ import net.minecraft.util.SortedArraySet;
+ import net.minecraft.util.Unit;
+ import net.minecraft.world.level.ChunkPos;
+-import net.minecraft.world.level.chunk.ChunkAccess;
+-import net.minecraft.world.level.chunk.ChunkStatus;
+ import org.bukkit.plugin.Plugin;
+ import org.slf4j.Logger;
+ import java.io.IOException;
+@@ -54,6 +53,13 @@ import java.util.concurrent.locks.LockSupport;
+ import java.util.concurrent.locks.ReentrantLock;
+ import java.util.function.Predicate;
+
++// Paper start - region threading
++import io.papermc.paper.threadedregions.RegionisedServer;
++import io.papermc.paper.threadedregions.ThreadedRegioniser;
++import io.papermc.paper.threadedregions.TickRegionScheduler;
++import io.papermc.paper.threadedregions.TickRegions;
++// Paper end - region threading
++
+ public final class ChunkHolderManager {
+
+ private static final Logger LOGGER = LogUtils.getClassLogger();
+@@ -63,40 +69,201 @@ public final class ChunkHolderManager {
+ public static final int ENTITY_TICKING_TICKET_LEVEL = 31;
+ public static final int MAX_TICKET_LEVEL = ChunkMap.MAX_CHUNK_DISTANCE; // inclusive
+
+- private static final long NO_TIMEOUT_MARKER = -1L;
++ // Paper start - region threading
++ private static final long NO_TIMEOUT_MARKER = Long.MIN_VALUE;
++ private static final long PROBE_MARKER = Long.MIN_VALUE + 1;
++ // Paper end - region threading
+
+- final ReentrantLock ticketLock = new ReentrantLock();
++ public final ReentrantLock ticketLock = new ReentrantLock(); // Paper - region threading
+
+ private final SWMRLong2ObjectHashTable<NewChunkHolder> chunkHolders = new SWMRLong2ObjectHashTable<>(16384, 0.25f);
+- private final Long2ObjectOpenHashMap<SortedArraySet<Ticket<?>>> tickets = new Long2ObjectOpenHashMap<>(8192, 0.25f);
+- // what a disaster of a name
+- // this is a map of removal tick to a map of chunks and the number of tickets a chunk has that are to expire that tick
+- private final Long2ObjectOpenHashMap<Long2IntOpenHashMap> removeTickToChunkExpireTicketCount = new Long2ObjectOpenHashMap<>();
++ // Paper - region threading
+ private final ServerLevel world;
+ private final ChunkTaskScheduler taskScheduler;
+- private long currentTick;
+
+- private final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = new ArrayDeque<>();
+- private final ObjectRBTreeSet<NewChunkHolder> autoSaveQueue = new ObjectRBTreeSet<>((final NewChunkHolder c1, final NewChunkHolder c2) -> {
+- if (c1 == c2) {
+- return 0;
++ // Paper start - region threading
++ public static final class HolderManagerRegionData {
++ private final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = new ArrayDeque<>();
++ private final ObjectRBTreeSet<NewChunkHolder> autoSaveQueue = new ObjectRBTreeSet<>((final NewChunkHolder c1, final NewChunkHolder c2) -> {
++ if (c1 == c2) {
++ return 0;
++ }
++
++ final int saveTickCompare = Long.compare(c1.lastAutoSave, c2.lastAutoSave);
++
++ if (saveTickCompare != 0) {
++ return saveTickCompare;
++ }
++
++ final long coord1 = CoordinateUtils.getChunkKey(c1.chunkX, c1.chunkZ);
++ final long coord2 = CoordinateUtils.getChunkKey(c2.chunkX, c2.chunkZ);
++
++ if (coord1 == coord2) {
++ throw new IllegalStateException("Duplicate chunkholder in auto save queue");
++ }
++
++ return Long.compare(coord1, coord2);
++ });
++ private long currentTick;
++ private final Long2ObjectOpenHashMap<SortedArraySet<Ticket<?>>> tickets = new Long2ObjectOpenHashMap<>(8192, 0.25f);
++ // what a disaster of a name
++ // this is a map of removal tick to a map of chunks and the number of tickets a chunk has that are to expire that tick
++ private final Long2ObjectOpenHashMap<Long2IntOpenHashMap> removeTickToChunkExpireTicketCount = new Long2ObjectOpenHashMap<>();
++
++ // special region threading fields
++ // this field contains chunk holders that were created in addTicketAtLevel
++ // because the chunk holders were created without a reliable unload hook (i.e creation for entity/poi loading,
++ // which always check for unload after their tasks finish) we need to do that ourselves later
++ private final ReferenceOpenHashSet<NewChunkHolder> specialCaseUnload = new ReferenceOpenHashSet<>();
++
++ public void merge(final HolderManagerRegionData into, final long tickOffset) {
++ // Order doesn't really matter for the pending full update...
++ into.pendingFullLoadUpdate.addAll(this.pendingFullLoadUpdate);
++
++ // We need to copy the set to iterate over, because modifying the field used in compareTo while iterating
++ // will destroy the result from compareTo (However, the set is not destroyed _after_ iteration because a constant
++ // addition to every entry will not affect compareTo).
++ for (final NewChunkHolder holder : new ArrayList<>(this.autoSaveQueue)) {
++ holder.lastAutoSave += tickOffset;
++ into.autoSaveQueue.add(holder);
++ }
++
++ final long chunkManagerTickOffset = into.currentTick - this.currentTick;
++ for (final Iterator<Long2ObjectMap.Entry<SortedArraySet<Ticket<?>>>> iterator = this.tickets.long2ObjectEntrySet().fastIterator();
++ iterator.hasNext();) {
++ final Long2ObjectMap.Entry<SortedArraySet<Ticket<?>>> entry = iterator.next();
++ final SortedArraySet<Ticket<?>> oldTickets = entry.getValue();
++ final SortedArraySet<Ticket<?>> newTickets = SortedArraySet.create(Math.max(4, oldTickets.size() + 1));
++ for (final Ticket<?> ticket : oldTickets) {
++ newTickets.add(
++ new Ticket<>(ticket.getType(), ticket.getTicketLevel(), ticket.key,
++ ticket.removalTick == NO_TIMEOUT_MARKER ? NO_TIMEOUT_MARKER : ticket.removalTick + chunkManagerTickOffset)
++ );
++ }
++ into.tickets.put(entry.getLongKey(), newTickets);
++ }
++ for (final Iterator<Long2ObjectMap.Entry<Long2IntOpenHashMap>> iterator = this.removeTickToChunkExpireTicketCount.long2ObjectEntrySet().fastIterator();
++ iterator.hasNext();) {
++ final Long2ObjectMap.Entry<Long2IntOpenHashMap> entry = iterator.next();
++ into.removeTickToChunkExpireTicketCount.merge(
++ (long)(entry.getLongKey() + chunkManagerTickOffset), entry.getValue(),
++ (final Long2IntOpenHashMap t, final Long2IntOpenHashMap f) -> {
++ for (final Iterator<Long2IntMap.Entry> itr = f.long2IntEntrySet().fastIterator(); itr.hasNext();) {
++ final Long2IntMap.Entry e = itr.next();
++ t.addTo(e.getLongKey(), e.getIntValue());
++ }
++ return t;
++ }
++ );
++ }
++
++ // add them all
++ into.specialCaseUnload.addAll(this.specialCaseUnload);
+ }
+
+- final int saveTickCompare = Long.compare(c1.lastAutoSave, c2.lastAutoSave);
++ public void split(final int chunkToRegionShift, final Long2ReferenceOpenHashMap<HolderManagerRegionData> regionToData,
++ final ReferenceOpenHashSet<HolderManagerRegionData> dataSet) {
++ for (final NewChunkHolder fullLoadUpdate : this.pendingFullLoadUpdate) {
++ final int regionCoordinateX = fullLoadUpdate.chunkX >> chunkToRegionShift;
++ final int regionCoordinateZ = fullLoadUpdate.chunkZ >> chunkToRegionShift;
++
++ final HolderManagerRegionData data = regionToData.get(CoordinateUtils.getChunkKey(regionCoordinateX, regionCoordinateZ));
++ if (data != null) {
++ data.pendingFullLoadUpdate.add(fullLoadUpdate);
++ } // else: fullLoadUpdate is an unloaded chunk holder
++ }
+
+- if (saveTickCompare != 0) {
+- return saveTickCompare;
++ for (final NewChunkHolder autoSave : this.autoSaveQueue) {
++ final int regionCoordinateX = autoSave.chunkX >> chunkToRegionShift;
++ final int regionCoordinateZ = autoSave.chunkZ >> chunkToRegionShift;
++
++ final HolderManagerRegionData data = regionToData.get(CoordinateUtils.getChunkKey(regionCoordinateX, regionCoordinateZ));
++ if (data != null) {
++ data.autoSaveQueue.add(autoSave);
++ } // else: autoSave is an unloaded chunk holder
++ }
++ for (final HolderManagerRegionData data : dataSet) {
++ data.currentTick = this.currentTick;
++ }
++ for (final Iterator<Long2ObjectMap.Entry<SortedArraySet<Ticket<?>>>> iterator = this.tickets.long2ObjectEntrySet().fastIterator();
++ iterator.hasNext();) {
++ final Long2ObjectMap.Entry<SortedArraySet<Ticket<?>>> entry = iterator.next();
++ final long chunkKey = entry.getLongKey();
++ final int regionCoordinateX = CoordinateUtils.getChunkX(chunkKey) >> chunkToRegionShift;
++ final int regionCoordinateZ = CoordinateUtils.getChunkZ(chunkKey) >> chunkToRegionShift;
++
++ // can never be null, since a chunk holder exists if the ticket set is not empty
++ regionToData.get(CoordinateUtils.getChunkKey(regionCoordinateX, regionCoordinateZ)).tickets.put(chunkKey, entry.getValue());
++ }
++ for (final Iterator<Long2ObjectMap.Entry<Long2IntOpenHashMap>> iterator = this.removeTickToChunkExpireTicketCount.long2ObjectEntrySet().fastIterator();
++ iterator.hasNext();) {
++ final Long2ObjectMap.Entry<Long2IntOpenHashMap> entry = iterator.next();
++ final long tick = entry.getLongKey();
++ final Long2IntOpenHashMap chunkToCount = entry.getValue();
++
++ for (final Iterator<Long2IntMap.Entry> itr = chunkToCount.long2IntEntrySet().fastIterator(); itr.hasNext();) {
++ final Long2IntMap.Entry e = itr.next();
++ final long chunkKey = e.getLongKey();
++ final int regionCoordinateX = CoordinateUtils.getChunkX(chunkKey) >> chunkToRegionShift;
++ final int regionCoordinateZ = CoordinateUtils.getChunkZ(chunkKey) >> chunkToRegionShift;
++ final int count = e.getIntValue();
++
++ // can never be null, since a chunk holder exists if the ticket set is not empty
++ final HolderManagerRegionData data = regionToData.get(CoordinateUtils.getChunkKey(regionCoordinateX, regionCoordinateZ));
++
++ data.removeTickToChunkExpireTicketCount.computeIfAbsent(tick, (final long keyInMap) -> {
++ return new Long2IntOpenHashMap();
++ }).put(chunkKey, count);
++ }
++ }
++
++ for (final NewChunkHolder special : this.specialCaseUnload) {
++ final int regionCoordinateX = CoordinateUtils.getChunkX(special.chunkX) >> chunkToRegionShift;
++ final int regionCoordinateZ = CoordinateUtils.getChunkZ(special.chunkZ) >> chunkToRegionShift;
++
++ // can never be null, since this chunk holder is loaded
++ regionToData.get(CoordinateUtils.getChunkKey(regionCoordinateX, regionCoordinateZ)).specialCaseUnload.add(special);
++ }
+ }
++ }
+
+- final long coord1 = CoordinateUtils.getChunkKey(c1.chunkX, c1.chunkZ);
+- final long coord2 = CoordinateUtils.getChunkKey(c2.chunkX, c2.chunkZ);
++ private ChunkHolderManager.HolderManagerRegionData getCurrentRegionData() {
++ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> region =
++ TickRegionScheduler.getCurrentRegion();
+
+- if (coord1 == coord2) {
+- throw new IllegalStateException("Duplicate chunkholder in auto save queue");
++ if (region == null) {
++ return null;
+ }
+
+- return Long.compare(coord1, coord2);
+- });
++ if (this.world != null && this.world != region.getData().world) {
++ throw new IllegalStateException("World check failed: expected world: " + this.world.getWorld().getKey() + ", region world: " + region.getData().world.getWorld().getKey());
++ }
++
++ return region.getData().getHolderManagerRegionData();
++ }
++
++ // MUST hold ticket lock
++ private ChunkHolderManager.HolderManagerRegionData getDataFor(final long key) {
++ return this.getDataFor(CoordinateUtils.getChunkX(key), CoordinateUtils.getChunkZ(key));
++ }
++
++ // MUST hold ticket lock
++ private ChunkHolderManager.HolderManagerRegionData getDataFor(final int chunkX, final int chunkZ) {
++ if (!this.ticketLock.isHeldByCurrentThread()) {
++ throw new IllegalStateException("Must hold ticket level lock");
++ }
++
++ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> region
++ = this.world.regioniser.getRegionAtUnsynchronised(chunkX, chunkZ);
++
++ if (region == null) {
++ return null;
++ }
++
++ return region.getData().getHolderManagerRegionData();
++ }
++ // Paper end - region threading
++
+
+ public ChunkHolderManager(final ServerLevel world, final ChunkTaskScheduler taskScheduler) {
+ this.world = world;
+@@ -129,8 +296,13 @@ public final class ChunkHolderManager {
+ }
+
+ public void close(final boolean save, final boolean halt) {
++ // Paper start - region threading
++ this.close(save, halt, true, true, true);
++ }
++ public void close(final boolean save, final boolean halt, final boolean first, final boolean last, final boolean checkRegions) {
++ // Paper end - region threading
+ TickThread.ensureTickThread("Closing world off-main");
+- if (halt) {
++ if (first && halt) { // Paper - region threading
+ LOGGER.info("Waiting 60s for chunk system to halt for world '" + this.world.getWorld().getName() + "'");
+ if (!this.taskScheduler.halt(true, TimeUnit.SECONDS.toNanos(60L))) {
+ LOGGER.warn("Failed to halt world generation/loading tasks for world '" + this.world.getWorld().getName() + "'");
+@@ -140,9 +312,10 @@ public final class ChunkHolderManager {
+ }
+
+ if (save) {
+- this.saveAllChunks(true, true, true);
++ this.saveAllChunksRegionised(true, true, true, first, last, checkRegions); // Paper - region threading
+ }
+
++ if (last) { // Paper - region threading
+ if (this.world.chunkDataControllerNew.hasTasks() || this.world.entityDataControllerNew.hasTasks() || this.world.poiDataControllerNew.hasTasks()) {
+ RegionFileIOThread.flush();
+ }
+@@ -163,27 +336,34 @@ public final class ChunkHolderManager {
+ } catch (final IOException ex) {
+ LOGGER.error("Failed to close poi regionfile cache for world '" + this.world.getWorld().getName() + "'", ex);
+ }
++ } // Paper - region threading
+ }
+
+ void ensureInAutosave(final NewChunkHolder holder) {
+- if (!this.autoSaveQueue.contains(holder)) {
+- holder.lastAutoSave = MinecraftServer.currentTick;
+- this.autoSaveQueue.add(holder);
++ // Paper start - region threading
++ final HolderManagerRegionData regionData = this.getCurrentRegionData();
++ if (!regionData.autoSaveQueue.contains(holder)) {
++ holder.lastAutoSave = RegionisedServer.getCurrentTick();
++ // Paper end - region threading
++ regionData.autoSaveQueue.add(holder);
+ }
+ }
+
+ public void autoSave() {
+ final List reschedule = new ArrayList<>();
+- final long currentTick = MinecraftServer.currentTickLong;
++ final long currentTick = RegionisedServer.getCurrentTick();
+ final long maxSaveTime = currentTick - this.world.paperConfig().chunks.autoSaveInterval.value();
+- for (int autoSaved = 0; autoSaved < this.world.paperConfig().chunks.maxAutoSaveChunksPerTick && !this.autoSaveQueue.isEmpty();) {
+- final NewChunkHolder holder = this.autoSaveQueue.first();
++ // Paper start - region threading
++ final HolderManagerRegionData regionData = this.getCurrentRegionData();
++ for (int autoSaved = 0; autoSaved < this.world.paperConfig().chunks.maxAutoSaveChunksPerTick && !regionData.autoSaveQueue.isEmpty();) {
++ // Paper end - region threading
++ final NewChunkHolder holder = regionData.autoSaveQueue.first();
+
+ if (holder.lastAutoSave > maxSaveTime) {
+ break;
+ }
+
+- this.autoSaveQueue.remove(holder);
++ regionData.autoSaveQueue.remove(holder);
+
+ holder.lastAutoSave = currentTick;
+ if (holder.save(false, false) != null) {
+@@ -197,15 +377,20 @@ public final class ChunkHolderManager {
+
+ for (final NewChunkHolder holder : reschedule) {
+ if (holder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
+- this.autoSaveQueue.add(holder);
++ regionData.autoSaveQueue.add(holder);
+ }
+ }
+ }
+
+ public void saveAllChunks(final boolean flush, final boolean shutdown, final boolean logProgress) {
++ // Paper start - region threading
++ this.saveAllChunksRegionised(flush, shutdown, logProgress, true, true, true);
++ }
++ public void saveAllChunksRegionised(final boolean flush, final boolean shutdown, final boolean logProgress, final boolean first, final boolean last, final boolean checkRegion) {
++ // Paper end - region threading
+ final List holders = this.getChunkHolders();
+
+- if (logProgress) {
++ if (first && logProgress) { // Paper - region threading
+ LOGGER.info("Saving all chunkholders for world '" + this.world.getWorld().getName() + "'");
+ }
+
+@@ -213,7 +398,7 @@ public final class ChunkHolderManager {
+
+ int saved = 0;
+
+- long start = System.nanoTime();
++ final long start = System.nanoTime();
+ long lastLog = start;
+ boolean needsFlush = false;
+ final int flushInterval = 50;
+@@ -224,6 +409,12 @@ public final class ChunkHolderManager {
+
+ for (int i = 0, len = holders.size(); i < len; ++i) {
+ final NewChunkHolder holder = holders.get(i);
++ // Paper start - region threading
++ if (!checkRegion && !TickThread.isTickThreadFor(this.world, holder.chunkX, holder.chunkZ)) {
++ // skip holders that would fail the thread check
++ continue;
++ }
++ // Paper end - region threading
+ try {
+ final NewChunkHolder.SaveStat saveStat = holder.save(shutdown, false);
+ if (saveStat != null) {
+@@ -256,7 +447,7 @@ public final class ChunkHolderManager {
+ }
+ }
+ }
+- if (flush) {
++ if (last && flush) { // Paper - region threading
+ RegionFileIOThread.flush();
+ }
+ if (logProgress) {
+@@ -290,18 +481,16 @@ public final class ChunkHolderManager {
+ }
+
+ public boolean hasTickets() {
+- this.ticketLock.lock();
+- try {
+- return !this.tickets.isEmpty();
+- } finally {
+- this.ticketLock.unlock();
+- }
++ return !this.getTicketsCopy().isEmpty(); // Paper - region threading
+ }
+
+ public String getTicketDebugString(final long coordinate) {
+ this.ticketLock.lock();
+ try {
+- final SortedArraySet<Ticket<?>> tickets = this.tickets.get(coordinate);
++ // Paper start - region threading
++ final ChunkHolderManager.HolderManagerRegionData holderManagerRegionData = this.getDataFor(coordinate);
++ final SortedArraySet<Ticket<?>> tickets = holderManagerRegionData == null ? null : holderManagerRegionData.tickets.get(coordinate);
++ // Paper end - region threading
+
+ return tickets != null ? tickets.first().toString() : "no_ticket";
+ } finally {
+@@ -312,7 +501,17 @@ public final class ChunkHolderManager {
+ public Long2ObjectOpenHashMap<SortedArraySet<Ticket<?>>> getTicketsCopy() {
+ this.ticketLock.lock();
+ try {
+- return this.tickets.clone();
++ // Paper start - region threading
++ Long2ObjectOpenHashMap<SortedArraySet<Ticket<?>>> ret = new Long2ObjectOpenHashMap<>();
++ this.world.regioniser.computeForAllRegions((region) -> {
++ for (final LongIterator iterator = region.getData().getHolderManagerRegionData().tickets.keySet().longIterator(); iterator.hasNext();) {
++ final long chunk = iterator.nextLong();
++
++ ret.put(chunk, region.getData().getHolderManagerRegionData().tickets.get(chunk));
++ }
++ });
++ return ret;
++ // Paper end - region threading
+ } finally {
+ this.ticketLock.unlock();
+ }
+@@ -322,7 +521,11 @@ public final class ChunkHolderManager {
+ ImmutableList.Builder<Ticket<?>> ret;
+ this.ticketLock.lock();
+ try {
+- SortedArraySet<Ticket<?>> tickets = this.tickets.get(ChunkPos.asLong(x, z));
++ // Paper start - region threading
++ final long coordinate = CoordinateUtils.getChunkKey(x, z);
++ final ChunkHolderManager.HolderManagerRegionData holderManagerRegionData = this.getDataFor(coordinate);
++ final SortedArraySet<Ticket<?>> tickets = holderManagerRegionData == null ? null : holderManagerRegionData.tickets.get(coordinate);
++ // Paper end - region threading
+
+ if (tickets == null) {
+ return Collections.emptyList();
+@@ -377,10 +580,27 @@ public final class ChunkHolderManager {
+
+ this.ticketLock.lock();
+ try {
+- final long removeTick = removeDelay == 0 ? NO_TIMEOUT_MARKER : this.currentTick + removeDelay;
++ // Paper start - region threading
++ NewChunkHolder holder = this.chunkHolders.get(chunk);
++ final boolean addToSpecial = holder == null;
++ if (addToSpecial) {
++ // we need to guarantee that a chunk holder exists for each ticket
++ // this must be executed before retrieving the holder manager data for a target chunk, to ensure the
++ // region will exist
++ this.chunkHolders.put(chunk, holder = this.createChunkHolder(chunk));
++ }
++
++ final ChunkHolderManager.HolderManagerRegionData targetData = this.getDataFor(chunk);
++ if (addToSpecial) {
++ // no guarantee checkUnload is called for this chunk holder - by adding to the special case unload,
++ // the unload chunks call will perform it
++ targetData.specialCaseUnload.add(holder);
++ }
++ // Paper end - region threading
++ final long removeTick = removeDelay == 0 ? NO_TIMEOUT_MARKER : targetData.currentTick + removeDelay; // Paper - region threading
+ final Ticket<T> ticket = new Ticket<>(type, level, identifier, removeTick);
+
+- final SortedArraySet<Ticket<?>> ticketsAtChunk = this.tickets.computeIfAbsent(chunk, (final long keyInMap) -> {
++ final SortedArraySet<Ticket<?>> ticketsAtChunk = targetData.tickets.computeIfAbsent(chunk, (final long keyInMap) -> { // Paper - region threading
+ return SortedArraySet.create(4);
+ });
+
+@@ -392,25 +612,25 @@ public final class ChunkHolderManager {
+ final long oldRemovalTick = current.removalTick;
+ if (removeTick != oldRemovalTick) {
+ if (oldRemovalTick != NO_TIMEOUT_MARKER) {
+- final Long2IntOpenHashMap removeCounts = this.removeTickToChunkExpireTicketCount.get(oldRemovalTick);
++ final Long2IntOpenHashMap removeCounts = targetData.removeTickToChunkExpireTicketCount.get(oldRemovalTick); // Paper - region threading
+ final int prevCount = removeCounts.addTo(chunk, -1);
+
+ if (prevCount == 1) {
+ removeCounts.remove(chunk);
+ if (removeCounts.isEmpty()) {
+- this.removeTickToChunkExpireTicketCount.remove(oldRemovalTick);
++ targetData.removeTickToChunkExpireTicketCount.remove(oldRemovalTick); // Paper - region threading
+ }
+ }
+ }
+ if (removeTick != NO_TIMEOUT_MARKER) {
+- this.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> {
++ targetData.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> { // Paper - region threading
+ return new Long2IntOpenHashMap();
+ }).addTo(chunk, 1);
+ }
+ }
+ } else {
+ if (removeTick != NO_TIMEOUT_MARKER) {
+- this.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> {
++ targetData.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> { // Paper - region threading
+ return new Long2IntOpenHashMap();
+ }).addTo(chunk, 1);
+ }
+@@ -439,35 +659,43 @@ public final class ChunkHolderManager {
+ return false;
+ }
+
++ final ChunkHolderManager.HolderManagerRegionData currRegionData = this.getCurrentRegionData(); // Paper - region threading
++
+ this.ticketLock.lock();
+ try {
+- final SortedArraySet<Ticket<?>> ticketsAtChunk = this.tickets.get(chunk);
++ // Paper start - region threading
++ final ChunkHolderManager.HolderManagerRegionData targetData = this.getDataFor(chunk);
++
++ final boolean sameRegion = currRegionData == targetData;
++
++ final SortedArraySet<Ticket<?>> ticketsAtChunk = targetData == null ? null : targetData.tickets.get(chunk);
++ // Paper end - region threading
+ if (ticketsAtChunk == null) {
+ return false;
+ }
+
+ final int oldLevel = getTicketLevelAt(ticketsAtChunk);
+- final Ticket<T> ticket = (Ticket<T>)ticketsAtChunk.removeAndGet(new Ticket<>(type, level, identifier, -2L));
++ final Ticket<T> ticket = (Ticket<T>)ticketsAtChunk.removeAndGet(new Ticket<>(type, level, identifier, PROBE_MARKER)); // Paper - region threading
+
+ if (ticket == null) {
+ return false;
+ }
+
+ if (ticketsAtChunk.isEmpty()) {
+- this.tickets.remove(chunk);
++ targetData.tickets.remove(chunk); // Paper - region threading
+ }
+
+ final int newLevel = getTicketLevelAt(ticketsAtChunk);
+
+ final long removeTick = ticket.removalTick;
+ if (removeTick != NO_TIMEOUT_MARKER) {
+- final Long2IntOpenHashMap removeCounts = this.removeTickToChunkExpireTicketCount.get(removeTick);
++ final Long2IntOpenHashMap removeCounts = targetData.removeTickToChunkExpireTicketCount.get(removeTick); // Paper - region threading
+ final int currCount = removeCounts.addTo(chunk, -1);
+
+ if (currCount == 1) {
+ removeCounts.remove(chunk);
+ if (removeCounts.isEmpty()) {
+- this.removeTickToChunkExpireTicketCount.remove(removeTick);
++ targetData.removeTickToChunkExpireTicketCount.remove(removeTick); // Paper - region threading
+ }
+ }
+ }
+@@ -476,6 +704,13 @@ public final class ChunkHolderManager {
+ this.updateTicketLevel(chunk, newLevel);
+ }
+
++ // Paper start - region threading
++ // if we're not the target region, we should not change the ticket levels while the target region may be ticking
++ if (!sameRegion && newLevel > level) {
++ this.addTicketAtLevel(TicketType.UNKNOWN, chunk, level, new ChunkPos(chunk));
++ }
++ // Paper end - region threading
++
+ return true;
+ } finally {
+ this.ticketLock.unlock();
+@@ -516,24 +751,33 @@ public final class ChunkHolderManager {
+
+ this.ticketLock.lock();
+ try {
+- for (final LongIterator iterator = new LongArrayList(this.tickets.keySet()).longIterator(); iterator.hasNext();) {
+- final long chunk = iterator.nextLong();
++ // Paper start - region threading
++ this.world.regioniser.computeForAllRegions((region) -> {
++ for (final LongIterator iterator = new LongArrayList(region.getData().getHolderManagerRegionData().tickets.keySet()).longIterator(); iterator.hasNext();) {
++ final long chunk = iterator.nextLong();
+
+- this.removeTicketAtLevel(ticketType, chunk, ticketLevel, ticketIdentifier);
+- }
++ this.removeTicketAtLevel(ticketType, chunk, ticketLevel, ticketIdentifier);
++ }
++ });
++ // Paper end - region threading
+ } finally {
+ this.ticketLock.unlock();
+ }
+ }
+
+ public void tick() {
+- TickThread.ensureTickThread("Cannot tick ticket manager off-main");
++ // Paper start - region threading
++ final ChunkHolderManager.HolderManagerRegionData data = this.getCurrentRegionData();
++ if (data == null) {
++ throw new IllegalStateException("Not running tick() while on a region");
++ }
++ // Paper end - region threading
+
+ this.ticketLock.lock();
+ try {
+- final long tick = ++this.currentTick;
++ final long tick = ++data.currentTick; // Paper - region threading
+
+- final Long2IntOpenHashMap toRemove = this.removeTickToChunkExpireTicketCount.remove(tick);
++ final Long2IntOpenHashMap toRemove = data.removeTickToChunkExpireTicketCount.remove(tick); // Paper - region threading
+
+ if (toRemove == null) {
+ return;
+@@ -546,10 +790,10 @@ public final class ChunkHolderManager {
+ for (final LongIterator iterator = toRemove.keySet().longIterator(); iterator.hasNext();) {
+ final long chunk = iterator.nextLong();
+
+- final SortedArraySet<Ticket<?>> tickets = this.tickets.get(chunk);
++ final SortedArraySet<Ticket<?>> tickets = data.tickets.get(chunk); // Paper - region threading
+ tickets.removeIf(expireNow);
+ if (tickets.isEmpty()) {
+- this.tickets.remove(chunk);
++ data.tickets.remove(chunk); // Paper - region threading
+ this.ticketLevelPropagator.removeSource(chunk);
+ } else {
+ this.ticketLevelPropagator.setSource(chunk, convertBetweenTicketLevels(tickets.first().getTicketLevel()));
+@@ -798,30 +1042,62 @@ public final class ChunkHolderManager {
+ if (changedFullStatus.isEmpty()) {
+ return;
+ }
+- if (!TickThread.isTickThread()) {
+- this.taskScheduler.scheduleChunkTask(() -> {
+- final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = ChunkHolderManager.this.pendingFullLoadUpdate;
+- for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
+- pendingFullLoadUpdate.add(changedFullStatus.get(i));
+- }
+
+- ChunkHolderManager.this.processPendingFullUpdate();
+- }, PrioritisedExecutor.Priority.HIGHEST);
+- } else {
+- final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = this.pendingFullLoadUpdate;
+- for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
+- pendingFullLoadUpdate.add(changedFullStatus.get(i));
++ final Long2ObjectOpenHashMap<List<NewChunkHolder>> sectionToUpdates = new Long2ObjectOpenHashMap<>();
++ final List<NewChunkHolder> thisRegionHolders = new ArrayList<>();
++
++ final int regionShift = this.world.regioniser.sectionChunkShift;
++ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> thisRegion
++ = TickRegionScheduler.getCurrentRegion();
++
++ for (final NewChunkHolder holder : changedFullStatus) {
++ final int regionX = holder.chunkX >> regionShift;
++ final int regionZ = holder.chunkZ >> regionShift;
++ final long holderSectionKey = CoordinateUtils.getChunkKey(regionX, regionZ);
++
++ // region may be null
++ if (thisRegion != null && this.world.regioniser.getRegionAtUnsynchronised(holder.chunkX, holder.chunkZ) == thisRegion) {
++ thisRegionHolders.add(holder);
++ } else {
++ sectionToUpdates.computeIfAbsent(holderSectionKey, (final long keyInMap) -> {
++ return new ArrayList<>();
++ }).add(holder);
++ }
++ }
++
++ if (!thisRegionHolders.isEmpty()) {
++ thisRegion.getData().getHolderManagerRegionData().pendingFullLoadUpdate.addAll(thisRegionHolders);
++ }
++
++ if (!sectionToUpdates.isEmpty()) {
++ for (final Iterator<Long2ObjectMap.Entry<List<NewChunkHolder>>> iterator = sectionToUpdates.long2ObjectEntrySet().fastIterator();
++ iterator.hasNext();) {
++ final Long2ObjectMap.Entry<List<NewChunkHolder>> entry = iterator.next();
++ final long sectionKey = entry.getLongKey();
++
++ final int chunkX = CoordinateUtils.getChunkX(sectionKey) << regionShift;
++ final int chunkZ = CoordinateUtils.getChunkZ(sectionKey) << regionShift;
++
++ final List<NewChunkHolder> regionHolders = entry.getValue();
++ this.taskScheduler.scheduleChunkTaskEventually(chunkX, chunkZ, () -> { // Paper - region threading
++ ChunkHolderManager.this.getCurrentRegionData().pendingFullLoadUpdate.addAll(regionHolders);
++ ChunkHolderManager.this.processPendingFullUpdate();
++ }, PrioritisedExecutor.Priority.HIGHEST);
+ }
+ }
+ }
+
+ final ReferenceLinkedOpenHashSet<NewChunkHolder> unloadQueue = new ReferenceLinkedOpenHashSet<>();
+
++ /*
++ * Note: Only called on chunk holders that the current ticking region owns
++ */
+ private void removeChunkHolder(final NewChunkHolder holder) {
+ holder.killed = true;
+ holder.vanillaChunkHolder.onChunkRemove();
+- this.autoSaveQueue.remove(holder);
++ // Paper - region threading
+ ChunkSystem.onChunkHolderDelete(this.world, holder.vanillaChunkHolder);
++ this.getCurrentRegionData().autoSaveQueue.remove(holder); // Paper - region threading
+ this.chunkHolders.remove(CoordinateUtils.getChunkKey(holder.chunkX, holder.chunkZ));
+ }
+
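The bucketing above maps each chunk holder to a "region section": the chunk coordinates are right-shifted by the regioniser's section shift, and the section coordinates are packed into one key. A minimal standalone sketch of that mapping, reusing the names from this patch (assuming the regioniser field and CoordinateUtils are accessible from the caller):

    // Group a chunk into its region section, as addChangedStatuses does above.
    final int regionShift = world.regioniser.sectionChunkShift; // sections are power-of-two sized
    final int sectionX = chunkX >> regionShift;
    final int sectionZ = chunkZ >> regionShift;
    final long sectionKey = CoordinateUtils.getChunkKey(sectionX, sectionZ); // packed section key
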
+@@ -839,23 +1115,42 @@ public final class ChunkHolderManager {
+ throw new IllegalStateException("Cannot hold scheduling lock while calling processUnloads");
+ }
+
++ final ChunkHolderManager.HolderManagerRegionData currentData = this.getCurrentRegionData(); // Paper - region threading
++
+ final List<NewChunkHolder.UnloadState> unloadQueue;
+ final List<ChunkProgressionTask> scheduleList = new ArrayList<>();
+ this.ticketLock.lock();
+ try {
+ this.taskScheduler.schedulingLock.lock();
+ try {
++ // Paper start - region threading
++ for (final NewChunkHolder special : currentData.specialCaseUnload) {
++ special.checkUnload();
++ }
++ currentData.specialCaseUnload.clear();
++ // Paper end - region threading
+ if (this.unloadQueue.isEmpty()) {
+ return;
+ }
+ // in order to ensure all chunks in the unload queue do not have a pending ticket level update,
+ // process them now
+ this.processTicketUpdates(false, false, scheduleList);
+- unloadQueue = new ArrayList<>((int)(this.unloadQueue.size() * 0.05) + 1);
+
+- final int unloadCount = Math.max(50, (int)(this.unloadQueue.size() * 0.05));
+- for (int i = 0; i < unloadCount && !this.unloadQueue.isEmpty(); ++i) {
+- final NewChunkHolder chunkHolder = this.unloadQueue.removeFirst();
++ // Paper start - region threading
++ final ArrayDeque<NewChunkHolder> toUnload = new ArrayDeque<>();
++ // The unload queue is globally maintained, but we can only unload chunks in our region
++ for (final NewChunkHolder holder : this.unloadQueue) {
++ if (TickThread.isTickThreadFor(this.world, holder.chunkX, holder.chunkZ)) {
++ toUnload.add(holder);
++ }
++ }
++ // Paper end - region threading
++
++ final int unloadCount = Math.max(50, (int)(toUnload.size() * 0.05)); // Paper - region threading
++ unloadQueue = new ArrayList<>(unloadCount + 1); // Paper - region threading
++ for (int i = 0; i < unloadCount && !toUnload.isEmpty(); ++i) { // Paper - region threading
++ final NewChunkHolder chunkHolder = toUnload.removeFirst(); // Paper - region threading
++ this.unloadQueue.remove(chunkHolder); // Paper - region threading
+ if (chunkHolder.isSafeToUnload() != null) {
+ LOGGER.error("Chunkholder " + chunkHolder + " is not safe to unload but is inside the unload queue?");
+ continue;
+@@ -1193,7 +1488,12 @@ public final class ChunkHolderManager {
+
+ // only call on tick thread
+ protected final boolean processPendingFullUpdate() {
+- final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = this.pendingFullLoadUpdate;
++ final HolderManagerRegionData data = this.getCurrentRegionData();
++ if (data == null) {
++ return false;
++ }
++
++ final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = data.pendingFullLoadUpdate;
+
+ boolean ret = false;
+
+@@ -1204,9 +1504,7 @@ public final class ChunkHolderManager {
+ ret |= holder.handleFullStatusChange(changedFullStatus);
+
+ if (!changedFullStatus.isEmpty()) {
+- for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
+- pendingFullLoadUpdate.add(changedFullStatus.get(i));
+- }
++ this.addChangedStatuses(changedFullStatus);
+ changedFullStatus.clear();
+ }
+ }
+@@ -1256,7 +1554,7 @@ public final class ChunkHolderManager {
+
+ private JsonObject getDebugJsonNoLock() {
+ final JsonObject ret = new JsonObject();
+- ret.addProperty("current_tick", Long.valueOf(this.currentTick));
++ // Paper - region threading - move down
+
+ final JsonArray unloadQueue = new JsonArray();
+ ret.add("unload_queue", unloadQueue);
+@@ -1275,60 +1573,73 @@ public final class ChunkHolderManager {
+ holders.add(holder.getDebugJson());
+ }
+
+- final JsonArray removeTickToChunkExpireTicketCount = new JsonArray();
+- ret.add("remove_tick_to_chunk_expire_ticket_count", removeTickToChunkExpireTicketCount);
++ // Paper start - region threading
++ final JsonArray regions = new JsonArray();
++ ret.add("regions", regions);
++ this.world.regioniser.computeForAllRegionsUnsynchronised((region) -> {
++ final JsonObject regionJson = new JsonObject();
++ regions.add(regionJson);
++
++ final TickRegions.TickRegionData regionData = region.getData();
+
+- for (final Long2ObjectMap.Entry<Long2IntOpenHashMap> tickEntry : this.removeTickToChunkExpireTicketCount.long2ObjectEntrySet()) {
+- final long tick = tickEntry.getLongKey();
+- final Long2IntOpenHashMap coordinateToCount = tickEntry.getValue();
++ regionJson.addProperty("current_tick", Long.valueOf(regionData.getCurrentTick()));
+
+- final JsonObject tickJson = new JsonObject();
+- removeTickToChunkExpireTicketCount.add(tickJson);
++ final JsonArray removeTickToChunkExpireTicketCount = new JsonArray();
++ regionJson.add("remove_tick_to_chunk_expire_ticket_count", removeTickToChunkExpireTicketCount);
+
+- tickJson.addProperty("tick", Long.valueOf(tick));
++ for (final Long2ObjectMap.Entry<Long2IntOpenHashMap> tickEntry : regionData.getHolderManagerRegionData().removeTickToChunkExpireTicketCount.long2ObjectEntrySet()) {
++ final long tick = tickEntry.getLongKey();
++ final Long2IntOpenHashMap coordinateToCount = tickEntry.getValue();
+
+- final JsonArray tickEntries = new JsonArray();
+- tickJson.add("entries", tickEntries);
++ final JsonObject tickJson = new JsonObject();
++ removeTickToChunkExpireTicketCount.add(tickJson);
+
+- for (final Long2IntMap.Entry entry : coordinateToCount.long2IntEntrySet()) {
+- final long coordinate = entry.getLongKey();
+- final int count = entry.getIntValue();
++ tickJson.addProperty("tick", Long.valueOf(tick));
+
+- final JsonObject entryJson = new JsonObject();
+- tickEntries.add(entryJson);
++ final JsonArray tickEntries = new JsonArray();
++ tickJson.add("entries", tickEntries);
+
+- entryJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate)));
+- entryJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate)));
+- entryJson.addProperty("count", Integer.valueOf(count));
++ for (final Long2IntMap.Entry entry : coordinateToCount.long2IntEntrySet()) {
++ final long coordinate = entry.getLongKey();
++ final int count = entry.getIntValue();
++
++ final JsonObject entryJson = new JsonObject();
++ tickEntries.add(entryJson);
++
++ entryJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate)));
++ entryJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate)));
++ entryJson.addProperty("count", Integer.valueOf(count));
++ }
+ }
+- }
+
+- final JsonArray allTicketsJson = new JsonArray();
+- ret.add("tickets", allTicketsJson);
++ final JsonArray allTicketsJson = new JsonArray();
++ regionJson.add("tickets", allTicketsJson);
+
+- for (final Long2ObjectMap.Entry<SortedArraySet<Ticket<?>>> coordinateTickets : this.tickets.long2ObjectEntrySet()) {
+- final long coordinate = coordinateTickets.getLongKey();
+- final SortedArraySet<Ticket<?>> tickets = coordinateTickets.getValue();
++ for (final Long2ObjectMap.Entry<SortedArraySet<Ticket<?>>> coordinateTickets : regionData.getHolderManagerRegionData().tickets.long2ObjectEntrySet()) {
++ final long coordinate = coordinateTickets.getLongKey();
++ final SortedArraySet<Ticket<?>> tickets = coordinateTickets.getValue();
+
+- final JsonObject coordinateJson = new JsonObject();
+- allTicketsJson.add(coordinateJson);
++ final JsonObject coordinateJson = new JsonObject();
++ allTicketsJson.add(coordinateJson);
+
+- coordinateJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate)));
+- coordinateJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate)));
++ coordinateJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate)));
++ coordinateJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate)));
+
+- final JsonArray ticketsSerialized = new JsonArray();
+- coordinateJson.add("tickets", ticketsSerialized);
++ final JsonArray ticketsSerialized = new JsonArray();
++ coordinateJson.add("tickets", ticketsSerialized);
+
+- for (final Ticket<?> ticket : tickets) {
+- final JsonObject ticketSerialized = new JsonObject();
+- ticketsSerialized.add(ticketSerialized);
++ for (final Ticket<?> ticket : tickets) {
++ final JsonObject ticketSerialized = new JsonObject();
++ ticketsSerialized.add(ticketSerialized);
+
+- ticketSerialized.addProperty("type", ticket.getType().toString());
+- ticketSerialized.addProperty("level", Integer.valueOf(ticket.getTicketLevel()));
+- ticketSerialized.addProperty("identifier", Objects.toString(ticket.key));
+- ticketSerialized.addProperty("remove_tick", Long.valueOf(ticket.removalTick));
++ ticketSerialized.addProperty("type", ticket.getType().toString());
++ ticketSerialized.addProperty("level", Integer.valueOf(ticket.getTicketLevel()));
++ ticketSerialized.addProperty("identifier", Objects.toString(ticket.key));
++ ticketSerialized.addProperty("remove_tick", Long.valueOf(ticket.removalTick));
++ }
+ }
+- }
++ });
++ // Paper end - region threading
+
+ return ret;
+ }
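With this change, the per-region state (current tick, ticket-expiry counts, tickets) moves from the top level of the debug JSON into a "regions" array. A hedged sketch of consuming the new layout with Gson (the key names come from the hunk above; how the JsonObject is obtained is left to the caller):

    // Summarise each region entry of the debug JSON produced by getDebugJsonNoLock().
    static void printRegionSummaries(final com.google.gson.JsonObject debug) {
        for (final com.google.gson.JsonElement element : debug.getAsJsonArray("regions")) {
            final com.google.gson.JsonObject region = element.getAsJsonObject();
            final long currentTick = region.get("current_tick").getAsLong();
            final int ticketedChunks = region.getAsJsonArray("tickets").size();
            System.out.println("region tick=" + currentTick + ", ticketed chunks=" + ticketedChunks);
        }
    }
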
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
+index 84cc9397237fa0c17aa1012dfb5683c90eb6d3b8..b3ae296cdf3f81550e2cc4d9516f4ed928760a81 100644
+--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
+@@ -113,7 +113,7 @@ public final class ChunkTaskScheduler {
+ public final PrioritisedThreadPool.PrioritisedPoolExecutor parallelGenExecutor;
+ public final PrioritisedThreadPool.PrioritisedPoolExecutor loadExecutor;
+
+- private final PrioritisedThreadedTaskQueue mainThreadExecutor = new PrioritisedThreadedTaskQueue();
++ // Paper - regionised ticking
+
+ final ReentrantLock schedulingLock = new ReentrantLock();
+ public final ChunkHolderManager chunkHolderManager;
+@@ -240,14 +240,13 @@ public final class ChunkTaskScheduler {
+ };
+
+ // this may not be good enough, specifically thanks to stupid ass plugins swallowing exceptions
+- this.scheduleChunkTask(chunkX, chunkZ, crash, PrioritisedExecutor.Priority.BLOCKING);
++ this.scheduleChunkTaskEventually(chunkX, chunkZ, crash, PrioritisedExecutor.Priority.BLOCKING); // Paper - region threading
+ // so, make the main thread pick it up
+ MinecraftServer.chunkSystemCrash = new RuntimeException("Chunk system crash propagated from unrecoverableChunkSystemFailure", reportedException);
+ }
+
+ public boolean executeMainThreadTask() {
+- TickThread.ensureTickThread("Cannot execute main thread task off-main");
+- return this.mainThreadExecutor.executeTask();
++ throw new UnsupportedOperationException("Use regionised ticking hooks"); // Paper - regionised ticking
+ }
+
+ public void raisePriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
+@@ -267,7 +266,7 @@ public final class ChunkTaskScheduler {
+ public void scheduleTickingState(final int chunkX, final int chunkZ, final ChunkHolder.FullChunkStatus toStatus,
+ final boolean addTicket, final PrioritisedExecutor.Priority priority,
+ final Consumer<LevelChunk> onComplete) {
+- if (!TickThread.isTickThread()) {
++ if (!TickThread.isTickThreadFor(this.world, chunkX, chunkZ)) {
+ this.scheduleChunkTask(chunkX, chunkZ, () -> {
+ ChunkTaskScheduler.this.scheduleTickingState(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
+ }, priority);
+@@ -380,9 +379,50 @@ public final class ChunkTaskScheduler {
+ });
+ }
+
++ // Paper start - region threading
++ // only appropriate to use with ServerLevel#syncLoadNonFull
++ public boolean beginChunkLoadForNonFullSync(final int chunkX, final int chunkZ, final ChunkStatus toStatus,
++ final PrioritisedExecutor.Priority priority) {
++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
++ final int minLevel = 33 + ChunkStatus.getDistance(toStatus);
++ final List<ChunkProgressionTask> tasks = new ArrayList<>();
++ this.chunkHolderManager.ticketLock.lock();
++ try {
++ this.schedulingLock.lock();
++ try {
++ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkKey);
++ if (chunkHolder == null || chunkHolder.getTicketLevel() > minLevel) {
++ return false;
++ } else {
++ final ChunkStatus genStatus = chunkHolder.getCurrentGenStatus();
++ if (genStatus != null && genStatus.isOrAfter(toStatus)) {
++ return true;
++ } else {
++ chunkHolder.raisePriority(priority);
++
++ if (!chunkHolder.upgradeGenTarget(toStatus)) {
++ this.schedule(chunkX, chunkZ, toStatus, chunkHolder, tasks);
++ }
++ }
++ }
++ } finally {
++ this.schedulingLock.unlock();
++ }
++ } finally {
++ this.chunkHolderManager.ticketLock.unlock();
++ }
++
++ for (int i = 0, len = tasks.size(); i < len; ++i) {
++ tasks.get(i).schedule();
++ }
++
++ return true;
++ }
++ // Paper end - region threading
++
+ public void scheduleChunkLoad(final int chunkX, final int chunkZ, final ChunkStatus toStatus, final boolean addTicket,
+ final PrioritisedExecutor.Priority priority, final Consumer<ChunkAccess> onComplete) {
+- if (!TickThread.isTickThread()) {
++ if (!TickThread.isTickThreadFor(this.world, chunkX, chunkZ)) {
+ this.scheduleChunkTask(chunkX, chunkZ, () -> {
+ ChunkTaskScheduler.this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
+ }, priority);
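Note the lock discipline in beginChunkLoadForNonFullSync above: the ticket lock is always acquired before the scheduling lock and released in reverse order, so every scheduling path agrees on a single global lock order and cannot deadlock against another. A sketch of the pattern (field names as in this patch):

    this.chunkHolderManager.ticketLock.lock();   // outer lock: ticket state
    try {
        this.schedulingLock.lock();              // inner lock: scheduling state
        try {
            // inspect or mutate chunk holder scheduling state here
        } finally {
            this.schedulingLock.unlock();
        }
    } finally {
        this.chunkHolderManager.ticketLock.unlock();
    }
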
+@@ -409,7 +449,7 @@ public final class ChunkTaskScheduler {
+ this.chunkHolderManager.processTicketUpdates();
+ }
+
+- final Consumer<ChunkAccess> loadCallback = (final ChunkAccess chunk) -> {
++ final Consumer<ChunkAccess> loadCallback = onComplete == null && !addTicket ? null : (final ChunkAccess chunk) -> {
+ try {
+ if (onComplete != null) {
+ onComplete.accept(chunk);
+@@ -449,7 +489,9 @@ public final class ChunkTaskScheduler {
+ if (!chunkHolder.upgradeGenTarget(toStatus)) {
+ this.schedule(chunkX, chunkZ, toStatus, chunkHolder, tasks);
+ }
+- chunkHolder.addStatusConsumer(toStatus, loadCallback);
++ if (loadCallback != null) {
++ chunkHolder.addStatusConsumer(toStatus, loadCallback);
++ }
+ }
+ }
+ } finally {
+@@ -463,7 +505,7 @@ public final class ChunkTaskScheduler {
+ tasks.get(i).schedule();
+ }
+
+- if (!scheduled) {
++ if (loadCallback != null && !scheduled) {
+ // couldn't schedule
+ try {
+ loadCallback.accept(chunk);
+@@ -652,7 +694,7 @@ public final class ChunkTaskScheduler {
+ */
+ @Deprecated
+ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final Runnable run) {
+- return this.scheduleChunkTask(run, PrioritisedExecutor.Priority.NORMAL);
++ throw new UnsupportedOperationException(); // Paper - regionised ticking
+ }
+
+ /**
+@@ -660,7 +702,7 @@ public final class ChunkTaskScheduler {
+ */
+ @Deprecated
+ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final Runnable run, final PrioritisedExecutor.Priority priority) {
+- return this.mainThreadExecutor.queueRunnable(run, priority);
++ throw new UnsupportedOperationException(); // Paper - regionised ticking
+ }
+
+ public PrioritisedExecutor.PrioritisedTask createChunkTask(final int chunkX, final int chunkZ, final Runnable run) {
+@@ -669,28 +711,33 @@ public final class ChunkTaskScheduler {
+
+ public PrioritisedExecutor.PrioritisedTask createChunkTask(final int chunkX, final int chunkZ, final Runnable run,
+ final PrioritisedExecutor.Priority priority) {
+- return this.mainThreadExecutor.createTask(run, priority);
++ return MinecraftServer.getServer().regionisedServer.taskQueue.createChunkTask(this.world, chunkX, chunkZ, run, priority); // Paper - regionised ticking
+ }
+
+ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final int chunkX, final int chunkZ, final Runnable run) {
+- return this.mainThreadExecutor.queueRunnable(run);
++ return this.scheduleChunkTask(chunkX, chunkZ, run, PrioritisedExecutor.Priority.NORMAL); // TODO rebase into chunk system patch
+ }
+
+ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final int chunkX, final int chunkZ, final Runnable run,
+ final PrioritisedExecutor.Priority priority) {
+- return this.mainThreadExecutor.queueRunnable(run, priority);
++ return MinecraftServer.getServer().regionisedServer.taskQueue.queueChunkTask(this.world, chunkX, chunkZ, run, priority); // Paper - regionised ticking
+ }
+
+- public void executeTasksUntil(final BooleanSupplier exit) {
+- if (Bukkit.isPrimaryThread()) {
+- this.mainThreadExecutor.executeConditionally(exit);
+- } else {
+- long counter = 1L;
+- while (!exit.getAsBoolean()) {
+- counter = ConcurrentUtil.linearLongBackoff(counter, 100_000L, 5_000_000L); // 100us, 5ms
+- }
+- }
++ // Paper start - region threading
++ // this function is guaranteed to never touch the ticket lock or schedule lock
++ // yes, this IS a hack so that we can avoid deadlock due to region threading introducing the
++ // ticket lock in the schedule logic
++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTaskEventually(final int chunkX, final int chunkZ, final Runnable run,
++ final PrioritisedExecutor.Priority priority) {
++ final PrioritisedExecutor.PrioritisedTask ret = this.createChunkTask(chunkX, chunkZ, run, priority);
++ this.world.taskQueueRegionData.pushGlobalChunkTask(() -> {
++ MinecraftServer.getServer().regionisedServer.taskQueue.queueChunkTask(ChunkTaskScheduler.this.world, chunkX, chunkZ, run, priority);
++ });
++ return ret;
+ }
++ // Paper end - region threading
++
++ // Paper - regionised ticking
+
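As the comment above notes, scheduleChunkTaskEventually never acquires the ticket or scheduling locks itself, so it may be called while either lock is held. A hedged usage sketch ('scheduler' stands for any ChunkTaskScheduler reference):

    // Safe even from code currently holding ticketLock/schedulingLock: the task is
    // only pushed onto the world's global chunk task queue here, and is drained later
    // by the region that owns the chunk (or by the global tick thread).
    scheduler.scheduleChunkTaskEventually(chunkX, chunkZ, () -> {
        // runs later on the thread that owns (chunkX, chunkZ)
    }, PrioritisedExecutor.Priority.NORMAL);
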
+ public boolean halt(final boolean sync, final long maxWaitNS) {
+ this.lightExecutor.halt();
+@@ -699,6 +746,7 @@ public final class ChunkTaskScheduler {
+ this.loadExecutor.halt();
+ final long time = System.nanoTime();
+ if (sync) {
++ // start at 10 * 0.5ms -> 5ms
+ for (long failures = 9L;; failures = ConcurrentUtil.linearLongBackoff(failures, 500_000L, 50_000_000L)) {
+ if (
+ !this.lightExecutor.isActive() &&
+diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
+index 8013dd333e27aa5fd0beb431fa32491eec9f5246..63d2cec73e2bcf7031cdb5dfca8151f067860ec0 100644
+--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
+@@ -708,7 +708,7 @@ public final class NewChunkHolder {
+ boolean killed;
+
+ // must hold scheduling lock
+- private void checkUnload() {
++ void checkUnload() { // Paper - region threading
+ if (this.killed) {
+ return;
+ }
+@@ -1412,7 +1412,7 @@ public final class NewChunkHolder {
+ }
+
+ // must be scheduled to main, we do not trust the callback to not do anything stupid
+- this.scheduler.scheduleChunkTask(this.chunkX, this.chunkZ, () -> {
++ this.scheduler.scheduleChunkTaskEventually(this.chunkX, this.chunkZ, () -> { // Paper - region threading
+ for (final Consumer<ChunkAccess> consumer : consumers) {
+ try {
+ consumer.accept(chunk);
+@@ -1455,7 +1455,7 @@ public final class NewChunkHolder {
+ }
+
+ // must be scheduled to main, we do not trust the callback to not do anything stupid
+- this.scheduler.scheduleChunkTask(this.chunkX, this.chunkZ, () -> {
++ this.scheduler.scheduleChunkTaskEventually(this.chunkX, this.chunkZ, () -> { // Paper - region threading
+ for (final Consumer<LevelChunk> consumer : consumers) {
+ try {
+ consumer.accept(chunk);
+@@ -1715,7 +1715,7 @@ public final class NewChunkHolder {
+ return this.entityChunk;
+ }
+
+- public long lastAutoSave;
++ public long lastAutoSave; // Paper - region threaded - change to relative delay
+
+ public static final record SaveStat(boolean savedChunk, boolean savedEntityChunk, boolean savedPoiChunk) {}
+
+@@ -1865,7 +1865,7 @@ public final class NewChunkHolder {
+ } catch (final ThreadDeath death) {
+ throw death;
+ } catch (final Throwable thr) {
+- LOGGER.error("Failed to save chunk data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'");
++ LOGGER.error("Failed to save chunk data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr); // TODO rebase
+ if (unloading && !completing) {
+ this.completeAsyncChunkDataSave(null);
+ }
+@@ -1913,7 +1913,7 @@ public final class NewChunkHolder {
+ } catch (final ThreadDeath death) {
+ throw death;
+ } catch (final Throwable thr) {
+- LOGGER.error("Failed to save entity data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'");
++ LOGGER.error("Failed to save entity data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr); // TODO rebase
+ }
+
+ return true;
+@@ -1939,7 +1939,7 @@ public final class NewChunkHolder {
+ } catch (final ThreadDeath death) {
+ throw death;
+ } catch (final Throwable thr) {
+- LOGGER.error("Failed to save poi data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'");
++ LOGGER.error("Failed to save poi data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr); // TODO rebase
+ }
+
+ return true;
+diff --git a/src/main/java/io/papermc/paper/command/PaperCommands.java b/src/main/java/io/papermc/paper/command/PaperCommands.java
+index d31b5ed47cffc61c90c926a0cd2005b72ebddfc5..e22632ba0d84c796a9bab3a1a9c43d5e5dcf73e8 100644
+--- a/src/main/java/io/papermc/paper/command/PaperCommands.java
++++ b/src/main/java/io/papermc/paper/command/PaperCommands.java
+@@ -17,7 +17,8 @@ public final class PaperCommands {
+ private static final Map<String, Command> COMMANDS = new HashMap<>();
+ static {
+ COMMANDS.put("paper", new PaperCommand("paper"));
+- COMMANDS.put("mspt", new MSPTCommand("mspt"));
++ COMMANDS.put("tpa", new io.papermc.paper.threadedregions.commands.CommandsTPA()); // Paper - region threading
++ COMMANDS.put("tps", new io.papermc.paper.threadedregions.commands.CommandServerHealth()); // Paper - region threading
+ }
+
+ public static void registerCommands(final MinecraftServer server) {
+diff --git a/src/main/java/io/papermc/paper/command/subcommands/HeapDumpCommand.java b/src/main/java/io/papermc/paper/command/subcommands/HeapDumpCommand.java
+index cd2e4d792e972b8bf1e07b8961594a670ae949cf..645fe0d8d6d301fcb7897f9162fac8971bc6abbe 100644
+--- a/src/main/java/io/papermc/paper/command/subcommands/HeapDumpCommand.java
++++ b/src/main/java/io/papermc/paper/command/subcommands/HeapDumpCommand.java
+@@ -18,7 +18,9 @@ import static net.kyori.adventure.text.format.NamedTextColor.YELLOW;
+ public final class HeapDumpCommand implements PaperSubcommand {
+ @Override
+ public boolean execute(final CommandSender sender, final String subCommand, final String[] args) {
++ io.papermc.paper.threadedregions.RegionisedServer.getInstance().addTask(() -> { // Paper - region threading
+ this.dumpHeap(sender);
++ }); // Paper - region threading
+ return true;
+ }
+
+diff --git a/src/main/java/io/papermc/paper/command/subcommands/ReloadCommand.java b/src/main/java/io/papermc/paper/command/subcommands/ReloadCommand.java
+index bd68139ae635f2ad7ec8e7a21e0056a139c4c62e..872b59bccddf2adc7bce4bec5ecdcf5f0a0f0815 100644
+--- a/src/main/java/io/papermc/paper/command/subcommands/ReloadCommand.java
++++ b/src/main/java/io/papermc/paper/command/subcommands/ReloadCommand.java
+@@ -16,7 +16,9 @@ import static net.kyori.adventure.text.format.NamedTextColor.RED;
+ public final class ReloadCommand implements PaperSubcommand {
+ @Override
+ public boolean execute(final CommandSender sender, final String subCommand, final String[] args) {
++ io.papermc.paper.threadedregions.RegionisedServer.getInstance().addTask(() -> { // Paper - region threading
+ this.doReload(sender);
++ }); // Paper - region threading
+ return true;
+ }
+
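Both command changes above follow the same pattern: instead of running on whichever thread dispatched the command, the work is deferred to the global tick thread, which owns global state such as configuration and the world list. Schematically:

    // Defer work to the global region; safe to call from any thread.
    io.papermc.paper.threadedregions.RegionisedServer.getInstance().addTask(() -> {
        // global-tick-thread context: reload configs, dump heaps, etc.
    });
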
+diff --git a/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java
+index 9f5f0d8ddc8f480b48079c70e38c9c08eff403f6..1d0e1311de8dafd42070edea40b1990c04eb7746 100644
+--- a/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java
++++ b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java
+@@ -288,6 +288,18 @@ public class GlobalConfiguration extends ConfigurationPart {
+ public boolean strictAdvancementDimensionCheck = false;
+ }
+
++ // Paper start - threaded regions
++ public ThreadedRegions threadedRegions;
++ public class ThreadedRegions extends Post {
++
++ public int threads = -1;
++
++ @Override
++ public void postProcess() {
++ io.papermc.paper.threadedregions.TickRegions.init(this);
++ }
++ }
++ // Paper end - threaded regions
+ public ChunkLoadingBasic chunkLoadingBasic;
+
+ public class ChunkLoadingBasic extends ConfigurationPart {
+diff --git a/src/main/java/io/papermc/paper/threadedregions/EntityScheduler.java b/src/main/java/io/papermc/paper/threadedregions/EntityScheduler.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..d9687722e02dfd4088c7030abbf5008eb0a092c8
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/EntityScheduler.java
+@@ -0,0 +1,181 @@
++package io.papermc.paper.threadedregions;
++
++import ca.spottedleaf.concurrentutil.util.Validate;
++import io.papermc.paper.util.TickThread;
++import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
++import net.minecraft.world.entity.Entity;
++import org.bukkit.craftbukkit.entity.CraftEntity;
++
++import java.util.ArrayDeque;
++import java.util.ArrayList;
++import java.util.List;
++import java.util.function.Consumer;
++
++/**
++ * An entity can move between worlds with an arbitrary tick delay, be temporarily removed
++ * for players (i.e. end credits), be partially removed from world state (i.e. inactive but not removed),
++ * teleport between ticking regions, teleport between worlds (which will change the underlying Entity object
++ * for non-players), and even be removed entirely from the server. The uncertainty of an entity's state can make
++ * it difficult to schedule tasks without worrying about undefined behaviors resulting from any of the states listed
++ * previously.
++ *
++ *
++ * This class is designed to eliminate those states by providing an interface to run tasks only when an entity
++ * is contained in a world, on the owning thread for the region, and by providing the current Entity object.
++ * The scheduler also allows a task to provide a callback, the "retired" callback, that will be invoked
++ * if the entity is removed before a task that was scheduled could be executed. The scheduler is also
++ * completely thread-safe, allowing tasks to be scheduled from any thread context. The scheduler also indicates
++ * properly whether a task was scheduled successfully (i.e. scheduler not retired), thus the code scheduling any task
++ * knows whether the given callbacks will be invoked eventually or not - which may be critical for off-thread
++ * contexts.
++ *
++ */
++public final class EntityScheduler {
++
++ /**
++ * The Entity. Note that it is the CraftEntity, since only that class properly tracks world transfers.
++ */
++ public final CraftEntity entity;
++
++ private static final record ScheduledTask(Consumer<? extends Entity> run, Consumer<? extends Entity> retired) {}
++
++ private long tickCount = 0L;
++ private static final long RETIRED_TICK_COUNT = -1L;
++ private final Object stateLock = new Object();
++ private final Long2ObjectOpenHashMap<List<ScheduledTask>> oneTimeDelayed = new Long2ObjectOpenHashMap<>();
++
++ private final ArrayDeque<ScheduledTask> currentlyExecuting = new ArrayDeque<>();
++
++ public EntityScheduler(final CraftEntity entity) {
++ this.entity = Validate.notNull(entity);
++ }
++
++ /**
++ * Retires the scheduler, preventing new tasks from being scheduled and invoking the retired callback
++ * on all currently scheduled tasks.
++ *
++ *
++ * Note: This should only be invoked after synchronously removing the entity from the world.
++ *
++ *
++ * @throws IllegalStateException If the scheduler is already retired.
++ */
++ public void retire() {
++ synchronized (this.stateLock) {
++ if (this.tickCount == RETIRED_TICK_COUNT) {
++ throw new IllegalStateException("Already retired");
++ }
++ this.tickCount = RETIRED_TICK_COUNT;
++ }
++
++ final Entity thisEntity = this.entity.getHandle();
++
++ // correctly handle and order retiring while running executeTick
++ for (int i = 0, len = this.currentlyExecuting.size(); i < len; ++i) {
++ final ScheduledTask task = this.currentlyExecuting.pollFirst();
++ final Consumer<Entity> retireTask = (Consumer<Entity>)task.retired;
++ if (retireTask == null) {
++ continue;
++ }
++
++ retireTask.accept(thisEntity);
++ }
++
++ for (final List<ScheduledTask> tasks : this.oneTimeDelayed.values()) {
++ for (int i = 0, len = tasks.size(); i < len; ++i) {
++ final ScheduledTask task = tasks.get(i);
++ final Consumer<Entity> retireTask = (Consumer<Entity>)task.retired;
++ if (retireTask == null) {
++ continue;
++ }
++
++ retireTask.accept(thisEntity);
++ }
++ }
++ }
++
++ /**
++ * Schedules a task with the given delay. If the task failed to schedule because the scheduler is retired (entity
++ * removed), then returns {@code false}. Otherwise, either the run callback will be invoked after the specified delay,
++ * or the retired callback will be invoked if the scheduler is retired.
++ * Note that the retired callback is invoked in critical code, so it should not attempt to remove the entity, remove
++ * other entities, load chunks, load worlds, modify ticket levels, etc.
++ *
++ *
++ * It is guaranteed that the run and retired callback are invoked on the region which owns the entity.
++ *
++ *
++ * The run and retired callback take an Entity parameter representing the current object entity that the scheduler
++ * is tied to. Since the scheduler is transferred when an entity changes dimensions, it is possible the entity parameter
++ * is not the same object as when the task was first scheduled. Thus, only the provided parameter should be used.
++ *
++ * @param run The callback to run after the specified delay, may not be null.
++ * @param retired Retire callback to run if the entity is retired before the run callback can be invoked, may be null.
++ * @param delay The delay in ticks before the run callback is invoked. Any value less-than 1 is treated as 1.
++ * @return {@code true} if the task was scheduled, which means that either the run function or the retired function
++ * will be invoked (but never both), or {@code false} indicating neither the run nor retired function will be invoked
++ * since the scheduler has been retired.
++ */
++ public boolean schedule(final Consumer<? extends Entity> run, final Consumer<? extends Entity> retired, final long delay) {
++ Validate.notNull(run, "Run task may not be null");
++
++ final ScheduledTask task = new ScheduledTask(run, retired);
++ synchronized (this.stateLock) {
++ if (this.tickCount == RETIRED_TICK_COUNT) {
++ return false;
++ }
++ this.oneTimeDelayed.computeIfAbsent(this.tickCount + Math.max(1L, delay), (final long keyInMap) -> {
++ return new ArrayList<>();
++ }).add(task);
++ }
++
++ return true;
++ }
++
++ /**
++ * Executes a tick for the scheduler.
++ *
++ * @throws IllegalStateException If the scheduler is retired.
++ */
++ public void executeTick() {
++ final Entity thisEntity = this.entity.getHandle();
++
++ TickThread.ensureTickThread(thisEntity, "May not tick entity scheduler asynchronously");
++ final List<ScheduledTask> toRun;
++ synchronized (this.stateLock) {
++ if (this.tickCount == RETIRED_TICK_COUNT) {
++ throw new IllegalStateException("Ticking retired scheduler");
++ }
++ ++this.tickCount;
++ if (this.oneTimeDelayed.isEmpty()) {
++ toRun = null;
++ } else {
++ toRun = this.oneTimeDelayed.remove(this.tickCount);
++ }
++ }
++
++ if (toRun != null) {
++ for (int i = 0, len = toRun.size(); i < len; ++i) {
++ this.currentlyExecuting.addLast(toRun.get(i));
++ }
++ }
++
++ // Note: It is allowed for the tasks executed to retire the entity in a given task.
++ for (int i = 0, len = this.currentlyExecuting.size(); i < len; ++i) {
++ if (!TickThread.isTickThreadFor(thisEntity)) {
++ // tp has been queued sync by one of the tasks
++ // in this case, we need to delay the tasks for next tick
++ break;
++ }
++ final ScheduledTask task = this.currentlyExecuting.pollFirst();
++
++ if (this.tickCount != RETIRED_TICK_COUNT) {
++ ((Consumer<Entity>)task.run).accept(thisEntity);
++ } else {
++ // retired synchronously
++ // note: here task is null
++ break;
++ }
++ }
++ }
++}
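A hedged usage sketch of the scheduler defined above ('craftEntity' is an arbitrary CraftEntity reference; only methods declared in this file are used):

    final EntityScheduler scheduler = new EntityScheduler(craftEntity);

    final boolean scheduled = scheduler.schedule(
        (final Entity entity) -> {
            // Runs on the region that owns the entity. Use the parameter, not a captured
            // handle: the underlying Entity object can change across world transfers.
            final int id = entity.getId();
        },
        (final Entity entity) -> {
            // Retired callback: the entity was removed before the delay elapsed.
            // Invoked in critical code, so keep this cheap.
        },
        20L // delay in ticks; values below 1 are clamped to 1
    );
    // schedule() returning false means the scheduler was already retired and
    // neither callback will ever run.
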
+diff --git a/src/main/java/io/papermc/paper/threadedregions/RegionShutdownThread.java b/src/main/java/io/papermc/paper/threadedregions/RegionShutdownThread.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..70c3accbab4e69268435c6f4fb13d29c7662283d
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/RegionShutdownThread.java
+@@ -0,0 +1,112 @@
++package io.papermc.paper.threadedregions;
++
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.util.TickThread;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.level.ChunkPos;
++import org.slf4j.Logger;
++import java.util.ArrayList;
++import java.util.List;
++import java.util.concurrent.TimeUnit;
++
++public final class RegionShutdownThread extends TickThread {
++
++ private static final Logger LOGGER = LogUtils.getClassLogger();
++
++ ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> shuttingDown;
++
++ public RegionShutdownThread(final String name) {
++ super(name);
++ this.setUncaughtExceptionHandler((thread, thr) -> {
++ LOGGER.error("Error shutting down server", thr);
++ });
++ }
++
++ static ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> getRegion() {
++ final Thread currentThread = Thread.currentThread();
++ if (currentThread instanceof RegionShutdownThread shutdownThread) {
++ return shutdownThread.shuttingDown;
++ }
++ return null;
++ }
++
++
++ static RegionisedWorldData getWorldData() {
++ final Thread currentThread = Thread.currentThread();
++ if (currentThread instanceof RegionShutdownThread shutdownThread) {
++ // no fast path for shutting down
++ if (shutdownThread.shuttingDown != null) {
++ return shutdownThread.shuttingDown.getData().world.worldRegionData.get();
++ }
++ }
++ return null;
++ }
++
++ // The region shutdown thread bypasses all tick thread checks, which allows us to execute global saves.
++ // It will not, however, let us perform arbitrary sync loads or arbitrary world state lookups, simply because
++ // the data required to do that is regionised, and we can only access it when we OWN the region, and we do not.
++ // Thus, the only operations the shutdown thread performs are saving region chunks and level data, as in run() below.
++
++ private void saveLevelData(final ServerLevel world) {
++ try {
++ world.saveLevelData();
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to save level data for " + world.getWorld().getName(), thr);
++ }
++ }
++
++ private void saveRegionChunks(final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> region,
++ final boolean first, final boolean last) {
++ final ChunkPos center = region.getCenterChunk();
++ LOGGER.info("Saving chunks around region around chunk " + center + " in world '" + region.regioniser.world.getWorld().getName() + "'");
++ try {
++ this.shuttingDown = region;
++ region.regioniser.world.chunkTaskScheduler.chunkHolderManager.close(true, true, first, last, false);
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to save chunks for region around chunk " + center + " in world '" + region.regioniser.world.getWorld().getName() + "'", thr);
++ } finally {
++ this.shuttingDown = null;
++ }
++ }
++
++ private void haltWorldNoRegions(final ServerLevel world) {
++ try {
++ world.chunkTaskScheduler.chunkHolderManager.close(true, true, true, true, false);
++ } catch (final Throwable thr) {
++ LOGGER.error("Failed to close world '" + world.getWorld().getName() + "' with no regions", thr);
++ }
++ }
++
++ @Override
++ public final void run() {
++ // await scheduler termination
++ LOGGER.info("Awaiting scheduler termination for 60s");
++ if (TickRegions.getScheduler().halt(true, TimeUnit.SECONDS.toNanos(60L))) {
++ LOGGER.warn("Scheduler halted");
++ } else {
++ LOGGER.warn("Scheduler did not terminate within 60s, proceeding with shutdown anyways");
++ }
++
++ MinecraftServer.getServer().stopServer(); // stop part 1: most logic, kicking players, plugins, etc
++ for (final ServerLevel world : MinecraftServer.getServer().getAllLevels()) {
++ final List<ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData>>
++ regions = new ArrayList<>();
++ world.regioniser.computeForAllRegionsUnsynchronised(regions::add);
++
++ for (int i = 0, len = regions.size(); i < len; ++i) {
++ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> region = regions.get(i);
++ this.saveRegionChunks(region, i == 0, (i + 1) == len);
++ }
++
++ if (regions.isEmpty()) {
++ // still need to halt the chunk system
++ this.haltWorldNoRegions(world);
++ }
++
++ this.saveLevelData(world);
++ }
++ MinecraftServer.getServer().stopPart2(); // stop part 2: close other resources (io thread, etc)
++ // done, part 2 should call exit()
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/threadedregions/RegionisedData.java b/src/main/java/io/papermc/paper/threadedregions/RegionisedData.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..3549e5f3359f38b207e189d89595442018c9dfa2
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/RegionisedData.java
+@@ -0,0 +1,235 @@
++package io.papermc.paper.threadedregions;
++
++import ca.spottedleaf.concurrentutil.util.Validate;
++import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap;
++import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet;
++import net.minecraft.server.level.ServerLevel;
++import javax.annotation.Nullable;
++import java.util.function.Supplier;
++
++/**
++ * Used to manage data that needs to be regionised.
++ *
++ * Note that, unlike {@link ThreadLocal}, regionised data is not deleted once the {@code RegionisedData} object is GC'd.
++ * The data is held in reference to the world it resides in.
++ *
++ *
++ * Note: Keep in mind that when regionised ticking is disabled, the entire server is considered a single region.
++ * That is, the data may or may not cross worlds. As such, the {@code RegionisedData} object must be instanced
++ * per world when appropriate, as it is no longer guaranteed that separate worlds contain separate regions.
++ * See below for more details on instancing per world.
++ *
++ *
++ * Regionised data may be world-checked. That is, {@link #get()} may throw an exception if the current
++ * region's world does not match the {@code RegionisedData}'s world. Consider the usages of {@code RegionisedData} below
++ * to see why the behavior may or may not be desirable:
++ *
++ * {@code
++ * public class EntityTickList {
++ * private final List<Entity> entities = new ArrayList<>();
++ *
++ * public void addEntity(Entity e) {
++ * this.entities.add(e);
++ * }
++ *
++ * public void removeEntity(Entity e) {
++ * this.entities.remove(e);
++ * }
++ * }
++ *
++ * public class World {
++ *
++ * // callback is left out of this example
++ * // note: world != null here
++ * public final RegionisedData<EntityTickList> entityTickLists =
++ * new RegionisedData<>(this, () -> new EntityTickList(), ...);
++ *
++ * public void addTickingEntity(Entity e) {
++ * // What we expect here is that this world is the
++ * // current ticking region's world.
++ * // If that is true, then calling this.entityTickLists.get()
++ * // will retrieve the current region's EntityTickList
++ * // for this world, which is fine since the current
++ * // region is contained within this world.
++ *
++ * // But if the current region's world is not this world,
++ * // and if the world check is disabled, then we will actually
++ * // retrieve _this_ world's EntityTickList for the region,
++ * // and NOT the EntityTickList for the region's world.
++ * // This is because the RegionisedData object is instantiated
++ * // per world.
++ * this.entityTickLists.get().addEntity(e);
++ * }
++ * }
++ *
++ * public class TickTimes {
++ *
++ * private final List<Long> tickTimesNS = new ArrayList<>();
++ *
++ * public void completeTick(long timeNS) {
++ * this.tickTimesNS.add(timeNS);
++ * }
++ *
++ * public double getAverageTickLengthMS() {
++ * double sum = 0.0;
++ * for (long time : tickTimesNS) {
++ * sum += (double)time;
++ * }
++ * return (sum / this.tickTimesNS.size()) / 1.0E6; // 1ms = 1 million ns
++ * }
++ * }
++ *
++ * public class Server {
++ * public final List<World> worlds = ...;
++ *
++ * // callback is left out of this example
++ * // note: world == null here, because this RegionisedData object
++ * // is not instantiated per world, but rather globally.
++ * public final RegionisedData<TickTimes> tickTimes =
++ * new RegionisedData<>(null, () -> new TickTimes(), ...);
++ * }
++ * }
++ *
++ * In general, it is advised that if a RegionisedData object is instantiated per world, that world checking
++ * is enabled for it by passing the world to the constructor.
++ *
++ */
++public final class RegionisedData<T> {
++
++ private final ServerLevel world;
++ private final Supplier<T> initialValueSupplier;
++ private final RegioniserCallback<T> callback;
++
++ /**
++ * Creates a regionised data holder. The provided initial value supplier may not be null, and it must
++ * never produce {@code null} values.
++ *
++ * Note that the supplier or regioniser callback may be used while the region lock is held, so any blocking
++ * operations may deadlock the entire server and as such the function should be completely non-blocking
++ * and must complete in a timely manner.
++ *
++ *
++ * If the provided world is {@code null}, then the world checks are disabled. The world should only ever
++ * be {@code null} if the data is specifically not specific to worlds. For example, using {@code null}
++ * for an entity tick list is invalid since the entities are tied to a world and region,
++ * however using {@code null} for tasks to run at the end of a tick is valid since the tasks are tied to
++ * region only.
++ *
++ * @param world The world in which the region data resides.
++ * @param supplier Initial value supplier used to lazy initialise region data.
++ * @param callback Region callback to manage this regionised data.
++ */
++ public RegionisedData(final ServerLevel world, final Supplier<T> supplier, final RegioniserCallback<T> callback) {
++ this.world = world;
++ this.initialValueSupplier = Validate.notNull(supplier, "Supplier may not be null.");
++ this.callback = Validate.notNull(callback, "Regioniser callback may not be null.");
++ }
++
++ T createNewValue() {
++ return Validate.notNull(this.initialValueSupplier.get(), "Initial value supplier may not return null");
++ }
++
++ RegioniserCallback<T> getCallback() {
++ return this.callback;
++ }
++
++ /**
++ * Returns the current data type for the current ticking region. If there is no region, returns {@code null}.
++ * @return the current data type for the current ticking region. If there is no region, returns {@code null}.
++ * @throws IllegalStateException If the following are true: The server is in region ticking mode,
++ * this {@code RegionisedData}'s world is not {@code null},
++ * and the current ticking region's world does not match this {@code RegionisedData}'s world.
++ */
++ public @Nullable T get() {
++ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> region =
++ TickRegionScheduler.getCurrentRegion();
++
++ if (region == null) {
++ return null;
++ }
++
++ if (this.world != null && this.world != region.getData().world) {
++ throw new IllegalStateException("World check failed: expected world: " + this.world.getWorld().getKey() + ", region world: " + region.getData().world.getWorld().getKey());
++ }
++
++ return region.getData().getOrCreateRegionisedData(this);
++ }
++
++ /**
++ * Class responsible for handling merge / split requests from the regioniser.
++ *
++ * It is critical to note that each function is called while holding the region lock.
++ *
++ */
++ public static interface RegioniserCallback<T> {
++
++ /**
++ * Completely merges the data in {@code from} to {@code into}.
++ *
++ * Calculating Tick Offsets:
++ * Sometimes data stores absolute tick deadlines, and since regions tick independently, absolute deadlines
++ * are not comparable across regions. Consider absolute deadlines {@code deadlineFrom, deadlineTo} in
++ * regions {@code from} and {@code into} respectively. We can calculate the relative deadline for the from
++ * region with {@code relFrom = deadlineFrom - currentTickFrom}. Then, we can use the same equation for
++ * computing the absolute deadline in region {@code into} that has the same relative deadline as {@code from}
++ * as {@code deadlineTo = relFrom + currentTickTo}. By substituting {@code relFrom} as {@code deadlineFrom - currentTickFrom},
++ * we finally have that {@code deadlineTo = deadlineFrom + (currentTickTo - currentTickFrom)} and
++ * that we can use an offset {@code fromTickOffset = currentTickTo - currentTickFrom} to calculate
++ * {@code deadlineTo} as {@code deadlineTo = deadlineFrom + fromTickOffset}.
++ *
++ *
++ * Critical Notes:
++ *
++ *
++ * This function is called while the region lock is held, so any blocking operations may
++ * deadlock the entire server and as such the function should be completely non-blocking and must complete
++ * in a timely manner.
++ *
++ *
++ * This function may not throw any exceptions, or the server will be left in an unrecoverable state.
++ *
++ *
++ *
++ *
++ * @param from The data to merge from.
++ * @param into The data to merge into.
++ * @param fromTickOffset The addend to absolute tick deadlines stored in the {@code from} region to adjust to the into region.
++ */
++ public void merge(final T from, final T into, final long fromTickOffset);
++
++ /**
++ * Splits the data in {@code from} into {@code dataSet}.
++ *
++ * The chunk coordinate to region section coordinate bit shift amount is provided in {@code chunkToRegionShift}.
++ * To convert from chunk coordinates to region coordinates and keys, see the code below:
++ *
++ * {@code
++ * int chunkX = ...;
++ * int chunkZ = ...;
++ *
++ * int regionSectionX = chunkX >> chunkToRegionShift;
++ * int regionSectionZ = chunkZ >> chunkToRegionShift;
++ * long regionSectionKey = io.papermc.paper.util.CoordinateUtils.getChunkKey(regionSectionX, regionSectionZ);
++ * }
++ *
++ *
++ *
++ * The {@code regionToData} hashtable provides a lookup from {@code regionSectionKey} (see above) to the
++ * data that is owned by the region which occupies the region section.
++ *
++ *
++ * Unlike {@link #merge(Object, Object, long)}, there is no absolute tick offset provided. This is because
++ * the new regions formed from the split will start at the same tick number, and so no adjustment is required.
++ *
++ *
++ * @param from The data to split from.
++ * @param chunkToRegionShift The signed right-shift value used to convert chunk coordinates into region section coordinates.
++ * @param regionToData Lookup hash table from region section key to the data owned by the region occupying that section.
++ * @param dataSet The data set to split into.
++ */
++ public void split(
++ final T from, final int chunkToRegionShift,
++ final Long2ReferenceOpenHashMap<T> regionToData, final ReferenceOpenHashSet<T> dataSet
++ );
++ }
++}
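To make the merge/split contract concrete, here is a hedged sketch of a callback for hypothetical per-region data mapping chunk keys to absolute tick deadlines. The ChunkDeadlines type and the callback class are illustrative; CoordinateUtils and the fastutil types are the ones used throughout this patch:

    import io.papermc.paper.util.CoordinateUtils;
    import it.unimi.dsi.fastutil.longs.Long2LongMap;
    import it.unimi.dsi.fastutil.longs.Long2LongOpenHashMap;
    import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap;
    import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet;

    // Hypothetical region data: chunk key -> absolute tick deadline.
    final class ChunkDeadlines {
        final Long2LongOpenHashMap deadlines = new Long2LongOpenHashMap();
    }

    final class ChunkDeadlineCallback implements RegionisedData.RegioniserCallback<ChunkDeadlines> {
        @Override
        public void merge(final ChunkDeadlines from, final ChunkDeadlines into, final long fromTickOffset) {
            // deadlineTo = deadlineFrom + fromTickOffset, per the derivation in the javadoc above
            for (final Long2LongMap.Entry entry : from.deadlines.long2LongEntrySet()) {
                into.deadlines.put(entry.getLongKey(), entry.getLongValue() + fromTickOffset);
            }
        }

        @Override
        public void split(final ChunkDeadlines from, final int chunkToRegionShift,
                          final Long2ReferenceOpenHashMap<ChunkDeadlines> regionToData,
                          final ReferenceOpenHashSet<ChunkDeadlines> dataSet) {
            // Route each entry to the region owning its section; no tick offset on split,
            // since the regions formed by the split all start at the same tick.
            for (final Long2LongMap.Entry entry : from.deadlines.long2LongEntrySet()) {
                final long chunkKey = entry.getLongKey();
                final int sectionX = CoordinateUtils.getChunkX(chunkKey) >> chunkToRegionShift;
                final int sectionZ = CoordinateUtils.getChunkZ(chunkKey) >> chunkToRegionShift;
                final long sectionKey = CoordinateUtils.getChunkKey(sectionX, sectionZ);
                regionToData.get(sectionKey).deadlines.put(chunkKey, entry.getLongValue());
            }
        }
    }
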
+diff --git a/src/main/java/io/papermc/paper/threadedregions/RegionisedServer.java b/src/main/java/io/papermc/paper/threadedregions/RegionisedServer.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..31209d5cef17f9bdfe03736654d7dcd222afee51
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/RegionisedServer.java
+@@ -0,0 +1,355 @@
++package io.papermc.paper.threadedregions;
++
++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
++import ca.spottedleaf.concurrentutil.scheduler.SchedulerThreadPool;
++import com.mojang.authlib.GameProfile;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.util.TickThread;
++import net.minecraft.CrashReport;
++import net.minecraft.ReportedException;
++import net.minecraft.network.Connection;
++import net.minecraft.network.PacketListener;
++import net.minecraft.network.PacketSendListener;
++import net.minecraft.network.chat.Component;
++import net.minecraft.network.chat.MutableComponent;
++import net.minecraft.network.protocol.game.ClientboundDisconnectPacket;
++import net.minecraft.network.protocol.status.ServerStatus;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.server.dedicated.DedicatedServer;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.ServerPlayer;
++import net.minecraft.server.network.ServerGamePacketListenerImpl;
++import net.minecraft.server.network.ServerLoginPacketListenerImpl;
++import net.minecraft.server.players.PlayerList;
++import net.minecraft.util.Mth;
++import net.minecraft.world.level.GameRules;
++import net.minecraft.world.level.levelgen.LegacyRandomSource;
++import org.slf4j.Logger;
++
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collections;
++import java.util.List;
++import java.util.concurrent.CopyOnWriteArrayList;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.function.BooleanSupplier;
++
++public final class RegionisedServer {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++ private static final RegionisedServer INSTANCE = new RegionisedServer();
++
++ public final RegionisedTaskQueue taskQueue = new RegionisedTaskQueue();
++
++ private final CopyOnWriteArrayList<ServerLevel> worlds = new CopyOnWriteArrayList<>();
++ private final CopyOnWriteArrayList<Connection> connections = new CopyOnWriteArrayList<>();
++
++ private final MultiThreadedQueue<Runnable> globalTickQueue = new MultiThreadedQueue<>();
++
++ private final GlobalTickTickHandle tickHandle = new GlobalTickTickHandle(this);
++
++ public static RegionisedServer getInstance() {
++ return INSTANCE;
++ }
++
++ public void addConnection(final Connection conn) {
++ this.connections.add(conn);
++ }
++
++ private boolean removeConnection(final Connection conn) {
++ return this.connections.remove(conn);
++ }
++
++ public void addWorld(final ServerLevel world) {
++ this.worlds.add(world);
++ }
++
++ public void init() {
++ this.tickHandle.setInitialStart(System.nanoTime() + TickRegionScheduler.TIME_BETWEEN_TICKS);
++ TickRegions.getScheduler().scheduleRegion(this.tickHandle);
++ TickRegions.getScheduler().init();
++ }
++
++ public void invalidateStatus() {
++ this.lastServerStatus = 0L;
++ }
++
++ public void addTaskWithoutNotify(final Runnable run) {
++ this.globalTickQueue.add(run);
++ }
++
++ public void addTask(final Runnable run) {
++ this.addTaskWithoutNotify(run);
++ TickRegions.getScheduler().setHasTasks(this.tickHandle);
++ }
++
++ /**
++ * Returns the current tick of the region ticking.
++ * @throws IllegalStateException If there is no current region.
++ */
++ public static long getCurrentTick() throws IllegalStateException {
++ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> region =
++ TickRegionScheduler.getCurrentRegion();
++ if (region == null) {
++ if (TickThread.isShutdownThread()) {
++ return 0L;
++ }
++ throw new IllegalStateException("No currently ticking region");
++ }
++ return region.getData().getCurrentTick();
++ }
++
++ public static boolean isGlobalTickThread() {
++ return INSTANCE.tickHandle == TickRegionScheduler.getCurrentTickingTask();
++ }
++
++ public static void ensureGlobalTickThread(final String reason) {
++ if (!isGlobalTickThread()) {
++ throw new IllegalStateException(reason);
++ }
++ }
++
++ public static TickRegionScheduler.RegionScheduleHandle getGlobalTickData() {
++ return INSTANCE.tickHandle;
++ }
++
++ private static final class GlobalTickTickHandle extends TickRegionScheduler.RegionScheduleHandle {
++
++ private final RegionisedServer server;
++
++ private final AtomicBoolean scheduled = new AtomicBoolean();
++ private final AtomicBoolean ticking = new AtomicBoolean();
++
++ public GlobalTickTickHandle(final RegionisedServer server) {
++ super(null, SchedulerThreadPool.DEADLINE_NOT_SET);
++ this.server = server;
++ }
++
++ /**
++ * Only valid to call BEFORE scheduled!!!!
++ */
++ final void setInitialStart(final long start) {
++ if (this.scheduled.getAndSet(true)) {
++ throw new IllegalStateException("Double scheduling global tick");
++ }
++ this.updateScheduledStart(start);
++ }
++
++ @Override
++ protected boolean tryMarkTicking() {
++ return !this.ticking.getAndSet(true);
++ }
++
++ @Override
++ protected boolean markNotTicking() {
++ return this.ticking.getAndSet(false);
++ }
++
++ @Override
++ protected void tickRegion(final int tickCount, final long startTime, final long scheduledEnd) {
++ this.drainTasks();
++ this.server.globalTick(tickCount);
++ }
++
++ private void drainTasks() {
++ while (this.runOneTask());
++ }
++
++ private boolean runOneTask() {
++ final Runnable run = this.server.globalTickQueue.poll();
++ if (run == null) {
++ return false;
++ }
++
++ // TODO try catch?
++ run.run();
++
++ return true;
++ }
++
++ @Override
++ protected boolean runRegionTasks(final BooleanSupplier canContinue) {
++ do {
++ if (!this.runOneTask()) {
++ return false;
++ }
++ } while (canContinue.getAsBoolean());
++
++ return true;
++ }
++
++ @Override
++ protected boolean hasIntermediateTasks() {
++ return !this.server.globalTickQueue.isEmpty();
++ }
++ }
++
++ private long lastServerStatus;
++ private long tickCount;
++
++ private void globalTick(final int tickCount) {
++ ++this.tickCount;
++ // commands
++ ((DedicatedServer)MinecraftServer.getServer()).handleConsoleInputs();
++
++ // needs
++ // player ping sample
++ // world global tick
++ // connection tick
++
++ // tick player ping sample
++ this.tickPlayerSample();
++
++ // tick worlds
++ for (final ServerLevel world : this.worlds) {
++ this.globalTick(world, tickCount);
++ }
++
++ // tick connections
++ this.tickConnections();
++
++ // player list
++ MinecraftServer.getServer().getPlayerList().tick();
++ }
++
++ private void tickPlayerSample() {
++ final MinecraftServer mcServer = MinecraftServer.getServer();
++ final ServerStatus status = mcServer.getStatus();
++ final PlayerList playerList = mcServer.getPlayerList();
++
++ final long i = System.nanoTime();
++
++ // player ping sample
++ // copied from MinecraftServer#tickServer
++ // note: we need to reorder setPlayers to be the last operation it does, rather than the first to avoid publishing
++ // an incomplete status
++ if (i - this.lastServerStatus >= 5000000000L) {
++ this.lastServerStatus = i;
++ List<ServerPlayer> players = new ArrayList<>(playerList.players);
++ ServerStatus.Players newPlayers = new ServerStatus.Players(mcServer.getMaxPlayers(), players.size());
++
++ if (!mcServer.hidesOnlinePlayers()) {
++ GameProfile[] agameprofile = new GameProfile[Math.min(players.size(), org.spigotmc.SpigotConfig.playerSample)]; // Paper
++ int j = Mth.nextInt(new LegacyRandomSource(i), 0, players.size() - agameprofile.length);
++
++ for (int k = 0; k < agameprofile.length; ++k) {
++ ServerPlayer entityplayer = (ServerPlayer) players.get(j + k);
++
++ if (entityplayer.allowsListing()) {
++ agameprofile[k] = entityplayer.getGameProfile();
++ } else {
++ agameprofile[k] = MinecraftServer.ANONYMOUS_PLAYER_PROFILE;
++ }
++ }
++
++ Collections.shuffle(Arrays.asList(agameprofile));
++ newPlayers.setSample(agameprofile);
++ }
++ // TODO make players field volatile
++ status.setPlayers(newPlayers);
++ }
++ }
++
++ private boolean hasConnectionMovedToMain(final Connection conn) {
++ final PacketListener packetListener = conn.getPacketListener();
++
++ return (packetListener instanceof ServerGamePacketListenerImpl) ||
++ (packetListener instanceof ServerLoginPacketListenerImpl loginListener && loginListener.state.ordinal() >= ServerLoginPacketListenerImpl.State.HANDING_OFF.ordinal());
++ }
++
++ private void tickConnections() {
++ final List<Connection> connections = new ArrayList<>(this.connections);
++ Collections.shuffle(connections); // shuffle to prevent people from "gaming" the server by re-logging
++ for (final Connection conn : connections) {
++ if (!conn.becomeActive()) {
++ continue;
++ }
++
++ if (this.hasConnectionMovedToMain(conn)) {
++ if (!conn.isConnected()) {
++ this.removeConnection(conn);
++ }
++ continue;
++ }
++
++ if (!conn.isConnected()) {
++ this.removeConnection(conn);
++ conn.handleDisconnection();
++ continue;
++ }
++
++ try {
++ conn.tick();
++ } catch (final Exception exception) {
++ if (conn.isMemoryConnection()) {
++ throw new ReportedException(CrashReport.forThrowable(exception, "Ticking memory connection"));
++ }
++
++ LOGGER.warn("Failed to handle packet for {}", io.papermc.paper.configuration.GlobalConfiguration.get().logging.logPlayerIpAddresses ? String.valueOf(conn.getRemoteAddress()) : "", exception); // Paper
++ MutableComponent ichatmutablecomponent = Component.literal("Internal server error");
++
++ conn.send(new ClientboundDisconnectPacket(ichatmutablecomponent), PacketSendListener.thenRun(() -> {
++ conn.disconnect(ichatmutablecomponent);
++ }));
++ conn.setReadOnly();
++ continue;
++ }
++ }
++ }
++
++ // A global tick only updates things like weather / worldborder, basically anything in the world that is
++ // NOT tied to a specific region, but rather shared amongst all of them.
++ private void globalTick(final ServerLevel world, final int tickCount) {
++ // needs
++ // worldborder tick
++ // advancing the weather cycle
++ // sleep status thing
++ // updating sky brightness
++ // time ticking (game time + daylight), plus PrimaryLevelData#getScheduledEvents ticking
++
++ // Typically, we expect there to be a running region to drain a world's global chunk tasks. However,
++ // this may not be the case - and thus, only the global tick thread can do anything.
++ world.taskQueueRegionData.drainGlobalChunkTasks();
++
++ // worldborder tick
++ this.tickWorldBorder(world);
++
++ // weather cycle
++ this.advanceWeatherCycle(world);
++
++ // sleep status TODO
++
++ // sky brightness
++ this.updateSkyBrightness(world);
++
++ // time ticking (TODO API synchronisation?)
++ this.tickTime(world, tickCount);
++
++ world.updateTickData();
++ }
++
++ private void advanceWeatherCycle(final ServerLevel world) {
++ world.advanceWeatherCycle();
++ }
++
++ private void updateSkyBrightness(final ServerLevel world) {
++ world.updateSkyBrightness();
++ }
++
++ private void tickWorldBorder(final ServerLevel world) {
++ world.getWorldBorder().tick();
++ }
++
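++ // note: tickCount is the number of ticks this global tick accounts for, so a lagging global tick
++ // advances day/game time by several ticks in one step instead of silently drifting behind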
++ private void tickTime(final ServerLevel world, final int tickCount) {
++ if (world.tickTime) {
++ if (world.levelData.getGameRules().getBoolean(GameRules.RULE_DAYLIGHT)) {
++ world.setDayTime(world.levelData.getDayTime() + (long)tickCount);
++ }
++ world.serverLevelData.setGameTime(world.serverLevelData.getGameTime() + (long)tickCount);
++ }
++ }
++
++ public static final record WorldLevelData(ServerLevel world, long nonRedstoneGameTime, long dayTime) {
++
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/threadedregions/RegionisedTaskQueue.java b/src/main/java/io/papermc/paper/threadedregions/RegionisedTaskQueue.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..c13237edb7323fa747d260375f626a5c9979b004
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/RegionisedTaskQueue.java
+@@ -0,0 +1,742 @@
++package io.papermc.paper.threadedregions;
++
++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
++import ca.spottedleaf.concurrentutil.map.SWMRLong2ObjectHashTable;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import io.papermc.paper.chunk.system.io.RegionFileIOThread;
++import io.papermc.paper.chunk.system.scheduling.ChunkHolderManager;
++import io.papermc.paper.util.CoordinateUtils;
++import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap;
++import it.unimi.dsi.fastutil.objects.Reference2ReferenceMap;
++import it.unimi.dsi.fastutil.objects.Reference2ReferenceOpenHashMap;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.TicketType;
++import net.minecraft.util.Unit;
++import java.lang.invoke.VarHandle;
++import java.util.ArrayDeque;
++import java.util.Iterator;
++import java.util.concurrent.ConcurrentHashMap;
++import java.util.concurrent.atomic.AtomicLong;
++import java.util.concurrent.locks.ReentrantLock;
++
++public final class RegionisedTaskQueue {
++
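++ // Tickets of this type pin a chunk holder (and thus its region section) in place while queued tasks
++ // still reference that section; see WorldRegionTaskData#addTicket / #removeTicket below.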
++ private static final TicketType<Unit> TASK_QUEUE_TICKET = TicketType.create("task_queue_ticket", (a, b) -> 0);
++
++ public PrioritisedExecutor.PrioritisedTask createChunkTask(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Runnable run) {
++ return new PrioritisedQueue.ChunkBasedPriorityTask(world.taskQueueRegionData, chunkX, chunkZ, true, run, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask createChunkTask(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return new PrioritisedQueue.ChunkBasedPriorityTask(world.taskQueueRegionData, chunkX, chunkZ, true, run, priority);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask createTickTaskQueue(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Runnable run) {
++ return new PrioritisedQueue.ChunkBasedPriorityTask(world.taskQueueRegionData, chunkX, chunkZ, false, run, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask createTickTaskQueue(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Runnable run, final PrioritisedExecutor.Priority priority) {
++ return new PrioritisedQueue.ChunkBasedPriorityTask(world.taskQueueRegionData, chunkX, chunkZ, false, run, priority);
++ }
++
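++ // Usage sketch (hypothetical caller): queueChunkTask(world, x, z, run) executes run under whichever
++ // region owns chunk (x, z); the queued task holds a ticket keeping that chunk's holder alive until it runs.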
++ public PrioritisedExecutor.PrioritisedTask queueChunkTask(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Runnable run) {
++ return this.queueChunkTask(world, chunkX, chunkZ, run, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask queueChunkTask(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Runnable run, final PrioritisedExecutor.Priority priority) {
++ final PrioritisedExecutor.PrioritisedTask ret = this.createChunkTask(world, chunkX, chunkZ, run, priority);
++ ret.queue();
++ return ret;
++ }
++
++ public PrioritisedExecutor.PrioritisedTask queueTickTaskQueue(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Runnable run) {
++ return this.queueTickTaskQueue(world, chunkX, chunkZ, run, PrioritisedExecutor.Priority.NORMAL);
++ }
++
++ public PrioritisedExecutor.PrioritisedTask queueTickTaskQueue(final ServerLevel world, final int chunkX, final int chunkZ,
++ final Runnable run, final PrioritisedExecutor.Priority priority) {
++ final PrioritisedExecutor.PrioritisedTask ret = this.createTickTaskQueue(world, chunkX, chunkZ, run, priority);
++ ret.queue();
++ return ret;
++ }
++
++ public static final class WorldRegionTaskData {
++ private final ServerLevel world;
++ private final MultiThreadedQueue<Runnable> globalChunkTask = new MultiThreadedQueue<>();
++ private final SWMRLong2ObjectHashTable<AtomicLong> referenceCounters = new SWMRLong2ObjectHashTable<>();
++
++ public WorldRegionTaskData(final ServerLevel world) {
++ this.world = world;
++ }
++
++ private boolean executeGlobalChunkTask() {
++ final Runnable run = this.globalChunkTask.poll();
++ if (run != null) {
++ run.run();
++ return true;
++ }
++ return false;
++ }
++
++ public void drainGlobalChunkTasks() {
++ while (this.executeGlobalChunkTask());
++ }
++
++ public void pushGlobalChunkTask(final Runnable run) {
++ this.globalChunkTask.add(run);
++ }
++
++ private PrioritisedQueue getQueue(final boolean synchronise, final int chunkX, final int chunkZ, final boolean isChunkTask) {
++ final ThreadedRegioniser<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> regioniser = this.world.regioniser;
++ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> region
++ = synchronise ? regioniser.getRegionAtSynchronised(chunkX, chunkZ) : regioniser.getRegionAtUnsynchronised(chunkX, chunkZ);
++ if (region == null) {
++ return null;
++ }
++ final RegionTaskQueueData taskQueueData = region.getData().getTaskQueueData();
++ return (isChunkTask ? taskQueueData.chunkQueue : taskQueueData.tickTaskQueue);
++ }
++
++ private void removeTicket(final long coord) {
++ this.world.chunkTaskScheduler.chunkHolderManager.removeTicketAtLevel(
++ TASK_QUEUE_TICKET, coord, ChunkHolderManager.MAX_TICKET_LEVEL, Unit.INSTANCE
++ );
++ }
++
++ private void addTicket(final long coord) {
++ this.world.chunkTaskScheduler.chunkHolderManager.addTicketAtLevel(
++ TASK_QUEUE_TICKET, coord, ChunkHolderManager.MAX_TICKET_LEVEL, Unit.INSTANCE
++ );
++ }
++
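++ // Reference counting protocol: each region section with queued tasks maps to an AtomicLong in
++ // referenceCounters. The first queued task for a section adds a chunk ticket (addTicket), and the
++ // task that drops the count to zero removes it. A counter that hits zero is dead - it is never
++ // incremented again - so racing acquirers must install a fresh counter under the ticket lock.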
++ private void decrementReference(final AtomicLong reference, final long coord) {
++ final long val = reference.decrementAndGet();
++ if (val == 0L) {
++ final ReentrantLock ticketLock = this.world.chunkTaskScheduler.chunkHolderManager.ticketLock;
++ ticketLock.lock();
++ try {
++ if (this.referenceCounters.remove(coord, reference)) {
++ WorldRegionTaskData.this.removeTicket(coord);
++ } // else: race condition, something replaced our reference - not our issue anymore
++ } finally {
++ ticketLock.unlock();
++ }
++ } else if (val < 0L) {
++ throw new IllegalStateException("Reference count < 0: " + val);
++ }
++ }
++
++ private AtomicLong incrementReference(final long coord) {
++ final AtomicLong ret = this.referenceCounters.get(coord);
++ if (ret != null) {
++ // try to fast acquire counter
++ int failures = 0;
++ for (long curr = ret.get();;) {
++ if (curr == 0L) {
++ // failed to fast acquire as reference expired
++ break;
++ }
++
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++
++ if (curr == (curr = ret.compareAndExchange(curr, curr + 1L))) {
++ return ret;
++ }
++
++ ++failures;
++ }
++ }
++
++ // slow acquire
++ final ReentrantLock ticketLock = this.world.chunkTaskScheduler.chunkHolderManager.ticketLock;
++ ticketLock.lock();
++ try {
++ final AtomicLong replace = new AtomicLong(1L);
++ final AtomicLong valueInMap = this.referenceCounters.putIfAbsent(coord, replace);
++ if (valueInMap == null) {
++ // replaced, we should usually be here
++ this.addTicket(coord);
++ return replace;
++ } // else: need to attempt to acquire the reference
++
++ int failures = 0;
++ for (long curr = valueInMap.get();;) {
++ if (curr == 0L) {
++ // don't need to add ticket here, since ticket is only removed during the lock
++ // we just need to replace the value in the map so that the thread removing fails and doesn't
++ // remove the ticket (see decrementReference)
++ this.referenceCounters.put(coord, replace);
++ return replace;
++ }
++
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++
++ if (curr == (curr = valueInMap.compareAndExchange(curr, curr + 1L))) {
++ // acquired
++ return valueInMap;
++ }
++
++ ++failures;
++ }
++ } finally {
++ ticketLock.unlock();
++ }
++ }
++ }
++
++ public static final class RegionTaskQueueData {
++ private final PrioritisedQueue tickTaskQueue = new PrioritisedQueue();
++ private final PrioritisedQueue chunkQueue = new PrioritisedQueue();
++ private final WorldRegionTaskData worldRegionTaskData;
++
++ public RegionTaskQueueData(final WorldRegionTaskData worldRegionTaskData) {
++ this.worldRegionTaskData = worldRegionTaskData;
++ }
++
++ void mergeInto(final RegionTaskQueueData into) {
++ this.tickTaskQueue.mergeInto(into.tickTaskQueue);
++ this.chunkQueue.mergeInto(into.chunkQueue);
++ }
++
++ public boolean executeTickTask() {
++ return this.tickTaskQueue.executeTask();
++ }
++
++ public boolean executeChunkTask() {
++ return this.worldRegionTaskData.executeGlobalChunkTask() || this.chunkQueue.executeTask();
++ }
++
++ void split(final ThreadedRegioniser<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> regioniser,
++ final Long2ReferenceOpenHashMap<ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData>> into) {
++ this.tickTaskQueue.split(
++ false, regioniser, into
++ );
++ this.chunkQueue.split(
++ true, regioniser, into
++ );
++ }
++
++ public void drainTasks() {
++ final PrioritisedQueue tickTaskQueue = this.tickTaskQueue;
++ final PrioritisedQueue chunkTaskQueue = this.chunkQueue;
++
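++ // bound the number of executed tasks to the number scheduled at the start of the drain, so that
++ // tasks which queue further tasks cannot keep the drain running indefinitely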
++ int allowedTickTasks = tickTaskQueue.getScheduledTasks();
++ int allowedChunkTasks = chunkTaskQueue.getScheduledTasks();
++
++ boolean executeTickTasks = allowedTickTasks > 0;
++ boolean executeChunkTasks = allowedChunkTasks > 0;
++ boolean executeGlobalTasks = true;
++
++ do {
++ executeTickTasks = executeTickTasks && allowedTickTasks-- > 0 && tickTaskQueue.executeTask();
++ executeChunkTasks = executeChunkTasks && allowedChunkTasks-- > 0 && chunkTaskQueue.executeTask();
++ executeGlobalTasks = executeGlobalTasks && this.worldRegionTaskData.executeGlobalChunkTask();
++ } while (executeTickTasks | executeChunkTasks | executeGlobalTasks);
++ }
++
++ public boolean hasTasks() {
++ return !this.tickTaskQueue.isEmpty() || !this.chunkQueue.isEmpty();
++ }
++ }
++
++ static final class PrioritisedQueue {
++ private final ArrayDeque<ChunkBasedPriorityTask>[] queues = new ArrayDeque[PrioritisedExecutor.Priority.TOTAL_SCHEDULABLE_PRIORITIES]; {
++ for (int i = 0; i < PrioritisedExecutor.Priority.TOTAL_SCHEDULABLE_PRIORITIES; ++i) {
++ this.queues[i] = new ArrayDeque<>();
++ }
++ }
++ private boolean isDestroyed;
++
++ public int getScheduledTasks() {
++ synchronized (this) {
++ int ret = 0;
++
++ for (final ArrayDeque<ChunkBasedPriorityTask> queue : this.queues) {
++ ret += queue.size();
++ }
++
++ return ret;
++ }
++ }
++
++ public boolean isEmpty() {
++ final ArrayDeque<ChunkBasedPriorityTask>[] queues = this.queues;
++ final int max = PrioritisedExecutor.Priority.IDLE.priority;
++ synchronized (this) {
++ for (int i = 0; i <= max; ++i) {
++ if (!queues[i].isEmpty()) {
++ return false;
++ }
++ }
++ return true;
++ }
++ }
++
++ public void mergeInto(final PrioritisedQueue target) {
++ synchronized (this) {
++ this.isDestroyed = true;
++ synchronized (target) {
++ mergeInto(target, this.queues);
++ }
++ }
++ }
++
++ private static void mergeInto(final PrioritisedQueue target, final ArrayDeque<ChunkBasedPriorityTask>[] thisQueues) {
++ final ArrayDeque<ChunkBasedPriorityTask>[] otherQueues = target.queues;
++ for (int i = 0; i < thisQueues.length; ++i) {
++ final ArrayDeque<ChunkBasedPriorityTask> fromQ = thisQueues[i];
++ final ArrayDeque<ChunkBasedPriorityTask> intoQ = otherQueues[i];
++
++ // it is possible for another thread to queue tasks into the target queue before we do
++ // since only the ticking region can poll, we don't have to worry about it when they are being queued -
++ // but when we are merging, we need to ensure order is maintained (notwithstanding priority changes)
++ // we can ensure order is maintained by adding all of the tasks from the fromQ into the intoQ at the
++ // front of the queue, but we need to use descending iterator to ensure we do not reverse
++ // the order of elements from fromQ
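++ // e.g. fromQ = [a, b] and intoQ = [x]: descending iteration sees b then a, so addFirst(b) followed
++ // by addFirst(a) yields [a, b, x] - fromQ's internal order is preserved, ahead of the newer x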
++ for (final Iterator<ChunkBasedPriorityTask> iterator = fromQ.descendingIterator(); iterator.hasNext();) {
++ intoQ.addFirst(iterator.next());
++ }
++ }
++ }
++
++ // into is a map of section coordinate to region
++ public void split(final boolean isChunkData,
++ final ThreadedRegioniser<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> regioniser,
++ final Long2ReferenceOpenHashMap<ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData>> into) {
++ final Reference2ReferenceOpenHashMap<ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData>, ArrayDeque<ChunkBasedPriorityTask>[]>
++ split = new Reference2ReferenceOpenHashMap<>();
++ final int shift = regioniser.sectionChunkShift;
++ synchronized (this) {
++ this.isDestroyed = true;
++ // like mergeTarget, we need to be careful about insertion order so we can maintain order when splitting
++
++ // first, build the targets
++ final ArrayDeque<ChunkBasedPriorityTask>[] thisQueues = this.queues;
++ for (int i = 0; i < thisQueues.length; ++i) {
++ final ArrayDeque<ChunkBasedPriorityTask> fromQ = thisQueues[i];
++
++ for (final ChunkBasedPriorityTask task : fromQ) {
++ final int sectionX = task.chunkX >> shift;
++ final int sectionZ = task.chunkZ >> shift;
++ final long sectionKey = CoordinateUtils.getChunkKey(sectionX, sectionZ);
++ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData>
++ region = into.get(sectionKey);
++ if (region == null) {
++ throw new IllegalStateException();
++ }
++
++ split.computeIfAbsent(region, (keyInMap) -> {
++ final ArrayDeque<ChunkBasedPriorityTask>[] ret = new ArrayDeque[PrioritisedExecutor.Priority.TOTAL_SCHEDULABLE_PRIORITIES];
++
++ for (int k = 0; k < ret.length; ++k) {
++ ret[k] = new ArrayDeque<>();
++ }
++
++ return ret;
++ })[i].add(task);
++ }
++ }
++
++ // merge the targets into their queues
++ for (final Iterator<Reference2ReferenceMap.Entry<ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData>, ArrayDeque<ChunkBasedPriorityTask>[]>>
++ iterator = split.reference2ReferenceEntrySet().fastIterator();
++ iterator.hasNext();) {
++ final Reference2ReferenceMap.Entry<ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData>, ArrayDeque<ChunkBasedPriorityTask>[]>
++ entry = iterator.next();
++ final RegionTaskQueueData taskQueueData = entry.getKey().getData().getTaskQueueData();
++ mergeInto(isChunkData ? taskQueueData.chunkQueue : taskQueueData.tickTaskQueue, entry.getValue());
++ }
++ }
++ }
++
++ /**
++ * Returns {@code null} if the task can no longer be scheduled (it was already completed or cancelled),
++ * {@code Boolean.FALSE} if this queue has been destroyed by a region merge/split, and
++ * {@code Boolean.TRUE} if the task was queued.
++ */
++ private Boolean tryPush(final ChunkBasedPriorityTask task) {
++ final ArrayDeque<ChunkBasedPriorityTask>[] queues = this.queues;
++ synchronized (this) {
++ final PrioritisedExecutor.Priority priority = task.getPriority();
++ if (priority == PrioritisedExecutor.Priority.COMPLETING) {
++ return null;
++ }
++ if (this.isDestroyed) {
++ return Boolean.FALSE;
++ }
++ queues[priority.priority].addLast(task);
++ return Boolean.TRUE;
++ }
++ }
++
++ private boolean executeTask() {
++ final ArrayDeque<ChunkBasedPriorityTask>[] queues = this.queues;
++ final int max = PrioritisedExecutor.Priority.IDLE.priority;
++ ChunkBasedPriorityTask task = null;
++ AtomicLong referenceCounter = null;
++ synchronized (this) {
++ if (this.isDestroyed) {
++ throw new IllegalStateException("Attempting to poll from dead queue");
++ }
++
++ search_loop:
++ for (int i = 0; i <= max; ++i) {
++ final ArrayDeque<ChunkBasedPriorityTask> queue = queues[i];
++ while ((task = queue.pollFirst()) != null) {
++ if ((referenceCounter = task.trySetCompleting(i)) != null) {
++ break search_loop;
++ }
++ }
++ }
++ }
++
++ if (task == null) {
++ return false;
++ }
++
++ try {
++ task.executeInternal();
++ } finally {
++ task.world.decrementReference(referenceCounter, task.sectionLowerLeftCoord);
++ }
++
++ return true;
++ }
++
++ private static final class ChunkBasedPriorityTask implements PrioritisedExecutor.PrioritisedTask {
++
++ private static final AtomicLong REFERENCE_COUNTER_NOT_SET = new AtomicLong(-1L);
++
++ private final WorldRegionTaskData world;
++ private final int chunkX;
++ private final int chunkZ;
++ private final long sectionLowerLeftCoord; // chunk coordinate
++ private final boolean isChunkTask;
++
++ private volatile AtomicLong referenceCounter;
++ private static final VarHandle REFERENCE_COUNTER_HANDLE = ConcurrentUtil.getVarHandle(ChunkBasedPriorityTask.class, "referenceCounter", AtomicLong.class);
++ private Runnable run;
++ private volatile PrioritisedExecutor.Priority priority;
++ private static final VarHandle PRIORITY_HANDLE = ConcurrentUtil.getVarHandle(ChunkBasedPriorityTask.class, "priority", PrioritisedExecutor.Priority.class);
++
++ ChunkBasedPriorityTask(final WorldRegionTaskData world, final int chunkX, final int chunkZ, final boolean isChunkTask,
++ final Runnable run, final PrioritisedExecutor.Priority priority) {
++ this.world = world;
++ this.chunkX = chunkX;
++ this.chunkZ = chunkZ;
++ this.isChunkTask = isChunkTask;
++ this.run = run;
++ this.setReferenceCounterPlain(REFERENCE_COUNTER_NOT_SET);
++ this.setPriorityPlain(priority);
++
++ final int regionShift = world.world.regioniser.sectionChunkShift;
++ final int regionMask = (1 << regionShift) - 1;
++
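++ // e.g. with regionShift = 2 (sections of 4x4 chunks), chunk (5, -3) belongs to the section whose
++ // lower-left chunk is (4, -4)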
++ this.sectionLowerLeftCoord = CoordinateUtils.getChunkKey(chunkX & ~regionMask, chunkZ & ~regionMask);
++ }
++
++ private PrioritisedExecutor.Priority getPriorityVolatile() {
++ return (PrioritisedExecutor.Priority)PRIORITY_HANDLE.getVolatile(this);
++ }
++
++ private void setPriorityPlain(final PrioritisedExecutor.Priority priority) {
++ PRIORITY_HANDLE.set(this, priority);
++ }
++
++ private void setPriorityVolatile(final PrioritisedExecutor.Priority priority) {
++ PRIORITY_HANDLE.setVolatile(this, priority);
++ }
++
++ private PrioritisedExecutor.Priority compareAndExchangePriority(final PrioritisedExecutor.Priority expect, final PrioritisedExecutor.Priority update) {
++ return (PrioritisedExecutor.Priority)PRIORITY_HANDLE.compareAndExchange(this, expect, update);
++ }
++
++ private void setReferenceCounterPlain(final AtomicLong value) {
++ REFERENCE_COUNTER_HANDLE.set(this, value);
++ }
++
++ private AtomicLong getReferenceCounterVolatile() {
++ return (AtomicLong)REFERENCE_COUNTER_HANDLE.get(this);
++ }
++
++ private AtomicLong compareAndExchangeReferenceCounter(final AtomicLong expect, final AtomicLong update) {
++ return (AtomicLong)REFERENCE_COUNTER_HANDLE.compareAndExchange(this, expect, update);
++ }
++
++ private void executeInternal() {
++ try {
++ this.run.run();
++ } finally {
++ this.run = null;
++ }
++ }
++
++ private void cancelInternal() {
++ this.run = null;
++ }
++
++ private boolean tryComplete(final boolean cancel) {
++ int failures = 0;
++ for (AtomicLong curr = this.getReferenceCounterVolatile();;) {
++ if (curr == null) {
++ return false;
++ }
++
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++
++ if (curr != (curr = this.compareAndExchangeReferenceCounter(curr, null))) {
++ ++failures;
++ continue;
++ }
++
++ // we have the reference count, we win no matter what.
++ this.setPriorityVolatile(PrioritisedExecutor.Priority.COMPLETING);
++
++ try {
++ if (cancel) {
++ this.cancelInternal();
++ } else {
++ this.executeInternal();
++ }
++ } finally {
++ if (curr != REFERENCE_COUNTER_NOT_SET) {
++ this.world.decrementReference(curr, this.sectionLowerLeftCoord);
++ }
++ }
++
++ return true;
++ }
++ }
++
++ @Override
++ public boolean queue() {
++ if (this.getReferenceCounterVolatile() != REFERENCE_COUNTER_NOT_SET) {
++ return false;
++ }
++
++ final AtomicLong referenceCounter = this.world.incrementReference(this.sectionLowerLeftCoord);
++ if (this.compareAndExchangeReferenceCounter(REFERENCE_COUNTER_NOT_SET, referenceCounter) != REFERENCE_COUNTER_NOT_SET) {
++ // we don't expect race conditions here, so it is OK if we have to needlessly reference count
++ this.world.decrementReference(referenceCounter, this.sectionLowerLeftCoord);
++ return false;
++ }
++
++ boolean synchronise = false;
++ for (;;) {
++ // we need to synchronise for repeated operations so that we guarantee that we do not retrieve
++ // the same queue again, as the region lock will be given to us only when the merge/split operation
++ // is done
++ final PrioritisedQueue queue = this.world.getQueue(synchronise, this.chunkX, this.chunkZ, this.isChunkTask);
++
++ if (queue == null) {
++ if (!synchronise) {
++ // may be incorrectly null when unsynchronised
++ continue;
++ }
++ // may have been cancelled before we got to the queue
++ if (this.getReferenceCounterVolatile() != null) {
++ throw new IllegalStateException("Expected null ref count when queue does not exist");
++ }
++ // the task never could be polled from the queue, so we return false
++ // don't decrement reference count, as we were certainly cancelled by another thread, which
++ // will decrement the reference count
++ return false;
++ }
++
++ synchronise = true;
++
++ final Boolean res = queue.tryPush(this);
++ if (res == null) {
++ // we were cancelled
++ // don't decrement reference count, as we were certainly cancelled by another thread, which
++ // will decrement the reference count
++ return false;
++ }
++
++ if (!res.booleanValue()) {
++ // failed, try again
++ continue;
++ }
++
++ // successfully queued
++ return true;
++ }
++ }
++
++ private AtomicLong trySetCompleting(final int minPriority) {
++ // first, try to set priority to EXECUTING
++ for (PrioritisedExecutor.Priority curr = this.getPriorityVolatile();;) {
++ if (curr.isLowerPriority(minPriority)) {
++ return null;
++ }
++
++ if (curr == (curr = this.compareAndExchangePriority(curr, PrioritisedExecutor.Priority.COMPLETING))) {
++ break;
++ } // else: continue
++ }
++
++ for (AtomicLong curr = this.getReferenceCounterVolatile();;) {
++ if (curr == null) {
++ // something acquired before us
++ return null;
++ }
++
++ if (curr == REFERENCE_COUNTER_NOT_SET) {
++ throw new IllegalStateException();
++ }
++
++ if (curr != (curr = this.compareAndExchangeReferenceCounter(curr, null))) {
++ continue;
++ }
++ return curr;
++ }
++ }
++
++ private void updatePriorityInQueue() {
++ boolean synchronise = false;
++ for (;;) {
++ final AtomicLong referenceCount = this.getReferenceCounterVolatile();
++ if (referenceCount == REFERENCE_COUNTER_NOT_SET || referenceCount == null) {
++ // cancelled or not queued
++ return;
++ }
++
++ if (this.getPriorityVolatile() == PrioritisedExecutor.Priority.COMPLETING) {
++ // cancelled
++ return;
++ }
++
++ // we need to synchronise for repeated operations so that we guarantee that we do not retrieve
++ // the same queue again, as the region lock will be given to us only when the merge/split operation
++ // is done
++ final PrioritisedQueue queue = this.world.getQueue(synchronise, this.chunkX, this.chunkZ, this.isChunkTask);
++
++ if (queue == null) {
++ if (!synchronise) {
++ // may be incorrectly null when unsynchronised
++ continue;
++ }
++ // must have been removed
++ return;
++ }
++
++ synchronise = true;
++
++ final Boolean res = queue.tryPush(this);
++ if (res == null) {
++ // we were cancelled
++ return;
++ }
++
++ if (!res.booleanValue()) {
++ // failed, try again
++ continue;
++ }
++
++ // successfully queued
++ return;
++ }
++ }
++
++ @Override
++ public PrioritisedExecutor.Priority getPriority() {
++ return this.getPriorityVolatile();
++ }
++
++ @Override
++ public boolean lowerPriority(final PrioritisedExecutor.Priority priority) {
++ int failures = 0;
++ for (PrioritisedExecutor.Priority curr = this.getPriorityVolatile();;) {
++ if (curr == PrioritisedExecutor.Priority.COMPLETING) {
++ return false;
++ }
++
++ if (curr.isLowerOrEqualPriority(priority)) {
++ return false;
++ }
++
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++
++ if (curr == (curr = this.compareAndExchangePriority(curr, priority))) {
++ this.updatePriorityInQueue();
++ return true;
++ }
++ ++failures;
++ }
++ }
++
++ @Override
++ public boolean setPriority(final PrioritisedExecutor.Priority priority) {
++ int failures = 0;
++ for (PrioritisedExecutor.Priority curr = this.getPriorityVolatile();;) {
++ if (curr == PrioritisedExecutor.Priority.COMPLETING) {
++ return false;
++ }
++
++ if (curr == priority) {
++ return false;
++ }
++
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++
++ if (curr == (curr = this.compareAndExchangePriority(curr, priority))) {
++ this.updatePriorityInQueue();
++ return true;
++ }
++ ++failures;
++ }
++ }
++
++ @Override
++ public boolean raisePriority(final PrioritisedExecutor.Priority priority) {
++ int failures = 0;
++ for (PrioritisedExecutor.Priority curr = this.getPriorityVolatile();;) {
++ if (curr == PrioritisedExecutor.Priority.COMPLETING) {
++ return false;
++ }
++
++ if (curr.isHigherOrEqualPriority(priority)) {
++ return false;
++ }
++
++ for (int i = 0; i < failures; ++i) {
++ ConcurrentUtil.backoff();
++ }
++
++ if (curr == (curr = this.compareAndExchangePriority(curr, priority))) {
++ this.updatePriorityInQueue();
++ return true;
++ }
++ ++failures;
++ }
++ }
++
++ @Override
++ public boolean execute() {
++ return this.tryComplete(false);
++ }
++
++ @Override
++ public boolean cancel() {
++ return this.tryComplete(true);
++ }
++ }
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/threadedregions/RegionisedWorldData.java b/src/main/java/io/papermc/paper/threadedregions/RegionisedWorldData.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..0ce7849e652f8093f061a87bbd48306102b66aa4
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/RegionisedWorldData.java
+@@ -0,0 +1,652 @@
++package io.papermc.paper.threadedregions;
++
++import com.destroystokyo.paper.util.maplist.ReferenceList;
++import com.destroystokyo.paper.util.misc.PlayerAreaMap;
++import com.destroystokyo.paper.util.misc.PooledLinkedHashSets;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.chunk.system.scheduling.ChunkHolderManager;
++import io.papermc.paper.util.CoordinateUtils;
++import io.papermc.paper.util.TickThread;
++import io.papermc.paper.util.maplist.IteratorSafeOrderedReferenceSet;
++import it.unimi.dsi.fastutil.longs.Long2IntOpenHashMap;
++import it.unimi.dsi.fastutil.longs.Long2ReferenceMap;
++import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap;
++import it.unimi.dsi.fastutil.objects.ObjectLinkedOpenHashSet;
++import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet;
++import net.minecraft.CrashReport;
++import net.minecraft.ReportedException;
++import net.minecraft.core.BlockPos;
++import net.minecraft.network.Connection;
++import net.minecraft.network.PacketSendListener;
++import net.minecraft.network.chat.Component;
++import net.minecraft.network.chat.MutableComponent;
++import net.minecraft.network.protocol.game.ClientboundDisconnectPacket;
++import net.minecraft.server.level.ChunkHolder;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.server.level.ServerPlayer;
++import net.minecraft.server.network.ServerGamePacketListenerImpl;
++import net.minecraft.util.Mth;
++import net.minecraft.world.entity.Entity;
++import net.minecraft.world.entity.Mob;
++import net.minecraft.world.entity.ai.village.VillageSiege;
++import net.minecraft.world.entity.item.ItemEntity;
++import net.minecraft.world.level.BlockEventData;
++import net.minecraft.world.level.ChunkPos;
++import net.minecraft.world.level.block.Block;
++import net.minecraft.world.level.block.entity.BlockEntity;
++import net.minecraft.world.level.block.entity.TickingBlockEntity;
++import net.minecraft.world.level.chunk.LevelChunk;
++import net.minecraft.world.level.gameevent.GameEvent;
++import net.minecraft.world.level.material.Fluid;
++import net.minecraft.world.level.redstone.CollectingNeighborUpdater;
++import net.minecraft.world.level.redstone.NeighborUpdater;
++import net.minecraft.world.phys.AABB;
++import net.minecraft.world.phys.Vec3;
++import net.minecraft.world.ticks.LevelTicks;
++import org.bukkit.craftbukkit.block.CraftBlockState;
++import org.bukkit.craftbukkit.util.UnsafeList;
++import org.bukkit.entity.SpawnCategory;
++import org.slf4j.Logger;
++
++import java.util.ArrayDeque;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Collection;
++import java.util.Collections;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Map;
++import java.util.concurrent.atomic.AtomicReference;
++import java.util.function.Consumer;
++import java.util.function.Predicate;
++
++public final class RegionisedWorldData {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ public static final RegionisedData.RegioniserCallback<RegionisedWorldData> REGION_CALLBACK = new RegionisedData.RegioniserCallback<>() {
++ @Override
++ public void merge(final RegionisedWorldData from, final RegionisedWorldData into, final long fromTickOffset) {
++ // connections
++ for (final Connection conn : from.connections) {
++ into.connections.add(conn);
++ }
++ // time
++ final long fromRedstoneTimeOffset = from.redstoneTime - into.redstoneTime;
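++ // (the offsets rebase timestamps recorded against the source region's clocks onto the target
++ // region's clocks, preserving relative deadlines; see Entity#updateTicks and the ticklist merges below)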
++ // entities
++ for (final ServerPlayer player : from.localPlayers) {
++ into.localPlayers.add(player);
++ }
++ for (final Entity entity : from.allEntities) {
++ into.allEntities.add(entity);
++ entity.updateTicks(fromTickOffset, fromRedstoneTimeOffset);
++ }
++ for (final Iterator<Entity> iterator = from.entityTickList.unsafeIterator(); iterator.hasNext();) {
++ into.entityTickList.add(iterator.next());
++ }
++ for (final Iterator<Mob> iterator = from.navigatingMobs.unsafeIterator(); iterator.hasNext();) {
++ into.navigatingMobs.add(iterator.next());
++ }
++ // block ticking
++ into.blockEvents.addAll(from.blockEvents);
++ // ticklists use game time
++ from.blockLevelTicks.merge(into.blockLevelTicks, fromRedstoneTimeOffset);
++ from.fluidLevelTicks.merge(into.fluidLevelTicks, fromRedstoneTimeOffset);
++
++ // tile entity ticking
++ for (final TickingBlockEntity tileEntity : from.pendingBlockEntityTickers) {
++ into.pendingBlockEntityTickers.add(tileEntity);
++ //tileEntity.updateTicks(fromTickOffset, fromRedstoneTimeOffset); // TODO
++ }
++ for (final TickingBlockEntity tileEntity : from.blockEntityTickers) {
++ into.blockEntityTickers.add(tileEntity);
++ //tileEntity.updateTicks(fromTickOffset, fromRedstoneTimeOffset); // TODO
++ }
++
++ // ticking chunks
++ for (final Iterator<LevelChunk> iterator = from.entityTickingChunks.unsafeIterator(); iterator.hasNext();) {
++ into.entityTickingChunks.add(iterator.next());
++ }
++ for (final ChunkHolder holder : from.needsChangeBroadcasting) {
++ into.needsChangeBroadcasting.add(holder);
++ }
++ // redstone torches
++ if (from.redstoneUpdateInfos != null && !from.redstoneUpdateInfos.isEmpty()) {
++ if (into.redstoneUpdateInfos == null) {
++ into.redstoneUpdateInfos = new ArrayDeque<>();
++ }
++ for (final net.minecraft.world.level.block.RedstoneTorchBlock.Toggle info : from.redstoneUpdateInfos) {
++ info.offsetTime(fromRedstoneTimeOffset);
++ into.redstoneUpdateInfos.add(info);
++ }
++ }
++ // light chunks being worked on
++ into.chunksBeingWorkedOn.putAll(from.chunksBeingWorkedOn);
++ // mob spawning
++ into.catSpawnerNextTick = Math.max(from.catSpawnerNextTick, into.catSpawnerNextTick);
++ into.patrolSpawnerNextTick = Math.max(from.patrolSpawnerNextTick, into.patrolSpawnerNextTick);
++ into.phantomSpawnerNextTick = Math.max(from.phantomSpawnerNextTick, into.phantomSpawnerNextTick);
++ if (from.wanderingTraderTickDelay != Integer.MIN_VALUE && into.wanderingTraderTickDelay != Integer.MIN_VALUE) {
++ into.wanderingTraderTickDelay = Math.max(from.wanderingTraderTickDelay, into.wanderingTraderTickDelay);
++ into.wanderingTraderSpawnDelay = Math.max(from.wanderingTraderSpawnDelay, into.wanderingTraderSpawnDelay);
++ into.wanderingTraderSpawnChance = Math.max(from.wanderingTraderSpawnChance, into.wanderingTraderSpawnChance);
++ }
++ }
++
++ @Override
++ public void split(final RegionisedWorldData from, final int chunkToRegionShift,
++ final Long2ReferenceOpenHashMap<RegionisedWorldData> regionToData,
++ final ReferenceOpenHashSet<RegionisedWorldData> dataSet) {
++ // connections
++ for (final Connection conn : from.connections) {
++ final ServerPlayer player = conn.getPlayer();
++ final ChunkPos pos = player.chunkPosition();
++ // Note: It is impossible for an entity in the world to _not_ be in an entity chunk, which means
++ // the chunk holder must _exist_, and so the region section exists.
++ regionToData.get(CoordinateUtils.getChunkKey(pos.x >> chunkToRegionShift, pos.z >> chunkToRegionShift))
++ .connections.add(conn);
++ }
++ // entities
++ for (final ServerPlayer player : from.localPlayers) {
++ final ChunkPos pos = player.chunkPosition();
++ // Note: It is impossible for an entity in the world to _not_ be in an entity chunk, which means
++ // the chunk holder must _exist_, and so the region section exists.
++ regionToData.get(CoordinateUtils.getChunkKey(pos.x >> chunkToRegionShift, pos.z >> chunkToRegionShift))
++ .localPlayers.add(player);
++ }
++ for (final Entity entity : from.allEntities) {
++ final ChunkPos pos = entity.chunkPosition();
++ // Note: It is impossible for an entity in the world to _not_ be in an entity chunk, which means
++ // the chunk holder must _exist_, and so the region section exists.
++ final RegionisedWorldData into = regionToData.get(CoordinateUtils.getChunkKey(pos.x >> chunkToRegionShift, pos.z >> chunkToRegionShift));
++ into.allEntities.add(entity);
++ // Note: entityTickList is a subset of allEntities
++ if (from.entityTickList.contains(entity)) {
++ into.entityTickList.add(entity);
++ }
++ // Note: navigatingMobs is a subset of allEntities
++ if (entity instanceof Mob mob && from.navigatingMobs.contains(mob)) {
++ into.navigatingMobs.add(mob);
++ }
++ }
++ // block ticking
++ for (final BlockEventData blockEventData : from.blockEvents) {
++ final BlockPos pos = blockEventData.pos();
++ final int chunkX = pos.getX() >> 4;
++ final int chunkZ = pos.getZ() >> 4;
++
++ final RegionisedWorldData into = regionToData.get(CoordinateUtils.getChunkKey(chunkX >> chunkToRegionShift, chunkZ >> chunkToRegionShift));
++ // Unlike entities, the chunk holder is not guaranteed to exist for block events, because the block
++ // event queue is just a plain list. So if the chunk unloads, the event is simply dropped.
++ if (into != null) {
++ into.blockEvents.add(blockEventData);
++ }
++ }
++
++ final Long2ReferenceOpenHashMap<LevelTicks<Block>> levelTicksBlockRegionData = new Long2ReferenceOpenHashMap<>(regionToData.size(), 0.75f);
++ final Long2ReferenceOpenHashMap<LevelTicks<Fluid>> levelTicksFluidRegionData = new Long2ReferenceOpenHashMap<>(regionToData.size(), 0.75f);
++
++ for (final Iterator<Long2ReferenceMap.Entry<RegionisedWorldData>> iterator = regionToData.long2ReferenceEntrySet().fastIterator();
++ iterator.hasNext();) {
++ final Long2ReferenceMap.Entry<RegionisedWorldData> entry = iterator.next();
++ final long key = entry.getLongKey();
++ final RegionisedWorldData worldData = entry.getValue();
++
++ levelTicksBlockRegionData.put(key, worldData.blockLevelTicks);
++ levelTicksFluidRegionData.put(key, worldData.fluidLevelTicks);
++ }
++
++ from.blockLevelTicks.split(chunkToRegionShift, levelTicksBlockRegionData);
++ from.fluidLevelTicks.split(chunkToRegionShift, levelTicksFluidRegionData);
++
++ // tile entity ticking
++ for (final TickingBlockEntity tileEntity : from.pendingBlockEntityTickers) {
++ final BlockPos pos = tileEntity.getPos();
++ final int chunkX = pos.getX() >> 4;
++ final int chunkZ = pos.getZ() >> 4;
++
++ final RegionisedWorldData into = regionToData.get(CoordinateUtils.getChunkKey(chunkX >> chunkToRegionShift, chunkZ >> chunkToRegionShift));
++ if (into != null) {
++ into.pendingBlockEntityTickers.add(tileEntity);
++ } // else: when a chunk unloads, it does not actually _remove_ the tile entity from the list, it just gets
++ // marked as removed. So if there is no section, it's probably removed!
++ }
++ for (final TickingBlockEntity tileEntity : from.blockEntityTickers) {
++ final BlockPos pos = tileEntity.getPos();
++ final int chunkX = pos.getX() >> 4;
++ final int chunkZ = pos.getZ() >> 4;
++
++ final RegionisedWorldData into = regionToData.get(CoordinateUtils.getChunkKey(chunkX >> chunkToRegionShift, chunkZ >> chunkToRegionShift));
++ if (into != null) {
++ into.blockEntityTickers.add(tileEntity);
++ } // else: when a chunk unloads, it does not actually _remove_ the tile entity from the list, it just gets
++ // marked as removed. So if there is no section, it's probably removed!
++ }
++ // time
++ for (final RegionisedWorldData regionisedWorldData : dataSet) {
++ regionisedWorldData.redstoneTime = from.redstoneTime;
++ }
++ // ticking chunks
++ for (final Iterator<LevelChunk> iterator = from.entityTickingChunks.unsafeIterator(); iterator.hasNext();) {
++ final LevelChunk levelChunk = iterator.next();
++ final ChunkPos pos = levelChunk.getPos();
++
++ // Impossible for get() to return null, as the chunk is entity ticking - thus the chunk holder is loaded
++ regionToData.get(CoordinateUtils.getChunkKey(pos.x >> chunkToRegionShift, pos.z >> chunkToRegionShift))
++ .entityTickingChunks.add(levelChunk);
++ }
++
++ for (final ChunkHolder holder : from.needsChangeBroadcasting) {
++ final ChunkPos pos = holder.pos;
++
++ regionToData.get(CoordinateUtils.getChunkKey(pos.x >> chunkToRegionShift, pos.z >> chunkToRegionShift))
++ .needsChangeBroadcasting.add(holder);
++ }
++ // redstone torches
++ if (from.redstoneUpdateInfos != null && !from.redstoneUpdateInfos.isEmpty()) {
++ for (final net.minecraft.world.level.block.RedstoneTorchBlock.Toggle info : from.redstoneUpdateInfos) {
++ final BlockPos pos = info.pos;
++
++ final RegionisedWorldData worldData = regionToData.get(CoordinateUtils.getChunkKey((pos.getX() >> 4) >> chunkToRegionShift, (pos.getZ() >> 4) >> chunkToRegionShift));
++ if (worldData != null) {
++ if (worldData.redstoneUpdateInfos == null) {
++ worldData.redstoneUpdateInfos = new ArrayDeque<>();
++ }
++ worldData.redstoneUpdateInfos.add(info);
++ } // else: chunk unloaded
++ }
++ }
++ // light chunks being worked on
++ for (final Iterator<Long2IntOpenHashMap.Entry> iterator = from.chunksBeingWorkedOn.long2IntEntrySet().fastIterator(); iterator.hasNext();) {
++ final Long2IntOpenHashMap.Entry entry = iterator.next();
++ final long pos = entry.getLongKey();
++ final int chunkX = CoordinateUtils.getChunkX(pos);
++ final int chunkZ = CoordinateUtils.getChunkZ(pos);
++ final int value = entry.getIntValue();
++
++ // should never be null, as each entry here is a ticket-backed reference count
++ regionToData.get(CoordinateUtils.getChunkKey(chunkX >> chunkToRegionShift, chunkZ >> chunkToRegionShift)).chunksBeingWorkedOn.put(pos, value);
++ }
++ // mob spawning
++ for (final RegionisedWorldData regionisedWorldData : dataSet) {
++ regionisedWorldData.catSpawnerNextTick = from.catSpawnerNextTick;
++ regionisedWorldData.patrolSpawnerNextTick = from.patrolSpawnerNextTick;
++ regionisedWorldData.phantomSpawnerNextTick = from.phantomSpawnerNextTick;
++ regionisedWorldData.wanderingTraderTickDelay = from.wanderingTraderTickDelay;
++ regionisedWorldData.wanderingTraderSpawnChance = from.wanderingTraderSpawnChance;
++ regionisedWorldData.wanderingTraderSpawnDelay = from.wanderingTraderSpawnDelay;
++ regionisedWorldData.villageSiegeState = new VillageSiegeState(); // just re set it, as the spawn pos will be invalid
++ }
++ }
++ };
++
++ public final ServerLevel world;
++
++ private RegionisedServer.WorldLevelData tickData;
++
++ // connections
++ public final List<Connection> connections = new ArrayList<>();
++
++ // misc. fields
++ private boolean isHandlingTick;
++
++ public void setHandlingTick(final boolean to) {
++ this.isHandlingTick = to;
++ }
++
++ public boolean isHandlingTick() {
++ return this.isHandlingTick;
++ }
++
++ // entities
++ private final List<ServerPlayer> localPlayers = new ArrayList<>();
++ private final ReferenceList<Entity> allEntities = new ReferenceList<>();
++ private final IteratorSafeOrderedReferenceSet<Entity> entityTickList = new IteratorSafeOrderedReferenceSet<>();
++ private final IteratorSafeOrderedReferenceSet<Mob> navigatingMobs = new IteratorSafeOrderedReferenceSet<>();
++
++ // block ticking
++ private final ObjectLinkedOpenHashSet<BlockEventData> blockEvents = new ObjectLinkedOpenHashSet<>();
++ private final LevelTicks<Block> blockLevelTicks;
++ private final LevelTicks<Fluid> fluidLevelTicks;
++
++ // tile entity ticking
++ private final List<TickingBlockEntity> pendingBlockEntityTickers = new ArrayList<>();
++ private final List<TickingBlockEntity> blockEntityTickers = new ArrayList<>();
++ private boolean tickingBlockEntities;
++
++ // time
++ private long redstoneTime = 1L;
++
++ public long getRedstoneGameTime() {
++ return this.redstoneTime;
++ }
++
++ public void setRedstoneGameTime(final long to) {
++ this.redstoneTime = to;
++ }
++
++ // ticking chunks
++ private final IteratorSafeOrderedReferenceSet<LevelChunk> entityTickingChunks = new IteratorSafeOrderedReferenceSet<>();
++ private final ReferenceOpenHashSet<ChunkHolder> needsChangeBroadcasting = new ReferenceOpenHashSet<>();
++
++ // Paper/CB api hook misc
++ // don't bother to merge/split these, no point
++ // From ServerLevel
++ public boolean hasPhysicsEvent = true; // Paper
++ public boolean hasEntityMoveEvent = false; // Paper
++ public long lastMidTickExecuteFailure;
++ public long lastMidTickExecute;
++ // From Level
++ public boolean populating;
++ public final NeighborUpdater neighborUpdater;
++ public boolean preventPoiUpdated = false; // CraftBukkit - SPIGOT-5710
++ public boolean captureBlockStates = false;
++ public boolean captureTreeGeneration = false;
++ public final Map<BlockPos, CraftBlockState> capturedBlockStates = new java.util.LinkedHashMap<>(); // Paper
++ public final Map<BlockPos, BlockEntity> capturedTileEntities = new java.util.LinkedHashMap<>(); // Paper
++ public List<ItemEntity> captureDrops;
++ // Paper start
++ public int wakeupInactiveRemainingAnimals;
++ public int wakeupInactiveRemainingFlying;
++ public int wakeupInactiveRemainingMonsters;
++ public int wakeupInactiveRemainingVillagers;
++ // Paper end
++ public final TempCollisionList<AABB> tempCollisionList = new TempCollisionList<>();
++ public final TempCollisionList<Entity> tempEntitiesList = new TempCollisionList<>();
++ public int currentPrimedTnt = 0; // Spigot
++
++ // not transient
++ public java.util.ArrayDeque<net.minecraft.world.level.block.RedstoneTorchBlock.Toggle> redstoneUpdateInfos;
++ public final Long2IntOpenHashMap chunksBeingWorkedOn = new Long2IntOpenHashMap();
++
++ public static final class TempCollisionList<T> {
++ final UnsafeList<T> list = new UnsafeList<>(64);
++ boolean inUse;
++
++ public UnsafeList<T> get() {
++ if (this.inUse) {
++ return new UnsafeList<>(16);
++ }
++ this.inUse = true;
++ return this.list;
++ }
++
++ public void ret(List<T> list) {
++ if (list != this.list) {
++ return;
++ }
++
++ ((UnsafeList)list).setSize(0);
++ this.inUse = false;
++ }
++
++ public void reset() {
++ this.list.completeReset();
++ }
++ }
++ public void resetCollisionLists() {
++ this.tempCollisionList.reset();
++ this.tempEntitiesList.reset();
++ }
++
++ // Mob spawning
++ private final PooledLinkedHashSets<ServerPlayer> pooledHashSets = new PooledLinkedHashSets<>();
++ public final PlayerAreaMap mobSpawnMap = new PlayerAreaMap(this.pooledHashSets);
++ public int catSpawnerNextTick = 0;
++ public int patrolSpawnerNextTick = 0;
++ public int phantomSpawnerNextTick = 0;
++ public int wanderingTraderTickDelay = Integer.MIN_VALUE;
++ public int wanderingTraderSpawnDelay;
++ public int wanderingTraderSpawnChance;
++ public VillageSiegeState villageSiegeState = new VillageSiegeState();
++
++ public static final class VillageSiegeState {
++ public boolean hasSetupSiege;
++ public VillageSiege.State siegeState = VillageSiege.State.SIEGE_DONE;
++ public int zombiesToSpawn;
++ public int nextSpawnTime;
++ public int spawnX;
++ public int spawnY;
++ public int spawnZ;
++ }
++
++ public RegionisedWorldData(final ServerLevel world) {
++ this.world = world;
++ this.blockLevelTicks = new LevelTicks<>(world::isPositionTickingWithEntitiesLoaded, world.getProfilerSupplier(), world, true);
++ this.fluidLevelTicks = new LevelTicks<>(world::isPositionTickingWithEntitiesLoaded, world.getProfilerSupplier(), world, false);
++ this.neighborUpdater = new CollectingNeighborUpdater(world, world.neighbourUpdateMax);
++
++ // tasks may be drained before the region ticks, so we must set up the tick data early just in case
++ this.updateTickData();
++ }
++
++ public RegionisedServer.WorldLevelData getTickData() {
++ return this.tickData;
++ }
++
++ public void updateTickData() {
++ this.tickData = this.world.tickData;
++ this.hasPhysicsEvent = org.bukkit.event.block.BlockPhysicsEvent.getHandlerList().getRegisteredListeners().length > 0; // Paper
++ this.hasEntityMoveEvent = io.papermc.paper.event.entity.EntityMoveEvent.getHandlerList().getRegisteredListeners().length > 0; // Paper
++ }
++
++ // connections
++ public void tickConnections() {
++ final List<Connection> connections = new ArrayList<>(this.connections);
++ Collections.shuffle(connections);
++ for (final Connection conn : connections) {
++ if (!conn.isConnected()) {
++ conn.handleDisconnection();
++ this.connections.remove(conn);
++ // note: ALL connections HERE have a player
++ final ServerPlayer player = conn.getPlayer();
++ // now that the connection is removed, we can allow this region to die
++ player.getLevel().chunkSource.removeTicketAtLevel(
++ ServerGamePacketListenerImpl.DISCONNECT_TICKET, player.connection.disconnectPos,
++ ChunkHolderManager.MAX_TICKET_LEVEL,
++ player.connection.disconnectTicketId
++ );
++ continue;
++ }
++ if (!this.connections.contains(conn)) {
++ // removed by connection tick?
++ continue;
++ }
++
++ try {
++ conn.tick();
++ } catch (final Exception exception) {
++ if (conn.isMemoryConnection()) {
++ throw new ReportedException(CrashReport.forThrowable(exception, "Ticking memory connection"));
++ }
++
++ LOGGER.warn("Failed to handle packet for {}", io.papermc.paper.configuration.GlobalConfiguration.get().logging.logPlayerIpAddresses ? String.valueOf(conn.getRemoteAddress()) : "", exception); // Paper
++ MutableComponent ichatmutablecomponent = Component.literal("Internal server error");
++
++ conn.send(new ClientboundDisconnectPacket(ichatmutablecomponent), PacketSendListener.thenRun(() -> {
++ conn.disconnect(ichatmutablecomponent);
++ }));
++ conn.setReadOnly();
++ continue;
++ }
++ }
++ }
++
++ // entities hooks
++ public Iterable<Entity> getLocalEntities() {
++ return this.allEntities;
++ }
++
++ public Entity[] getLocalEntitiesCopy() {
++ return Arrays.copyOf(this.allEntities.getRawData(), this.allEntities.size(), Entity[].class);
++ }
++
++ public List<ServerPlayer> getLocalPlayers() {
++ return this.localPlayers;
++ }
++
++ public void addEntityTickingEntity(final Entity entity) {
++ if (!TickThread.isTickThreadFor(entity)) {
++ throw new IllegalArgumentException("Entity " + entity + " is not under this region's control");
++ }
++ this.entityTickList.add(entity);
++ }
++
++ public boolean hasEntityTickingEntity(final Entity entity) {
++ return this.entityTickList.contains(entity);
++ }
++
++ public void removeEntityTickingEntity(final Entity entity) {
++ if (!TickThread.isTickThreadFor(entity)) {
++ throw new IllegalArgumentException("Entity " + entity + " is not under this region's control");
++ }
++ this.entityTickList.remove(entity);
++ }
++
++ public void forEachTickingEntity(final Consumer<Entity> action) {
++ final IteratorSafeOrderedReferenceSet.Iterator<Entity> iterator = this.entityTickList.iterator();
++ try {
++ while (iterator.hasNext()) {
++ action.accept(iterator.next());
++ }
++ } finally {
++ iterator.finishedIterating();
++ }
++ }
++
++ public void addEntity(final Entity entity) {
++ if (!TickThread.isTickThreadFor(this.world, entity.chunkPosition())) {
++ throw new IllegalArgumentException("Entity " + entity + " is not under this region's control");
++ }
++ if (this.allEntities.add(entity)) {
++ if (entity instanceof ServerPlayer player) {
++ this.localPlayers.add(player);
++ }
++ }
++ }
++
++ public boolean hasEntity(final Entity entity) {
++ return this.allEntities.contains(entity);
++ }
++
++ public void removeEntity(final Entity entity) {
++ if (!TickThread.isTickThreadFor(entity)) {
++ throw new IllegalArgumentException("Entity " + entity + " is not under this region's control");
++ }
++ if (this.allEntities.remove(entity)) {
++ if (entity instanceof ServerPlayer player) {
++ this.localPlayers.remove(player);
++ }
++ }
++ }
++
++ public void addNavigatingMob(final Mob mob) {
++ if (!TickThread.isTickThreadFor(mob)) {
++ throw new IllegalArgumentException("Entity " + mob + " is not under this region's control");
++ }
++ this.navigatingMobs.add(mob);
++ }
++
++ public void removeNavigatingMob(final Mob mob) {
++ if (!TickThread.isTickThreadFor(mob)) {
++ throw new IllegalArgumentException("Entity " + mob + " is not under this region's control");
++ }
++ this.navigatingMobs.remove(mob);
++ }
++
++ public Iterator<Mob> getNavigatingMobs() {
++ return this.navigatingMobs.unsafeIterator();
++ }
++
++ // block ticking hooks
++ // Since block event data does not require chunk holders to be created for the chunk they reside in,
++ // it's not actually guaranteed that when merging / splitting data that we actually own the data...
++ // Note that we can only ever not own the event data when the chunk unloads, and so I've decided to
++ // make the code easier by simply discarding it in such an event
++ public void pushBlockEvent(final BlockEventData blockEventData) {
++ TickThread.ensureTickThread(this.world, blockEventData.pos(), "Cannot queue block even data async");
++ this.blockEvents.add(blockEventData);
++ }
++
++ public void pushBlockEvents(final Collection<? extends BlockEventData> blockEvents) {
++ for (final BlockEventData blockEventData : blockEvents) {
++ this.pushBlockEvent(blockEventData);
++ }
++ }
++
++ public void removeIfBlockEvents(final Predicate<? super BlockEventData> predicate) {
++ for (final Iterator<BlockEventData> iterator = this.blockEvents.iterator(); iterator.hasNext();) {
++ final BlockEventData blockEventData = iterator.next();
++ if (predicate.test(blockEventData)) {
++ iterator.remove();
++ }
++ }
++ }
++
++ public BlockEventData removeFirstBlockEvent() {
++ BlockEventData ret;
++ while (!this.blockEvents.isEmpty()) {
++ ret = this.blockEvents.removeFirst();
++ if (TickThread.isTickThreadFor(this.world, ret.pos())) {
++ return ret;
++ } // else: chunk must have been unloaded
++ }
++
++ return null;
++ }
++
++ public LevelTicks<Block> getBlockLevelTicks() {
++ return this.blockLevelTicks;
++ }
++
++ public LevelTicks<Fluid> getFluidLevelTicks() {
++ return this.fluidLevelTicks;
++ }
++
++ // tile entity ticking
++ public void addBlockEntityTicker(final TickingBlockEntity ticker) {
++ TickThread.ensureTickThread(this.world, ticker.getPos(), "Tile entity must be owned by current region");
++
++ (this.tickingBlockEntities ? this.pendingBlockEntityTickers : this.blockEntityTickers).add(ticker);
++ }
++
++ public void seTtickingBlockEntities(final boolean to) {
++ this.tickingBlockEntities = to;
++ }
++
++ public List getBlockEntityTickers() {
++ return this.blockEntityTickers;
++ }
++
++ public void pushPendingTickingBlockEntities() {
++ if (!this.pendingBlockEntityTickers.isEmpty()) {
++ this.blockEntityTickers.addAll(this.pendingBlockEntityTickers);
++ this.pendingBlockEntityTickers.clear();
++ }
++ }
++
++ // ticking chunks
++ public void addEntityTickingChunks(final LevelChunk levelChunk) {
++ this.entityTickingChunks.add(levelChunk);
++ }
++
++ public void removeEntityTickingChunk(final LevelChunk levelChunk) {
++ this.entityTickingChunks.remove(levelChunk);
++ }
++
++ public IteratorSafeOrderedReferenceSet<LevelChunk> getEntityTickingChunks() {
++ return this.entityTickingChunks;
++ }
++
++ public void addChunkHolderNeedsBroadcasting(final ChunkHolder holder) {
++ this.needsChangeBroadcasting.add(holder);
++ }
++
++ public void removeChunkHolderNeedsBroadcasting(final ChunkHolder holder) {
++ this.needsChangeBroadcasting.remove(holder);
++ }
++
++ public ReferenceOpenHashSet<ChunkHolder> getNeedsChangeBroadcasting() {
++ return this.needsChangeBroadcasting;
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/threadedregions/Schedule.java b/src/main/java/io/papermc/paper/threadedregions/Schedule.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..112d24a93bddf3d81c9176c05340c94ecd1a40a3
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/Schedule.java
+@@ -0,0 +1,91 @@
++package io.papermc.paper.threadedregions;
++
++/**
++ * A Schedule is an object that can be used to maintain a periodic schedule for an event of interest.
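++ * <p>
++ * For illustration, a fixed-period loop might drive a Schedule as follows (hypothetical usage;
++ * {@code tick()} stands in for the event of interest):
++ * </p>
++ * <pre>{@code
++ * final long periodNs = 50L * 1000L * 1000L; // 50ms period
++ * final Schedule schedule = new Schedule(System.nanoTime());
++ * for (;;) {
++ *     final int periodsBehind = schedule.getPeriodsAhead(periodNs, System.nanoTime());
++ *     for (int i = 0; i < periodsBehind; ++i) {
++ *         tick(); // catch up on every period that has elapsed
++ *     }
++ *     schedule.advanceBy(periodsBehind, periodNs);
++ *     // sleep until the next deadline
++ *     java.util.concurrent.locks.LockSupport.parkNanos(schedule.getDeadline(periodNs) - System.nanoTime());
++ * }
++ * }</pre>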
++ */
++public final class Schedule {
++
++ private long lastPeriod;
++
++ /**
++ * Initialises a schedule with the provided period.
++ * @param firstPeriod The last time an event of interest occurred.
++ * @see #setLastPeriod(long)
++ */
++ public Schedule(final long firstPeriod) {
++ this.lastPeriod = firstPeriod;
++ }
++
++ /**
++ * Updates the last period to the specified value. This call sets the last "time" the event
++ * of interest took place at. Thus, the value returned by {@link #getDeadline(long)} is
++ * the provided time plus the period length provided to {@code getDeadline}.
++ * @param value The value to set the last period to.
++ */
++ public void setLastPeriod(final long value) {
++ this.lastPeriod = value;
++ }
++
++ /**
++ * Returns the last time the event of interest should have taken place.
++ */
++ public long getLastPeriod() {
++ return this.lastPeriod;
++ }
++
++ /**
++ * Returns the number of times the event of interest should have taken place between the last
++ * period and the provided time given the period between each event.
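++ * <p>For example, with a last period of 100, a period length of 50 and a time of 250, this returns 3.</p>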
++ * @param periodLength The length of the period between events in ns.
++ * @param time The provided time.
++ */
++ public int getPeriodsAhead(final long periodLength, final long time) {
++ final long difference = time - this.lastPeriod;
++ final int ret = (int)(Math.abs(difference) / periodLength);
++ return difference >= 0 ? ret : -ret;
++ }
++
++ /**
++ * Returns the next starting deadline for the event of interest to take place,
++ * given the provided period length.
++ * @param periodLength The provided period length.
++ */
++ public long getDeadline(final long periodLength) {
++ return this.lastPeriod + periodLength;
++ }
++
++ /**
++ * Adjusts the last period so that the next starting deadline returned is the next period specified,
++ * given the provided period length.
++ * @param nextPeriod The specified next starting deadline.
++ * @param periodLength The specified period length.
++ */
++ public void setNextPeriod(final long nextPeriod, final long periodLength) {
++ this.lastPeriod = nextPeriod - periodLength;
++ }
++
++ /**
++ * Increases the last period by the specified number of periods and period length.
++ * The specified number of periods may be {@code < 0}, in which case the last period
++ * will decrease.
++ * @param periods The specified number of periods.
++ * @param periodLength The specified period length.
++ */
++ public void advanceBy(final int periods, final long periodLength) {
++ this.lastPeriod += (long)periods * periodLength;
++ }
++
++ /**
++ * Sets the last period so that it is the specified number of periods ahead
++ * given the specified time and period length.
++ * @param periodsToBeAhead Specified number of periods to be ahead by.
++ * @param periodLength The specified period length.
++ * @param time The specified time.
++ */
++ public void setPeriodsAhead(final int periodsToBeAhead, final long periodLength, final long time) {
++ final int periodsAhead = this.getPeriodsAhead(periodLength, time);
++ final int periodsToAdd = periodsToBeAhead - periodsAhead;
++
++ this.lastPeriod -= (long)periodsToAdd * periodLength;
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/threadedregions/TeleportUtils.java b/src/main/java/io/papermc/paper/threadedregions/TeleportUtils.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..64d67c2c6c67fa64582b4f8516bd2350f4f034e5
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/TeleportUtils.java
+@@ -0,0 +1,60 @@
++package io.papermc.paper.threadedregions;
++
++import ca.spottedleaf.concurrentutil.completable.Completable;
++import net.minecraft.world.entity.Entity;
++import net.minecraft.world.phys.Vec3;
++import org.bukkit.Location;
++import org.bukkit.craftbukkit.CraftWorld;
++import org.bukkit.event.player.PlayerTeleportEvent;
++import java.util.function.Consumer;
++
++public final class TeleportUtils {
++
++ public static void teleport(final Entity from, final Entity to, final Float yaw, final Float pitch,
++ final long teleportFlags, final PlayerTeleportEvent.TeleportCause cause, final Consumer<Entity> onComplete) {
++ // retrieve coordinates
++ final Completable<Location> positionCompletable = new Completable<>();
++
++ positionCompletable.addWaiter(
++ (final Location loc, final Throwable thr) -> {
++ if (loc == null) {
++ onComplete.accept(null);
++ return;
++ }
++ final boolean scheduled = from.getBukkitEntity().taskScheduler.schedule(
++ (final Entity realFrom) -> {
++ final Vec3 pos = new Vec3(
++ loc.getX(), loc.getY(), loc.getZ()
++ );
++ realFrom.teleportAsync(
++ ((CraftWorld)loc.getWorld()).getHandle(), pos, null, null, null,
++ cause, teleportFlags, onComplete
++ );
++ },
++ (final Entity retired) -> {
++ onComplete.accept(null);
++ },
++ 1L
++ );
++ if (!scheduled) {
++ onComplete.accept(null);
++ }
++ }
++ );
++
++ final boolean scheduled = to.getBukkitEntity().taskScheduler.schedule(
++ (final Entity target) -> {
++ positionCompletable.complete(target.getBukkitEntity().getLocation());
++ },
++ (final Entity retired) -> {
++ onComplete.accept(null);
++ },
++ 1L
++ );
++ if (!scheduled) {
++ onComplete.accept(null);
++ }
++ }
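++
++ /*
++ * Flow summary: this is a two-hop handoff between region threads. The task
++ * scheduled on the *target* entity's region reads a consistent Location and
++ * completes the Completable; its waiter then schedules onto the *source*
++ * entity's region to perform the actual teleportAsync. Every failure path
++ * (retired entity, failed scheduling, null location) invokes onComplete with
++ * null, so the caller always receives exactly one callback.
++ */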
++
++ private TeleportUtils() {}
++}
+diff --git a/src/main/java/io/papermc/paper/threadedregions/ThreadedRegioniser.java b/src/main/java/io/papermc/paper/threadedregions/ThreadedRegioniser.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..f05546aa9124d4c0e34005f528483bf516e93c20
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/ThreadedRegioniser.java
+@@ -0,0 +1,1187 @@
++package io.papermc.paper.threadedregions;
++
++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
++import ca.spottedleaf.concurrentutil.map.SWMRLong2ObjectHashTable;
++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
++import com.google.gson.JsonArray;
++import com.google.gson.JsonElement;
++import com.google.gson.JsonObject;
++import com.google.gson.JsonParser;
++import io.papermc.paper.util.CoordinateUtils;
++import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap;
++import it.unimi.dsi.fastutil.longs.LongArrayList;
++import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet;
++import net.minecraft.core.BlockPos;
++import net.minecraft.server.level.ServerLevel;
++import net.minecraft.world.entity.Entity;
++import net.minecraft.world.level.ChunkPos;
++
++import java.io.FileReader;
++import java.lang.invoke.VarHandle;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.Iterator;
++import java.util.List;
++import java.util.concurrent.atomic.AtomicLong;
++import java.util.concurrent.locks.StampedLock;
++import java.util.function.Consumer;
++
++public final class ThreadedRegioniser<R extends ThreadedRegioniser.ThreadedRegionData<R, S>, S extends ThreadedRegioniser.ThreadedRegionSectionData> {
++
++ public final int regionSectionChunkSize;
++ public final int sectionChunkShift;
++ public final int minSectionRecalcCount;
++ public final int emptySectionCreateRadius;
++ public final int regionSectionMergeRadius;
++ public final double maxDeadRegionPercent;
++ public final ServerLevel world;
++
++ private final SWMRLong2ObjectHashTable<ThreadedRegionSection<R, S>> sections = new SWMRLong2ObjectHashTable<>();
++ private final SWMRLong2ObjectHashTable<ThreadedRegion<R, S>> regionsById = new SWMRLong2ObjectHashTable<>();
++ private final RegionCallbacks<R, S> callbacks;
++ private final StampedLock regionLock = new StampedLock();
++ private Thread writeLockOwner;
++
++ /*
++ static final record Operation(String type, int chunkX, int chunkZ) {}
++ private final MultiThreadedQueue<Operation> ops = new MultiThreadedQueue<>();
++ */
++
++ public ThreadedRegioniser(final int minSectionRecalcCount, final double maxDeadRegionPercent,
++ final int emptySectionCreateRadius, final int regionSectionMergeRadius,
++ final int regionSectionChunkShift, final ServerLevel world,
++ final RegionCallbacks<R, S> callbacks) {
++ if (emptySectionCreateRadius <= 0) {
++ throw new IllegalStateException("Region section create radius must be > 0");
++ }
++ if (regionSectionMergeRadius <= 0) {
++ throw new IllegalStateException("Region section merge radius must be > 0");
++ }
++ this.regionSectionChunkSize = 1 << regionSectionChunkShift;
++ this.sectionChunkShift = regionSectionChunkShift;
++ this.minSectionRecalcCount = Math.max(2, minSectionRecalcCount);
++ this.maxDeadRegionPercent = maxDeadRegionPercent;
++ this.emptySectionCreateRadius = emptySectionCreateRadius;
++ this.regionSectionMergeRadius = regionSectionMergeRadius;
++ this.world = world;
++ this.callbacks = callbacks;
++ //this.loadTestData();
++ }
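++
++ // Illustrative parameterisation (hypothetical values, not necessarily the defaults used in practice):
++ // sections of 2^4 = 16x16 chunks, at least 8 sections before a dead-section
++ // recalculation is considered, 30% dead sections to trigger it, and create/merge radii of 1:
++ // new ThreadedRegioniser<>(8, 0.30, 1, 1, 4, world, callbacks);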
++
++ /*
++ private static String substr(String val, String prefix, int from) {
++ int idx = val.indexOf(prefix, from) + prefix.length();
++ int idx2 = val.indexOf(',', idx);
++ if (idx2 == -1) {
++ idx2 = val.indexOf(']', idx);
++ }
++ return val.substring(idx, idx2);
++ }
++
++ private void loadTestData() {
++ if (true) {
++ return;
++ }
++ try {
++ final JsonArray arr = JsonParser.parseReader(new FileReader("test.json")).getAsJsonArray();
++
++ List<Operation> ops = new ArrayList<>();
++
++ for (JsonElement elem : arr) {
++ JsonObject obj = elem.getAsJsonObject();
++ String val = obj.get("value").getAsString();
++
++ String type = substr(val, "type=", 0);
++ String x = substr(val, "chunkX=", 0);
++ String z = substr(val, "chunkZ=", 0);
++
++ ops.add(new Operation(type, Integer.parseInt(x), Integer.parseInt(z)));
++ }
++
++ for (Operation op : ops) {
++ switch (op.type) {
++ case "add": {
++ this.addChunk(op.chunkX, op.chunkZ);
++ break;
++ }
++ case "remove": {
++ this.removeChunk(op.chunkX, op.chunkZ);
++ break;
++ }
++ case "mark_ticking": {
++ this.sections.get(CoordinateUtils.getChunkKey(op.chunkX, op.chunkZ)).region.tryMarkTicking();
++ break;
++ }
++ case "rel_region": {
++ if (this.sections.get(CoordinateUtils.getChunkKey(op.chunkX, op.chunkZ)).region.state == ThreadedRegion.STATE_TICKING) {
++ this.sections.get(CoordinateUtils.getChunkKey(op.chunkX, op.chunkZ)).region.markNotTicking();
++ }
++ break;
++ }
++ }
++ }
++
++ } catch (final Exception ex) {
++ throw new IllegalStateException(ex);
++ }
++ }
++ */
++
++ private void acquireWriteLock() {
++ final Thread currentThread = Thread.currentThread();
++ if (this.writeLockOwner == currentThread) {
++ throw new IllegalStateException("Cannot recursively operate in the regioniser");
++ }
++ this.regionLock.writeLock();
++ this.writeLockOwner = currentThread;
++ }
++
++ private void releaseWriteLock() {
++ this.writeLockOwner = null;
++ this.regionLock.tryUnlockWrite();
++ }
++
++ private void onRegionCreate(final ThreadedRegion<R, S> region) {
++ final ThreadedRegion<R, S> conflict;
++ if ((conflict = this.regionsById.putIfAbsent(region.id, region)) != null) {
++ throw new IllegalStateException("Region " + region + " is already mapped to " + conflict);
++ }
++ }
++
++ private void onRegionDestroy(final ThreadedRegion<R, S> region) {
++ final ThreadedRegion<R, S> removed = this.regionsById.remove(region.id);
++ if (removed != region) {
++ throw new IllegalStateException("Expected to remove " + region + ", but removed " + removed);
++ }
++ }
++
++ public int getSectionCoordinate(final int chunkCoordinate) {
++ return chunkCoordinate >> this.sectionChunkShift;
++ }
++
++ public long getSectionKey(final BlockPos pos) {
++ return CoordinateUtils.getChunkKey((pos.getX() >> 4) >> this.sectionChunkShift, (pos.getZ() >> 4) >> this.sectionChunkShift);
++ }
++
++ public long getSectionKey(final ChunkPos pos) {
++ return CoordinateUtils.getChunkKey(pos.x >> this.sectionChunkShift, pos.z >> this.sectionChunkShift);
++ }
++
++ public long getSectionKey(final Entity entity) {
++ final ChunkPos pos = entity.chunkPosition();
++ return CoordinateUtils.getChunkKey(pos.x >> this.sectionChunkShift, pos.z >> this.sectionChunkShift);
++ }
++
++ public void computeForAllRegions(final Consumer<? super ThreadedRegion<R, S>> consumer) {
++ this.regionLock.readLock();
++ try {
++ this.regionsById.forEachValue(consumer);
++ } finally {
++ this.regionLock.tryUnlockRead();
++ }
++ }
++
++ public void computeForAllRegionsUnsynchronised(final Consumer<? super ThreadedRegion<R, S>> consumer) {
++ this.regionsById.forEachValue(consumer);
++ }
++
++ public ThreadedRegion<R, S> getRegionAtUnsynchronised(final int chunkX, final int chunkZ) {
++ final int sectionX = chunkX >> this.sectionChunkShift;
++ final int sectionZ = chunkZ >> this.sectionChunkShift;
++ final long sectionKey = CoordinateUtils.getChunkKey(sectionX, sectionZ);
++
++ final ThreadedRegionSection<R, S> section = this.sections.get(sectionKey);
++
++ return section == null ? null : section.getRegion();
++ }
++
++ public ThreadedRegion<R, S> getRegionAtSynchronised(final int chunkX, final int chunkZ) {
++ final int sectionX = chunkX >> this.sectionChunkShift;
++ final int sectionZ = chunkZ >> this.sectionChunkShift;
++ final long sectionKey = CoordinateUtils.getChunkKey(sectionX, sectionZ);
++
++ // try an optimistic read
++ {
++ final long readAttempt = this.regionLock.tryOptimisticRead();
++ final ThreadedRegionSection<R, S> optimisticSection = this.sections.get(sectionKey);
++ final ThreadedRegion<R, S> optimisticRet =
++ optimisticSection == null ? null : optimisticSection.getRegionPlain();
++ if (this.regionLock.validate(readAttempt)) {
++ return optimisticRet;
++ }
++ }
++
++ // failed, fall back to acquiring the lock
++ this.regionLock.readLock();
++ try {
++ final ThreadedRegionSection<R, S> section = this.sections.get(sectionKey);
++
++ return section == null ? null : section.getRegionPlain();
++ } finally {
++ this.regionLock.tryUnlockRead();
++ }
++ }
++
++ /**
++ * Adds a chunk to the regioniser. Note that it is illegal to add a chunk unless
++ * addChunk has not been called for it or removeChunk has been previously called.
++ *
++ * <p>
++ * Note that it is illegal to additionally call addChunk or removeChunk for the same
++ * region section in parallel.
++ * </p>
++ */
++ public void addChunk(final int chunkX, final int chunkZ) {
++ final int sectionX = chunkX >> this.sectionChunkShift;
++ final int sectionZ = chunkZ >> this.sectionChunkShift;
++ final long sectionKey = CoordinateUtils.getChunkKey(sectionX, sectionZ);
++
++ // Given that for each section, no addChunk/removeChunk can occur in parallel,
++ // we can avoid the lock IF the section exists AND it has a non-zero chunk count.
++ {
++ final ThreadedRegionSection<R, S> existing = this.sections.get(sectionKey);
++ if (existing != null && !existing.isEmpty()) {
++ existing.addChunk(chunkX, chunkZ);
++ return;
++ } // else: just acquire the write lock
++ }
++
++ this.acquireWriteLock();
++ try {
++ ThreadedRegionSection<R, S> section = this.sections.get(sectionKey);
++
++ List<ThreadedRegionSection<R, S>> newSections = new ArrayList<>();
++
++ if (section == null) {
++ // no section at all
++ section = new ThreadedRegionSection<>(sectionX, sectionZ, this, chunkX, chunkZ);
++ this.sections.put(sectionKey, section);
++ newSections.add(section);
++ } else {
++ section.addChunk(chunkX, chunkZ);
++ }
++ // due to the fast check above, we know the section was empty before this add, whether or not we needed to create it
++
++ // enforce the adjacency invariant by creating / updating neighbour sections
++ final int createRadius = this.emptySectionCreateRadius;
++ final int searchRadius = createRadius + this.regionSectionMergeRadius;
++ ReferenceOpenHashSet<ThreadedRegion<R, S>> nearbyRegions = null;
++ for (int dx = -searchRadius; dx <= searchRadius; ++dx) {
++ for (int dz = -searchRadius; dz <= searchRadius; ++dz) {
++ if ((dx | dz) == 0) {
++ continue;
++ }
++ final int squareDistance = Math.max(Math.abs(dx), Math.abs(dz));
++ final boolean inCreateRange = squareDistance <= createRadius;
++
++ final int neighbourX = dx + sectionX;
++ final int neighbourZ = dz + sectionZ;
++ final long neighbourKey = CoordinateUtils.getChunkKey(neighbourX, neighbourZ);
++
++ ThreadedRegionSection<R, S> neighbourSection = this.sections.get(neighbourKey);
++
++ if (neighbourSection != null) {
++ if (nearbyRegions == null) {
++ nearbyRegions = new ReferenceOpenHashSet<>(((searchRadius * 2 + 1) * (searchRadius * 2 + 1)) >> 1);
++ }
++ nearbyRegions.add(neighbourSection.getRegionPlain());
++ }
++
++ if (!inCreateRange) {
++ continue;
++ }
++
++ // we need to ensure the section exists
++ if (neighbourSection != null) {
++ // nothing else to do
++ neighbourSection.incrementNonEmptyNeighbours();
++ continue;
++ }
++ neighbourSection = new ThreadedRegionSection<>(neighbourX, neighbourZ, this, 1);
++ if (null != this.sections.put(neighbourKey, neighbourSection)) {
++ throw new IllegalStateException("Failed to insert new section");
++ }
++ newSections.add(neighbourSection);
++ }
++ }
++
++ final ThreadedRegion<R, S> regionOfInterest;
++ final boolean regionOfInterestAlive;
++ if (nearbyRegions == null) {
++ // we can simply create a new region, don't have neighbours to worry about merging into
++ regionOfInterest = new ThreadedRegion<>(this);
++ regionOfInterestAlive = true;
++
++ for (int i = 0, len = newSections.size(); i < len; ++i) {
++ regionOfInterest.addSection(newSections.get(i));
++ }
++
++ // only call create callback after adding sections
++ regionOfInterest.onCreate();
++ } else {
++ // need to merge the regions
++
++ ThreadedRegion<R, S> firstUnlockedRegion = null;
++
++ for (final ThreadedRegion<R, S> region : nearbyRegions) {
++ if (region.isTicking()) {
++ continue;
++ }
++ firstUnlockedRegion = region;
++ break;
++ }
++
++ if (firstUnlockedRegion != null) {
++ regionOfInterest = firstUnlockedRegion;
++ } else {
++ regionOfInterest = new ThreadedRegion<>(this);
++ }
++
++ for (int i = 0, len = newSections.size(); i < len; ++i) {
++ regionOfInterest.addSection(newSections.get(i));
++ }
++
++ // only call create callback after adding sections
++ if (firstUnlockedRegion == null) {
++ regionOfInterest.onCreate();
++ }
++
++ if (firstUnlockedRegion != null && nearbyRegions.size() == 1) {
++ // nothing to do further, no need to merge anything
++ return;
++ }
++
++ // we need to now tell all the other regions to merge into the region we just created,
++ // and to merge all the ones we can immediately
++
++ boolean delayedTrueMerge = false;
++
++ for (final ThreadedRegion<R, S> region : nearbyRegions) {
++ if (region == regionOfInterest) {
++ continue;
++ }
++ // need the relaxed check, as the region may already be
++ // a merge target
++ if (!region.tryKill()) {
++ regionOfInterest.mergeIntoLater(region);
++ delayedTrueMerge = true;
++ } else {
++ region.mergeInto(regionOfInterest);
++ }
++ }
++
++ if (delayedTrueMerge && firstUnlockedRegion != null) {
++ // we need to retire this region, as it can no longer tick
++ if (regionOfInterest.state == ThreadedRegion.STATE_STEADY_STATE) {
++ regionOfInterest.state = ThreadedRegion.STATE_NOT_READY;
++ this.callbacks.onRegionInactive(regionOfInterest);
++ }
++ }
++
++ // need to set alive if we created it and we didn't delay a merge
++ regionOfInterestAlive = firstUnlockedRegion == null && !delayedTrueMerge && regionOfInterest.mergeIntoLater.isEmpty() && regionOfInterest.expectingMergeFrom.isEmpty();
++ }
++
++ if (regionOfInterestAlive) {
++ regionOfInterest.state = ThreadedRegion.STATE_STEADY_STATE;
++ if (!regionOfInterest.mergeIntoLater.isEmpty() || !regionOfInterest.expectingMergeFrom.isEmpty()) {
++ throw new IllegalStateException("Should not happen on region " + this);
++ }
++ this.callbacks.onRegionActive(regionOfInterest);
++ }
++ } finally {
++ this.releaseWriteLock();
++ }
++ }
++
++ public void removeChunk(final int chunkX, final int chunkZ) {
++ final int sectionX = chunkX >> this.sectionChunkShift;
++ final int sectionZ = chunkZ >> this.sectionChunkShift;
++ final long sectionKey = CoordinateUtils.getChunkKey(sectionX, sectionZ);
++
++ // Given that for each section, no addChunk/removeChunk can occur in parallel,
++ // we can avoid the lock IF the section exists AND it has a chunk count > 1
++ final ThreadedRegionSection<R, S> section = this.sections.get(sectionKey);
++ if (section == null) {
++ throw new IllegalStateException("Chunk (" + chunkX + "," + chunkZ + ") has no section");
++ }
++ if (!section.hasOnlyOneChunk()) {
++ // chunk will not go empty, so we don't need to acquire the lock
++ section.removeChunk(chunkX, chunkZ);
++ return;
++ }
++
++ this.acquireWriteLock();
++ try {
++ section.removeChunk(chunkX, chunkZ);
++
++ final int searchRadius = this.emptySectionCreateRadius;
++ for (int dx = -searchRadius; dx <= searchRadius; ++dx) {
++ for (int dz = -searchRadius; dz <= searchRadius; ++dz) {
++ if ((dx | dz) == 0) {
++ continue;
++ }
++
++ final int neighbourX = dx + sectionX;
++ final int neighbourZ = dz + sectionZ;
++ final long neighbourKey = CoordinateUtils.getChunkKey(neighbourX, neighbourZ);
++
++ final ThreadedRegionSection<R, S> neighbourSection = this.sections.get(neighbourKey);
++
++ // should be non-null here always
++ neighbourSection.decrementNonEmptyNeighbours();
++ }
++ }
++ } finally {
++ this.releaseWriteLock();
++ }
++ }
++
++ // must hold regionLock
++ private void onRegionRelease(final ThreadedRegion<R, S> region) {
++ if (!region.mergeIntoLater.isEmpty()) {
++ throw new IllegalStateException("Region " + region + " should not have any regions to merge into!");
++ }
++
++ final boolean hasExpectingMerges = !region.expectingMergeFrom.isEmpty();
++
++ // is this region supposed to merge into any other region?
++ if (hasExpectingMerges) {
++ // merge the regions into this one
++ final ReferenceOpenHashSet<ThreadedRegion<R, S>> expectingMergeFrom = region.expectingMergeFrom.clone();
++ for (final ThreadedRegion<R, S> mergeFrom : expectingMergeFrom) {
++ if (!mergeFrom.tryKill()) {
++ throw new IllegalStateException("Merge from region " + mergeFrom + " should be killable! Trying to merge into " + region);
++ }
++ mergeFrom.mergeInto(region);
++ }
++
++ if (!region.expectingMergeFrom.isEmpty()) {
++ throw new IllegalStateException("Region " + region + " should no longer have merge requests after mering from " + expectingMergeFrom);
++ }
++
++ if (!region.mergeIntoLater.isEmpty()) {
++ // There is another nearby ticking region that we need to merge into
++ region.state = ThreadedRegion.STATE_NOT_READY;
++ this.callbacks.onRegionInactive(region);
++ // return to avoid removing dead sections or splitting, these actions will be performed
++ // by the region we merge into
++ return;
++ }
++ }
++
++ // now check whether we need to recalculate regions
++ final boolean removeDeadSections = hasExpectingMerges || region.hasNoAliveSections()
++ || (region.sectionByKey.size() >= this.minSectionRecalcCount && region.getDeadSectionPercent() >= this.maxDeadRegionPercent);
++ final boolean removedDeadSections = removeDeadSections && !region.deadSections.isEmpty();
++ if (removeDeadSections) {
++ // kill dead sections
++ for (final ThreadedRegionSection<R, S> deadSection : region.deadSections) {
++ final long key = CoordinateUtils.getChunkKey(deadSection.sectionX, deadSection.sectionZ);
++
++ if (!deadSection.isEmpty()) {
++ throw new IllegalStateException("Dead section '" + deadSection.toStringWithRegion() + "' is marked dead but has chunks!");
++ }
++ if (deadSection.hasNonEmptyNeighbours()) {
++ throw new IllegalStateException("Dead section '" + deadSection.toStringWithRegion() + "' is marked dead but has non-empty neighbours!");
++ }
++ if (!region.sectionByKey.remove(key, deadSection)) {
++ throw new IllegalStateException("Region " + region + " has inconsistent state, it should contain section " + deadSection);
++ }
++ if (this.sections.remove(key) != deadSection) {
++ throw new IllegalStateException("Cannot remove dead section '" +
++ deadSection.toStringWithRegion() + "' from section state! State at section coordinate: " + this.sections.get(key));
++ }
++ }
++ region.deadSections.clear();
++ }
++
++ // if we removed dead sections, we should check if the region can be split into smaller ones
++ // otherwise, the region remains alive
++ if (!removedDeadSections) {
++ region.state = ThreadedRegion.STATE_STEADY_STATE;
++ if (!region.expectingMergeFrom.isEmpty() || !region.mergeIntoLater.isEmpty()) {
++ throw new IllegalStateException("Illegal state " + region);
++ }
++ return;
++ }
++
++ // first, we need to build copy of coordinate->section map of all sections in recalculate
++ final Long2ReferenceOpenHashMap<ThreadedRegionSection<R, S>> recalculateSections = region.sectionByKey.clone();
++
++ if (recalculateSections.isEmpty()) {
++ // looks like the region's sections were all dead, and now there is no region at all
++ region.state = ThreadedRegion.STATE_DEAD;
++ region.onRemove(true);
++ return;
++ }
++
++ // merge radius is max, since recalculateSections includes the dead or empty sections
++ final int mergeRadius = Math.max(this.regionSectionMergeRadius, this.emptySectionCreateRadius);
++
++ final List<List<ThreadedRegionSection<R, S>>> newRegions = new ArrayList<>();
++ while (!recalculateSections.isEmpty()) {
++ // select any section, then BFS around it to find all of its neighbours to form a region
++ // once no more neighbours are found, the region is complete
++ final List<ThreadedRegionSection<R, S>> currRegion = new ArrayList<>();
++ final Iterator<ThreadedRegionSection<R, S>> firstIterator = recalculateSections.values().iterator();
++
++ currRegion.add(firstIterator.next());
++ firstIterator.remove();
++ search_loop:
++ for (int idx = 0; idx < currRegion.size(); ++idx) {
++ final ThreadedRegionSection<R, S> curr = currRegion.get(idx);
++ final int centerX = curr.sectionX;
++ final int centerZ = curr.sectionZ;
++
++ // find neighbours in radius
++ for (int dz = -mergeRadius; dz <= mergeRadius; ++dz) {
++ for (int dx = -mergeRadius; dx <= mergeRadius; ++dx) {
++ if ((dx | dz) == 0) {
++ continue;
++ }
++
++ final ThreadedRegionSection<R, S> section = recalculateSections.remove(CoordinateUtils.getChunkKey(dx + centerX, dz + centerZ));
++ if (section == null) {
++ continue;
++ }
++
++ currRegion.add(section);
++
++ if (recalculateSections.isEmpty()) {
++ // no point in searching further
++ break search_loop;
++ }
++ }
++ }
++ }
++
++ newRegions.add(currRegion);
++ }
++
++ // now that the sections are grouped into connected components, we can perform the split
++
++ if (newRegions.size() == 1) {
++ // no need to split anything, we're done here
++ region.state = ThreadedRegion.STATE_STEADY_STATE;
++ if (!region.expectingMergeFrom.isEmpty() || !region.mergeIntoLater.isEmpty()) {
++ throw new IllegalStateException("Illegal state " + region);
++ }
++ return;
++ }
++
++ // need to split the region, so we need to kill the old one first
++ region.state = ThreadedRegion.STATE_DEAD;
++ region.onRemove(true);
++
++ // create new regions
++ final Long2ReferenceOpenHashMap<ThreadedRegion<R, S>> newRegionsMap = new Long2ReferenceOpenHashMap<>();
++ final ReferenceOpenHashSet<ThreadedRegion<R, S>> newRegionsSet = new ReferenceOpenHashSet<>();
++
++ for (final List<ThreadedRegionSection<R, S>> sections : newRegions) {
++ final ThreadedRegion<R, S> newRegion = new ThreadedRegion<>(this);
++ newRegionsSet.add(newRegion);
++
++ for (final ThreadedRegionSection<R, S> section : sections) {
++ section.setRegionRelease(null);
++ newRegion.addSection(section);
++ final ThreadedRegion<R, S> curr = newRegionsMap.putIfAbsent(section.sectionKey, newRegion);
++ if (curr != null) {
++ throw new IllegalStateException("Expected no region at " + section + ", but got " + curr + ", should have put " + newRegion);
++ }
++ }
++ }
++
++ region.split(newRegionsMap, newRegionsSet);
++
++ // only after invoking data callbacks
++
++ for (final ThreadedRegion<R, S> newRegion : newRegionsSet) {
++ newRegion.state = ThreadedRegion.STATE_STEADY_STATE;
++ if (!newRegion.expectingMergeFrom.isEmpty() || !newRegion.mergeIntoLater.isEmpty()) {
++ throw new IllegalStateException("Illegal state " + newRegion);
++ }
++ newRegion.onCreate();
++ this.callbacks.onRegionActive(newRegion);
++ }
++ }
++
++ public static final class ThreadedRegion<R extends ThreadedRegionData<R, S>, S extends ThreadedRegionSectionData> {
++
++ private static final AtomicLong REGION_ID_GENERATOR = new AtomicLong();
++
++ private static final int STATE_NOT_READY = 0;
++ private static final int STATE_STEADY_STATE = 1;
++ private static final int STATE_TICKING = 2;
++ private static final int STATE_DEAD = 3;
++
++ public final long id;
++
++ private int state;
++
++ private final Long2ReferenceOpenHashMap<ThreadedRegionSection<R, S>> sectionByKey = new Long2ReferenceOpenHashMap<>();
++ private final ReferenceOpenHashSet<ThreadedRegionSection<R, S>> deadSections = new ReferenceOpenHashSet<>();
++
++ public final ThreadedRegioniser<R, S> regioniser;
++
++ private final R data;
++
++ private final ReferenceOpenHashSet<ThreadedRegion<R, S>> mergeIntoLater = new ReferenceOpenHashSet<>();
++ private final ReferenceOpenHashSet<ThreadedRegion<R, S>> expectingMergeFrom = new ReferenceOpenHashSet<>();
++
++ public ThreadedRegion(final ThreadedRegioniser<R, S> regioniser) {
++ this.regioniser = regioniser;
++ this.id = REGION_ID_GENERATOR.getAndIncrement();
++ this.state = STATE_NOT_READY;
++ this.data = regioniser.callbacks.createNewData(this);
++ }
++
++ public LongArrayList getOwnedSections() {
++ final boolean lock = this.regioniser.writeLockOwner != Thread.currentThread();
++ if (lock) {
++ this.regioniser.regionLock.readLock();
++ }
++ try {
++ final LongArrayList ret = new LongArrayList(this.sectionByKey.size());
++ ret.addAll(this.sectionByKey.keySet());
++
++ return ret;
++ } finally {
++ if (lock) {
++ this.regioniser.regionLock.tryUnlockRead();
++ }
++ }
++ }
++
++ public ChunkPos getCenterChunk() {
++ final LongArrayList sections = this.getOwnedSections();
++
++ sections.sort(null);
++
++ // note: regions always have at least one section
++ final long middle = sections.getLong(sections.size() >> 1);
++
++ return new ChunkPos(CoordinateUtils.getChunkX(middle), CoordinateUtils.getChunkZ(middle));
++ }
++
++ private void onCreate() {
++ this.regioniser.onRegionCreate(this);
++ this.regioniser.callbacks.onRegionCreate(this);
++ }
++
++ private void onRemove(final boolean wasActive) {
++ if (wasActive) {
++ this.regioniser.callbacks.onRegionInactive(this);
++ }
++ this.regioniser.callbacks.onRegionDestroy(this);
++ this.regioniser.onRegionDestroy(this);
++ }
++
++ private boolean hasNoAliveSections() {
++ return this.deadSections.size() == this.sectionByKey.size();
++ }
++
++ private double getDeadSectionPercent() {
++ return (double)this.deadSections.size() / (double)this.sectionByKey.size();
++ }
++
++ private void split(final Long2ReferenceOpenHashMap<ThreadedRegion<R, S>> into, final ReferenceOpenHashSet<ThreadedRegion<R, S>> regions) {
++ if (this.data != null) {
++ this.data.split(this.regioniser, into, regions);
++ }
++ }
++
++ private void mergeInto(final ThreadedRegion<R, S> mergeTarget) {
++ if (this == mergeTarget) {
++ throw new IllegalStateException("Cannot merge a region onto itself");
++ }
++ if (!this.isDead()) {
++ throw new IllegalStateException("Source region is not dead! Source " + this + ", target " + mergeTarget);
++ } else if (mergeTarget.isDead()) {
++ throw new IllegalStateException("Target region is dead! Source " + this + ", target " + mergeTarget);
++ }
++
++ for (final ThreadedRegionSection<R, S> section : this.sectionByKey.values()) {
++ section.setRegionRelease(null);
++ mergeTarget.addSection(section);
++ }
++ for (final ThreadedRegionSection<R, S> deadSection : this.deadSections) {
++ if (this.sectionByKey.get(deadSection.sectionKey) != deadSection) {
++ throw new IllegalStateException("Source region does not even contain its own dead sections! Missing " + deadSection + " from region " + this);
++ }
++ if (!mergeTarget.deadSections.add(deadSection)) {
++ throw new IllegalStateException("Merge target contains dead section from source! Has " + deadSection + " from region " + this);
++ }
++ }
++
++ // forward merge expectations
++ for (final ThreadedRegion<R, S> region : this.expectingMergeFrom) {
++ if (!region.mergeIntoLater.remove(this)) {
++ throw new IllegalStateException("Region " + region + " was not supposed to merge into " + this + "?");
++ }
++ if (region != mergeTarget) {
++ region.mergeIntoLater(mergeTarget);
++ }
++ }
++
++ // forward merge into
++ for (final ThreadedRegion<R, S> region : this.mergeIntoLater) {
++ if (!region.expectingMergeFrom.remove(this)) {
++ throw new IllegalStateException("Region " + this + " was not supposed to merge into " + region + "?");
++ }
++ if (region != mergeTarget) {
++ mergeTarget.mergeIntoLater(region);
++ }
++ }
++
++ // finally, merge data
++ if (this.data != null) {
++ this.data.mergeInto(mergeTarget);
++ }
++ }
++
++ private void mergeIntoLater(final ThreadedRegion<R, S> region) {
++ if (region.isDead()) {
++ throw new IllegalStateException("Trying to merge later into a dead region: " + region);
++ }
++ final boolean add1, add2;
++ if ((add1 = this.mergeIntoLater.add(region)) != (add2 = region.expectingMergeFrom.add(this))) {
++ throw new IllegalStateException("Inconsistent state between target merge " + region + " and this " + this + ": add1,add2:" + add1 + "," + add2);
++ }
++ }
++
++ private boolean tryKill() {
++ switch (this.state) {
++ case STATE_NOT_READY: {
++ this.state = STATE_DEAD;
++ this.onRemove(false);
++ return true;
++ }
++ case STATE_STEADY_STATE: {
++ this.state = STATE_DEAD;
++ this.onRemove(true);
++ return true;
++ }
++ case STATE_TICKING: {
++ return false;
++ }
++ case STATE_DEAD: {
++ throw new IllegalStateException("Already dead");
++ }
++ default: {
++ throw new IllegalStateException("Unknown state: " + this.state);
++ }
++ }
++ }
++
++ private boolean isDead() {
++ return this.state == STATE_DEAD;
++ }
++
++ private boolean isTicking() {
++ return this.state == STATE_TICKING;
++ }
++
++ private void removeDeadSection(final ThreadedRegionSection<R, S> section) {
++ this.deadSections.remove(section);
++ }
++
++ private void addDeadSection(final ThreadedRegionSection<R, S> section) {
++ this.deadSections.add(section);
++ }
++
++ private void addSection(final ThreadedRegionSection<R, S> section) {
++ if (section.getRegionPlain() != null) {
++ throw new IllegalStateException("Section already has region");
++ }
++ if (this.sectionByKey.putIfAbsent(section.sectionKey, section) != null) {
++ throw new IllegalStateException("Already have section " + section + ", mapped to " + this.sectionByKey.get(section.sectionKey));
++ }
++ section.setRegionRelease(this);
++ }
++
++ public R getData() {
++ return this.data;
++ }
++
++ public boolean tryMarkTicking() {
++ this.regioniser.acquireWriteLock();
++ try {
++ if (this.state != STATE_STEADY_STATE) {
++ return false;
++ }
++
++ if (!this.mergeIntoLater.isEmpty() || !this.expectingMergeFrom.isEmpty()) {
++ throw new IllegalStateException("Region " + this + " should not be steady state");
++ }
++
++ this.state = STATE_TICKING;
++ return true;
++ } finally {
++ this.regioniser.releaseWriteLock();
++ }
++ }
++
++ public boolean markNotTicking() {
++ this.regioniser.acquireWriteLock();
++ try {
++ if (this.state != STATE_TICKING) {
++ throw new IllegalStateException("Attempting to release non-locked state");
++ }
++
++ this.regioniser.onRegionRelease(this);
++
++ return this.state == STATE_STEADY_STATE;
++ } finally {
++ this.regioniser.releaseWriteLock();
++ }
++ }
++
++ @Override
++ public String toString() {
++ final StringBuilder ret = new StringBuilder(128);
++
++ ret.append("ThreadedRegion{");
++ ret.append("state=").append(this.state).append(',');
++ // To avoid recursion in toString, maybe fix later?
++ //ret.append("mergeIntoLater=").append(this.mergeIntoLater).append(',');
++ //ret.append("expectingMergeFrom=").append(this.expectingMergeFrom).append(',');
++
++ ret.append("sectionCount=").append(this.sectionByKey.size()).append(',');
++ ret.append("sections=[");
++ for (final Iterator<ThreadedRegionSection<R, S>> iterator = this.sectionByKey.values().iterator(); iterator.hasNext();) {
++ final ThreadedRegionSection<R, S> section = iterator.next();
++
++ ret.append(section.toString());
++ if (iterator.hasNext()) {
++ ret.append(',');
++ }
++ }
++ ret.append(']');
++
++ ret.append('}');
++ return ret.toString();
++ }
++ }
++
++ public static final class ThreadedRegionSection<R extends ThreadedRegionData<R, S>, S extends ThreadedRegionSectionData> {
++
++ public final int sectionX;
++ public final int sectionZ;
++ public final long sectionKey;
++ private final long[] chunksBitset;
++ private int chunkCount;
++ private int nonEmptyNeighbours;
++
++ private ThreadedRegion<R, S> region;
++ private static final VarHandle REGION_HANDLE = ConcurrentUtil.getVarHandle(ThreadedRegionSection.class, "region", ThreadedRegion.class);
++
++ public final ThreadedRegioniser<R, S> regioniser;
++
++ private final int regionChunkShift;
++ private final int regionChunkMask;
++
++ private final S data;
++
++ private ThreadedRegion<R, S> getRegionPlain() {
++ return (ThreadedRegion<R, S>)REGION_HANDLE.get(this);
++ }
++
++ private ThreadedRegion<R, S> getRegionAcquire() {
++ return (ThreadedRegion<R, S>)REGION_HANDLE.getAcquire(this);
++ }
++
++ private void setRegionRelease(final ThreadedRegion<R, S> value) {
++ REGION_HANDLE.setRelease(this, value);
++ }
++
++ // creates an empty section with zero non-empty neighbours
++ private ThreadedRegionSection(final int sectionX, final int sectionZ, final ThreadedRegioniser<R, S> regioniser) {
++ this.sectionX = sectionX;
++ this.sectionZ = sectionZ;
++ this.sectionKey = CoordinateUtils.getChunkKey(sectionX, sectionZ);
++ this.chunksBitset = new long[Math.max(1, regioniser.regionSectionChunkSize * regioniser.regionSectionChunkSize / Long.SIZE)];
++ this.regioniser = regioniser;
++ this.regionChunkShift = regioniser.sectionChunkShift;
++ this.regionChunkMask = regioniser.regionSectionChunkSize - 1;
++ this.data = regioniser.callbacks
++ .createNewSectionData(sectionX, sectionZ, this.regionChunkShift);
++ }
++
++ // creates a section with an initial chunk with zero non-empty neighbours
++ private ThreadedRegionSection(final int sectionX, final int sectionZ, final ThreadedRegioniser<R, S> regioniser,
++ final int chunkXInit, final int chunkZInit) {
++ this(sectionX, sectionZ, regioniser);
++
++ final int initIndex = this.getChunkIndex(chunkXInit, chunkZInit);
++ this.chunkCount = 1;
++ this.chunksBitset[initIndex >>> 6] = 1L << (initIndex & (Long.SIZE - 1)); // index / Long.SIZE
++ }
++
++ // creates an empty section with the specified number of non-empty neighbours
++ private ThreadedRegionSection(final int sectionX, final int sectionZ, final ThreadedRegioniser<R, S> regioniser,
++ final int nonEmptyNeighbours) {
++ this(sectionX, sectionZ, regioniser);
++
++ this.nonEmptyNeighbours = nonEmptyNeighbours;
++ }
++
++ private boolean isEmpty() {
++ return this.chunkCount == 0;
++ }
++
++ private boolean hasOnlyOneChunk() {
++ return this.chunkCount == 1;
++ }
++
++ public boolean hasNonEmptyNeighbours() {
++ return this.nonEmptyNeighbours != 0;
++ }
++
++ /**
++ * Returns the section data associated with this region section. May be {@code null}.
++ */
++ public S getData() {
++ return this.data;
++ }
++
++ /**
++ * Returns the region that owns this section. Unsynchronised access may produce outdated or transient results.
++ */
++ public ThreadedRegion<R, S> getRegion() {
++ return this.getRegionAcquire();
++ }
++
++ private int getChunkIndex(final int chunkX, final int chunkZ) {
++ return (chunkX & this.regionChunkMask) | ((chunkZ & this.regionChunkMask) << this.regionChunkShift);
++ }
++
++ private void markAlive() {
++ this.getRegionPlain().removeDeadSection(this);
++ }
++
++ private void markDead() {
++ this.getRegionPlain().addDeadSection(this);
++ }
++
++ private void incrementNonEmptyNeighbours() {
++ if (++this.nonEmptyNeighbours == 1 && this.chunkCount == 0) {
++ this.markAlive();
++ }
++ final int createRadius = this.regioniser.emptySectionCreateRadius;
++ if (this.nonEmptyNeighbours >= ((createRadius * 2 + 1) * (createRadius * 2 + 1))) {
++ throw new IllegalStateException("Non empty neighbours exceeded max value for radius " + createRadius);
++ }
++ }
++
++ private void decrementNonEmptyNeighbours() {
++ if (--this.nonEmptyNeighbours == 0 && this.chunkCount == 0) {
++ this.markDead();
++ }
++ if (this.nonEmptyNeighbours < 0) {
++ throw new IllegalStateException("Non empty neighbours reached zero");
++ }
++ }
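++
++ // Liveness summary: a section sits in its region's dead set exactly when it has
++ // zero chunks AND zero non-empty neighbours; markAlive/markDead above track the
++ // transitions whenever either count moves between zero and non-zero.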
++
++ /**
++ * Returns whether the chunk count was zero before the add. Effectively returns whether the caller needs to create
++ * dead sections / increase non-empty neighbour count for neighbouring sections.
++ */
++ private boolean addChunk(final int chunkX, final int chunkZ) {
++ final int index = this.getChunkIndex(chunkX, chunkZ);
++ final long bitset = this.chunksBitset[index >>> 6]; // index / Long.SIZE
++ final long after = this.chunksBitset[index >>> 6] = bitset | (1L << (index & (Long.SIZE - 1)));
++ if (after == bitset) {
++ throw new IllegalStateException("Cannot add a chunk to a section which already has the chunk! RegionSection: " + this + ", global chunk: " + new ChunkPos(chunkX, chunkZ).toString());
++ }
++ final boolean notEmpty = ++this.chunkCount == 1;
++ if (notEmpty && this.nonEmptyNeighbours == 0) {
++ this.markAlive();
++ }
++ return notEmpty;
++ }
++
++ /**
++ * Returns whether the chunk count is now zero. Effectively returns whether
++ * the caller needs to decrement the neighbour count for neighbouring sections.
++ */
++ private boolean removeChunk(final int chunkX, final int chunkZ) {
++ final int index = this.getChunkIndex(chunkX, chunkZ);
++ final long before = this.chunksBitset[index >>> 6]; // index / Long.SIZE
++ final long bitset = this.chunksBitset[index >>> 6] = before & ~(1L << (index & (Long.SIZE - 1)));
++ if (before == bitset) {
++ throw new IllegalStateException("Cannot remove a chunk from a section which does not have that chunk! RegionSection: " + this + ", global chunk: " + new ChunkPos(chunkX, chunkZ).toString());
++ }
++ final boolean empty = --this.chunkCount == 0;
++ if (empty && this.nonEmptyNeighbours == 0) {
++ this.markDead();
++ }
++ return empty;
++ }
++
++ @Override
++ public String toString() {
++ return "RegionSection{" +
++ "sectionCoordinate=" + new ChunkPos(this.sectionX, this.sectionZ).toString() + "," +
++ "chunkCount=" + this.chunkCount + "," +
++ "chunksBitset=" + toString(this.chunksBitset) + "," +
++ "nonEmptyNeighbours=" + this.nonEmptyNeighbours + "," +
++ "hash=" + this.hashCode() +
++ "}";
++ }
++
++ public String toStringWithRegion() {
++ return "RegionSection{" +
++ "sectionCoordinate=" + new ChunkPos(this.sectionX, this.sectionZ).toString() + "," +
++ "chunkCount=" + this.chunkCount + "," +
++ "chunksBitset=" + toString(this.chunksBitset) + "," +
++ "hash=" + this.hashCode() + "," +
++ "nonEmptyNeighbours=" + this.nonEmptyNeighbours + "," +
++ "region=" + this.getRegionAcquire() +
++ "}";
++ }
++
++ private static String toString(final long[] array) {
++ final StringBuilder ret = new StringBuilder();
++ final char[] zeros = new char[Long.SIZE / 4];
++ for (final long value : array) {
++ // zero pad the hex string
++ Arrays.fill(zeros, '0');
++ final String string = Long.toHexString(value);
++ System.arraycopy(string.toCharArray(), 0, zeros, zeros.length - string.length(), string.length());
++
++ ret.append(zeros);
++ }
++
++ return ret.toString();
++ }
++ }
++
++ public static interface ThreadedRegionData<R extends ThreadedRegionData<R, S>, S extends ThreadedRegionSectionData> {
++
++ /**
++ * Splits this region data into the specified regions set.
++ * <p>
++ * Note:
++ * </p>
++ * <p>
++ * This function is always called while holding critical locks and as such should not attempt to block on anything, and
++ * should NOT retrieve or modify ANY world state.
++ * </p>
++ * @param regioniser Regioniser for which the regions reside in.
++ * @param into A map of region section coordinate key to the region that owns the section.
++ * @param regions The set of regions to split into.
++ */
++ public void split(final ThreadedRegioniser<R, S> regioniser, final Long2ReferenceOpenHashMap<ThreadedRegion<R, S>> into,
++ final ReferenceOpenHashSet<ThreadedRegion<R, S>> regions);
++
++ /**
++ * Callback to merge {@code this} region data into the specified region. The state of the region is undefined
++ * except that its region data is already created.
++ * <p>
++ * Note:
++ * </p>
++ * <p>
++ * This function is always called while holding critical locks and as such should not attempt to block on anything, and
++ * should NOT retrieve or modify ANY world state.
++ * </p>
++ * @param into Specified region.
++ */
++ public void mergeInto(final ThreadedRegion<R, S> into);
++ }
++
++ public static interface ThreadedRegionSectionData {}
++
++ public static interface RegionCallbacks<R extends ThreadedRegionData<R, S>, S extends ThreadedRegionSectionData> {
++
++ /**
++ * Creates new section data for the specified section x and section z.
++ * <p>
++ * Note:
++ * </p>
++ * <p>
++ * This function is always called while holding critical locks and as such should not attempt to block on anything, and
++ * should NOT retrieve or modify ANY world state.
++ * </p>
++ * @param sectionX x coordinate of the section.
++ * @param sectionZ z coordinate of the section.
++ * @param sectionShift The signed right shift value that can be applied to any chunk coordinate that
++ * produces a section coordinate.
++ * @return New section data, may be {@code null}.
++ */
++ public S createNewSectionData(final int sectionX, final int sectionZ, final int sectionShift);
++
++ /**
++ * Creates new region data for the specified region.
++ * <p>
++ * Note:
++ * </p>
++ * <p>
++ * This function is always called while holding critical locks and as such should not attempt to block on anything, and
++ * should NOT retrieve or modify ANY world state.
++ * </p>
++ * @param forRegion The region to create the data for.
++ * @return New region data, may be {@code null}.
++ */
++ public R createNewData(final ThreadedRegion<R, S> forRegion);
++
++ /**
++ * Callback for when a region is created. This is invoked after the region is completely set up,
++ * so its data and owned sections are reliable to inspect.
++ * <p>
++ * Note:
++ * </p>
++ * <p>
++ * This function is always called while holding critical locks and as such should not attempt to block on anything, and
++ * should NOT retrieve or modify ANY world state.
++ * </p>
++ * @param region The region that was created.
++ */
++ public void onRegionCreate(final ThreadedRegion<R, S> region);
++
++ /**
++ * Callback for when a region is destroyed. This is invoked before the region is actually destroyed, so
++ * its data and owned sections are reliable to inspect.
++ * <p>
++ * Note:
++ * </p>
++ * <p>
++ * This function is always called while holding critical locks and as such should not attempt to block on anything, and
++ * should NOT retrieve or modify ANY world state.
++ * </p>
++ * @param region The region that is about to be destroyed.
++ */
++ public void onRegionDestroy(final ThreadedRegion<R, S> region);
++
++ /**
++ * Callback for when a region is considered "active." An active region x is a non-destroyed region which
++ * is not scheduled to merge into another region y and there are no non-destroyed regions z which are
++ * scheduled to merge into the region x. Equivalently, an active region is not directly adjacent to any
++ * other region considering the regioniser's empty section radius.
++ * <p>
++ * Note:
++ * </p>
++ * <p>
++ * This function is always called while holding critical locks and as such should not attempt to block on anything, and
++ * should NOT retrieve or modify ANY world state.
++ * </p>
++ * @param region The region that is now active.
++ */
++ public void onRegionActive(final ThreadedRegion<R, S> region);
++
++ /**
++ * Callback for when a region becomes inactive. An inactive region is non-destroyed, but
++ * has adjacent regions considering the regioniser's empty section radius. Effectively,
++ * an inactive region may not tick and needs to be merged into its neighbouring regions.
++ * <p>
++ * Note:
++ * </p>
++ * <p>
++ * This function is always called while holding critical locks and as such should not attempt to block on anything, and
++ * should NOT retrieve or modify ANY world state.
++ * </p>
++ * @param region The region that is now inactive.
++ */
++ public void onRegionInactive(final ThreadedRegion<R, S> region);
++ }
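++
++ /*
++ * Implementation sketch (hypothetical MyRegionData/MySectionData types, illustrative only):
++ *
++ * final class MyCallbacks implements RegionCallbacks<MyRegionData, MySectionData> {
++ * public MySectionData createNewSectionData(int sectionX, int sectionZ, int sectionShift) { return null; }
++ * public MyRegionData createNewData(ThreadedRegion<MyRegionData, MySectionData> region) { return new MyRegionData(); }
++ * public void onRegionCreate(ThreadedRegion<MyRegionData, MySectionData> region) {}
++ * public void onRegionDestroy(ThreadedRegion<MyRegionData, MySectionData> region) {}
++ * public void onRegionActive(ThreadedRegion<MyRegionData, MySectionData> region) {} // e.g. begin scheduling ticks
++ * public void onRegionInactive(ThreadedRegion<MyRegionData, MySectionData> region) {} // e.g. halt ticking
++ * }
++ */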
++}
+diff --git a/src/main/java/io/papermc/paper/threadedregions/TickData.java b/src/main/java/io/papermc/paper/threadedregions/TickData.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..29f9fed5f02530b3256e6b993e607d4647daa7b6
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/TickData.java
+@@ -0,0 +1,333 @@
++package io.papermc.paper.threadedregions;
++
++import ca.spottedleaf.concurrentutil.util.TimeUtil;
++import io.papermc.paper.util.IntervalledCounter;
++import it.unimi.dsi.fastutil.longs.LongArrayList;
++
++import java.util.ArrayDeque;
++import java.util.ArrayList;
++import java.util.Arrays;
++import java.util.List;
++
++public final class TickData {
++
++ private final long interval; // ns
++
++ private final ArrayDeque<TickRegionScheduler.TickTime> timeData = new ArrayDeque<>();
++
++ public TickData(final long intervalNS) {
++ this.interval = intervalNS;
++ }
++
++ public void addDataFrom(final TickRegionScheduler.TickTime time) {
++ final long start = time.tickStart();
++
++ TickRegionScheduler.TickTime first;
++ while ((first = this.timeData.peekFirst()) != null) {
++ // only remove data completely out of window
++ if ((start - first.tickEnd()) <= this.interval) {
++ break;
++ }
++ this.timeData.pollFirst();
++ }
++
++ this.timeData.add(time);
++ }
++
++ // fromIndex inclusive, toIndex exclusive
++ // will throw if arr.length == 0
++ private static double median(final long[] arr, final int fromIndex, final int toIndex) {
++ final int len = toIndex - fromIndex;
++ final int middle = fromIndex + (len >>> 1);
++ if ((len & 1) == 0) {
++ // even, average the two middle points
++ return (double)(arr[middle - 1] + arr[middle]) / 2.0;
++ } else {
++ // odd, just grab the middle
++ return (double)arr[middle];
++ }
++ }
++
++ // will throw if arr.length == 0
++ private static SegmentData computeSegmentData(final long[] arr, final int fromIndex, final int toIndex,
++ final boolean inverse) {
++ final int len = toIndex - fromIndex;
++ long sum = 0L;
++ final double median = median(arr, fromIndex, toIndex);
++ long min = arr[fromIndex];
++ long max = arr[fromIndex];
++
++ for (int i = fromIndex; i < toIndex; ++i) {
++ final long val = arr[i];
++ sum += val;
++ if (val < min) {
++ min = val;
++ }
++ if (val > max) {
++ max = val;
++ }
++ }
++
++ if (inverse) {
++ // for positive a,b we have that a >= b if and only if 1/a <= 1/b
++ return new SegmentData(
++ len,
++ (double)len / ((double)sum / 1.0E9),
++ 1.0E9 / median,
++ 1.0E9 / (double)max,
++ 1.0E9 / (double)min
++ );
++ } else {
++ return new SegmentData(
++ len,
++ (double)sum / (double)len,
++ median,
++ (double)min,
++ (double)max
++ );
++ }
++ }
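++
++ // Worked example (illustrative): for tick start-to-start differences that are all
++ // 50ms = 5.0E7ns, inverse mode yields average = len / (len * 5.0E7 / 1.0E9) = 20
++ // and median = 1.0E9 / 5.0E7 = 20, i.e. 20 TPS.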
++
++ private static SegmentedAverage computeSegmentedAverage(final long[] data, final int allStart, final int allEnd,
++ final int percent99BestStart, final int percent99BestEnd,
++ final int percent95BestStart, final int percent95BestEnd,
++ final int percent1WorstStart, final int percent1WorstEnd,
++ final int percent5WorstStart, final int percent5WorstEnd,
++ final boolean inverse) {
++ return new SegmentedAverage(
++ computeSegmentData(data, allStart, allEnd, inverse),
++ computeSegmentData(data, percent99BestStart, percent99BestEnd, inverse),
++ computeSegmentData(data, percent95BestStart, percent95BestEnd, inverse),
++ computeSegmentData(data, percent5WorstStart, percent5WorstEnd, inverse),
++ computeSegmentData(data, percent1WorstStart, percent1WorstEnd, inverse)
++ );
++ }
++
++ private static record TickInformation(
++ long differenceFromLastTick,
++ long tickTime,
++ long tickTimeCPU
++ ) {}
++
++ // rets null if there is no data
++ public TickReportData generateTickReport(final TickRegionScheduler.TickTime inProgress, final long endTime) {
++ if (this.timeData.isEmpty() && inProgress == null) {
++ return null;
++ }
++
++ final List<TickRegionScheduler.TickTime> allData = new ArrayList<>(this.timeData);
++ if (inProgress != null) {
++ allData.add(inProgress);
++ }
++
++ final long intervalStart = allData.get(0).tickStart();
++ final long intervalEnd = allData.get(allData.size() - 1).tickEnd();
++
++ // to make utilisation accurate, we need to take the total time used over the last interval period -
++ // this means if a tick starts before the measurement interval, but ends within the interval, then we
++ // only consider the time it spent ticking inside the interval
++ long totalTimeOverInterval = 0L;
++ long measureStart = endTime - this.interval;
++
++ for (int i = 0, len = allData.size(); i < len; ++i) {
++ final TickRegionScheduler.TickTime time = allData.get(i);
++ if (TimeUtil.compareTimes(time.tickStart(), measureStart) < 0) {
++ final long diff = time.tickEnd() - measureStart;
++ if (diff > 0L) {
++ totalTimeOverInterval += diff;
++ } // else: the time is entirely out of interval
++ } else {
++ totalTimeOverInterval += time.tickLength();
++ }
++ }
++
++ // we only care about ticks, but because of inbetween tick task execution
++ // there will be data in allData that isn't ticks. But, that data cannot
++ // be ignored since it contributes to utilisation.
++ // So, we will "compact" the data by merging any inbetween tick times
++ // into the next tick.
++ // If there is no "next tick", then we will create one.
++ final List<TickInformation> collapsedData = new ArrayList<>();
++ for (int i = 0, len = allData.size(); i < len; ++i) {
++ final List<TickRegionScheduler.TickTime> toCollapse = new ArrayList<>();
++ TickRegionScheduler.TickTime lastTick = null;
++ for (;i < len; ++i) {
++ final TickRegionScheduler.TickTime time = allData.get(i);
++ if (!time.isTickExecution()) {
++ toCollapse.add(time);
++ continue;
++ }
++ lastTick = time;
++ break;
++ }
++
++ if (toCollapse.isEmpty()) {
++ // nothing to collapse
++ final TickRegionScheduler.TickTime last = allData.get(i);
++ collapsedData.add(
++ new TickInformation(
++ last.differenceFromLastTick(),
++ last.tickLength(),
++ last.supportCPUTime() ? last.tickCpuTime() : 0L
++ )
++ );
++ } else {
++ long totalTickTime = 0L;
++ long totalCpuTime = 0L;
++ for (int k = 0, len2 = toCollapse.size(); k < len2; ++k) {
++ final TickRegionScheduler.TickTime time = toCollapse.get(k);
++ totalTickTime += time.tickLength();
++ totalCpuTime += time.supportCPUTime() ? time.tickCpuTime() : 0L;
++ }
++ if (i < len) {
++ // we know there is a tick to collapse into
++ final TickRegionScheduler.TickTime last = allData.get(i);
++ collapsedData.add(
++ new TickInformation(
++ last.differenceFromLastTick(),
++ last.tickLength() + totalTickTime,
++ (last.supportCPUTime() ? last.tickCpuTime() : 0L) + totalCpuTime
++ )
++ );
++ } else {
++ // we do not have a tick to collapse into, so we must make one up
++ // we will assume that the tick is "starting now" and ongoing
++
++ // compute difference between imaginary tick and last tick
++ final long differenceBetweenTicks;
++ if (lastTick != null) {
++ // we have a last tick, use it
++ differenceBetweenTicks = lastTick.tickStart();
++ } else {
++ // we don't have a last tick, so we must make one up that makes sense
++ // if the current interval exceeds the max tick time, then use it
++
++ // Otherwise use the interval length.
++ // This is how differenceFromLastTick() works on TickTime when there is no previous interval.
++ differenceBetweenTicks = Math.max(
++ TickRegionScheduler.TIME_BETWEEN_TICKS, totalTickTime
++ );
++ }
++
++ collapsedData.add(
++ new TickInformation(
++ differenceBetweenTicks,
++ totalTickTime,
++ totalCpuTime
++ )
++ );
++ }
++ }
++ }
++
++
++ final int collectedTicks = collapsedData.size();
++ final long[] tickStartToStartDifferences = new long[collectedTicks];
++ final long[] timePerTickDataRaw = new long[collectedTicks];
++ final long[] missingCPUTimeDataRaw = new long[collectedTicks];
++
++ long totalTimeTicking = 0L;
++
++ int i = 0;
++ for (final TickInformation time : collapsedData) {
++ tickStartToStartDifferences[i] = time.differenceFromLastTick();
++ final long timePerTick = timePerTickDataRaw[i] = time.tickTime();
++ missingCPUTimeDataRaw[i] = Math.max(0L, timePerTick - time.tickTimeCPU());
++
++ ++i;
++
++ totalTimeTicking += timePerTick;
++ }
++
++ Arrays.sort(tickStartToStartDifferences);
++ Arrays.sort(timePerTickDataRaw);
++ Arrays.sort(missingCPUTimeDataRaw);
++
++ // Note: computeSegmentData cannot take start == end
++ final int allStart = 0;
++ final int allEnd = collectedTicks;
++ final int percent95BestStart = 0;
++ final int percent95BestEnd = collectedTicks == 1 ? 1 : (int)(0.95 * collectedTicks);
++ final int percent99BestStart = 0;
++ // (int)(0.99 * collectedTicks) == 0 if collectedTicks = 1, so we need to use 1 to avoid start == end
++ final int percent99BestEnd = collectedTicks == 1 ? 1 : (int)(0.99 * collectedTicks);
++ final int percent1WorstStart = (int)(0.99 * collectedTicks);
++ final int percent1WorstEnd = collectedTicks;
++ final int percent5WorstStart = (int)(0.95 * collectedTicks);
++ final int percent5WorstEnd = collectedTicks;
++
++ final SegmentedAverage tpsData = computeSegmentedAverage(
++ tickStartToStartDifferences,
++ allStart, allEnd,
++ percent99BestStart, percent99BestEnd,
++ percent95BestStart, percent95BestEnd,
++ percent1WorstStart, percent1WorstEnd,
++ percent5WorstStart, percent5WorstEnd,
++ true
++ );
++
++ final SegmentedAverage timePerTickData = computeSegmentedAverage(
++ timePerTickDataRaw,
++ allStart, allEnd,
++ percent99BestStart, percent99BestEnd,
++ percent95BestStart, percent95BestEnd,
++ percent1WorstStart, percent1WorstEnd,
++ percent5WorstStart, percent5WorstEnd,
++ false
++ );
++
++ final SegmentedAverage missingCPUTimeData = computeSegmentedAverage(
++ missingCPUTimeDataRaw,
++ allStart, allEnd,
++ percent99BestStart, percent99BestEnd,
++ percent95BestStart, percent95BestEnd,
++ percent1WorstStart, percent1WorstEnd,
++ percent5WorstStart, percent5WorstEnd,
++ false
++ );
++
++ final double utilisation = (double)totalTimeOverInterval / (double)this.interval;
++
++ return new TickReportData(
++ collectedTicks,
++ intervalStart,
++ intervalEnd,
++ totalTimeTicking,
++ utilisation,
++
++ tpsData,
++ timePerTickData,
++ missingCPUTimeData
++ );
++ }
++
++ public static final record TickReportData(
++ int collectedTicks,
++ long collectedTickIntervalStart,
++ long collectedTickIntervalEnd,
++ long totalTimeTicking,
++ double utilisation,
++
++ SegmentedAverage tpsData,
++ // in ns
++ SegmentedAverage timePerTickData,
++ // in ns
++ SegmentedAverage missingCPUTimeData
++ ) {}
++
++ public static final record SegmentedAverage(
++ SegmentData segmentAll,
++ SegmentData segment99PercentBest,
++ SegmentData segment95PercentBest,
++ SegmentData segment5PercentWorst,
++ SegmentData segment1PercentWorst
++ ) {}
++
++ public static final record SegmentData(
++ int count,
++ double average,
++ double median,
++ double least,
++ double greatest
++ ) {}
++}
+diff --git a/src/main/java/io/papermc/paper/threadedregions/TickRegionScheduler.java b/src/main/java/io/papermc/paper/threadedregions/TickRegionScheduler.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..65145994bd062c5c22d8fdf8124e7833323a3ff2
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/TickRegionScheduler.java
+@@ -0,0 +1,544 @@
++package io.papermc.paper.threadedregions;
++
++import ca.spottedleaf.concurrentutil.scheduler.SchedulerThreadPool;
++import ca.spottedleaf.concurrentutil.util.TimeUtil;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.util.TickThread;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.world.level.ChunkPos;
++import org.slf4j.Logger;
++import java.lang.management.ManagementFactory;
++import java.lang.management.ThreadMXBean;
++import java.util.concurrent.ThreadFactory;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.atomic.AtomicBoolean;
++import java.util.concurrent.atomic.AtomicInteger;
++import java.util.function.BooleanSupplier;
++
++public final class TickRegionScheduler {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++ private static final ThreadMXBean THREAD_MX_BEAN = ManagementFactory.getThreadMXBean();
++ private static final boolean MEASURE_CPU_TIME;
++ static {
++ MEASURE_CPU_TIME = THREAD_MX_BEAN.isThreadCpuTimeSupported();
++ if (MEASURE_CPU_TIME) {
++ THREAD_MX_BEAN.setThreadCpuTimeEnabled(true);
++ } else {
++ LOGGER.warn("TickRegionScheduler CPU time measurement is not available");
++ }
++ }
++
++ public static final int TICK_RATE = 20;
++ public static final long TIME_BETWEEN_TICKS = 1_000_000_000L / TICK_RATE; // ns
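++ // i.e. 1,000,000,000ns / 20 ticks = 50,000,000ns = 50ms between tick starts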
++
++ private final SchedulerThreadPool scheduler;
++
++ public TickRegionScheduler(final int threads) {
++ this.scheduler = new SchedulerThreadPool(threads, new ThreadFactory() {
++ private final AtomicInteger idGenerator = new AtomicInteger();
++
++ @Override
++ public Thread newThread(final Runnable run) {
++ final Thread ret = new TickThreadRunner(run, "Region Scheduler Thread #" + this.idGenerator.getAndIncrement());
++ return ret;
++ }
++ });
++ }
++
++ public int getTotalThreadCount() {
++ return this.scheduler.getThreads().length;
++ }
++
++ private static void setTickingRegion(final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> region) {
++ final Thread currThread = Thread.currentThread();
++ if (!(currThread instanceof TickThreadRunner tickThreadRunner)) {
++ throw new IllegalStateException("Must be tick thread runner");
++ }
++ if (region != null && tickThreadRunner.currentTickingRegion != null) {
++ throw new IllegalStateException("Trying to double set ticking region!");
++ }
++ if (region == null && tickThreadRunner.currentTickingRegion == null) {
++ throw new IllegalStateException("Trying to double unset ticking region!");
++ }
++ tickThreadRunner.currentTickingRegion = region;
++ if (region != null) {
++ tickThreadRunner.currentTickingWorldRegionisedData = region.regioniser.world.worldRegionData.get();
++ } else {
++ tickThreadRunner.currentTickingWorldRegionisedData = null;
++ }
++ }
++
++ private static void setTickTask(final SchedulerThreadPool.SchedulableTick task) {
++ final Thread currThread = Thread.currentThread();
++ if (!(currThread instanceof TickThreadRunner tickThreadRunner)) {
++ throw new IllegalStateException("Must be tick thread runner");
++ }
++ if (task != null && tickThreadRunner.currentTickingTask != null) {
++ throw new IllegalStateException("Trying to double set ticking task!");
++ }
++ if (task == null && tickThreadRunner.currentTickingTask == null) {
++ throw new IllegalStateException("Trying to double unset ticking task!");
++ }
++ tickThreadRunner.currentTickingTask = task;
++ }
++
++ /**
++ * Returns the current ticking region, or {@code null} if there is no ticking region.
++ * If this thread is not a TickThread, then returns {@code null}.
++ */
++ public static ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> getCurrentRegion() {
++ final Thread currThread = Thread.currentThread();
++ if (!(currThread instanceof TickThreadRunner tickThreadRunner)) {
++ return RegionShutdownThread.getRegion();
++ }
++ return tickThreadRunner.currentTickingRegion;
++ }
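++
++ // Typical usage (illustrative comment, not part of the original patch):
++ //
++ // final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> region =
++ // TickRegionScheduler.getCurrentRegion();
++ // if (region == null) { /* not called from a region tick thread */ }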
++
++ /**
++ * Returns the current ticking region's world regionised data, or {@code null} if there is no ticking region.
++ * This is a faster alternative to calling the {@link RegionisedData#get()} method.
++ * If this thread is not a TickThread, then returns {@code null}.
++ */
++ public static RegionisedWorldData getCurrentRegionisedWorldData() {
++ final Thread currThread = Thread.currentThread();
++ if (!(currThread instanceof TickThreadRunner tickThreadRunner)) {
++ return RegionShutdownThread.getWorldData();
++ }
++ return tickThreadRunner.currentTickingWorldRegionisedData;
++ }
++
++ /**
++ * Returns the current ticking task, or {@code null} if there is no ticking region.
++ * If this thread is not a TickThread, then returns {@code null}.
++ */
++ public static SchedulerThreadPool.SchedulableTick getCurrentTickingTask() {
++ final Thread currThread = Thread.currentThread();
++ if (!(currThread instanceof TickThreadRunner tickThreadRunner)) {
++ return null;
++ }
++ return tickThreadRunner.currentTickingTask;
++ }
++
++ /**
++ * Schedules the given region
++ * @throws IllegalStateException If the region is already scheduled or is ticking
++ */
++ public void scheduleRegion(final RegionScheduleHandle region) {
++ region.scheduler = this;
++ this.scheduler.schedule(region);
++ }
++
++ /**
++ * Attempts to de-schedule the provided region. If the region cannot be cancelled before its next tick or task
++ * execution, then it will be cancelled afterwards.
++ */
++ public void descheduleRegion(final RegionScheduleHandle region) {
++ // To avoid acquiring any of the locks the scheduler may be using, we
++ // simply cancel the next action.
++ region.markNonSchedulable();
++ }
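++
++ // Lifecycle sketch (illustrative comment, not part of the original patch):
++ // a handle is scheduled when its region becomes active and is marked
++ // non-schedulable when it goes inactive; see TickRegions.onRegionActive /
++ // onRegionInactive further below in this patch.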
++
++ /**
++ * Updates the tick start to the later of its current scheduled time and the
++ * provided time.
++ * @return {@code false} if the region was not scheduled, is currently ticking, or the specified time is less than its
++ * current start time; {@code true} if the next tick start was adjusted.
++ */
++ public boolean updateTickStartToMax(final RegionScheduleHandle region, final long newStart) {
++ return this.scheduler.updateTickStartToMax(region, newStart);
++ }
++
++ public boolean halt(final boolean sync, final long maxWaitNS) {
++ return this.scheduler.halt(sync, maxWaitNS);
++ }
++
++ public void setHasTasks(final RegionScheduleHandle region) {
++ this.scheduler.notifyTasks(region);
++ }
++
++ public void init() {
++ this.scheduler.start();
++ }
++
++ private void regionFailed(final RegionScheduleHandle handle, final boolean executingTasks, final Throwable thr) {
++ // when a region fails, we need to shut down the server gracefully
++
++ // prevent further ticks from occurring
++ // we CANNOT sync, because WE ARE ON A SCHEDULER THREAD
++ this.scheduler.halt(false, 0L);
++
++ final ChunkPos center = handle.region.region.getCenterChunk();
++
++ LOGGER.error("Region #" + handle.region.id + " centered at chunk " + center + " failed to " + (executingTasks ? "execute tasks" : "tick") + ":", thr);
++
++ MinecraftServer.getServer().stopServer();
++ }
++
++ // By using our own thread object, we can use a field for the current region rather than a ThreadLocal.
++ // This is much faster than a thread local, since the thread local has to use a map lookup.
++ private static final class TickThreadRunner extends TickThread {
++
++ private ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> currentTickingRegion;
++ private RegionisedWorldData currentTickingWorldRegionisedData;
++ private SchedulerThreadPool.SchedulableTick currentTickingTask;
++
++ public TickThreadRunner(final Runnable run, final String name) {
++ super(run, name);
++ }
++ }
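++
++ // For contrast (illustrative comment, not part of the original patch): the
++ // ThreadLocal-based alternative mentioned above would look roughly like
++ //
++ // private static final ThreadLocal<ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData>>
++ // CURRENT_REGION = new ThreadLocal<>();
++ //
++ // and every getCurrentRegion() call would pay a ThreadLocalMap lookup instead
++ // of a plain field read on the current thread object.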
++
++ public static abstract class RegionScheduleHandle extends SchedulerThreadPool.SchedulableTick {
++
++ protected long currentTick;
++ protected long lastTickStart;
++
++ protected final TickData tickTimes5s;
++ protected final TickData tickTimes15s;
++ protected final TickData tickTimes1m;
++ protected final TickData tickTimes5m;
++ protected final TickData tickTimes15m;
++ protected TickTime currentTickData;
++ protected Thread currentTickingThread;
++
++ public final TickRegions.TickRegionData region;
++ private final AtomicBoolean cancelled = new AtomicBoolean();
++
++ protected final Schedule tickSchedule;
++
++ private TickRegionScheduler scheduler;
++
++ public RegionScheduleHandle(final TickRegions.TickRegionData region, final long firstStart) {
++ this.currentTick = 0L;
++ this.lastTickStart = SchedulerThreadPool.DEADLINE_NOT_SET;
++ this.tickTimes5s = new TickData(TimeUnit.SECONDS.toNanos(5L));
++ this.tickTimes15s = new TickData(TimeUnit.SECONDS.toNanos(15L));
++ this.tickTimes1m = new TickData(TimeUnit.MINUTES.toNanos(1L));
++ this.tickTimes5m = new TickData(TimeUnit.MINUTES.toNanos(5L));
++ this.tickTimes15m = new TickData(TimeUnit.MINUTES.toNanos(15L));
++ this.region = region;
++
++ this.setScheduledStart(firstStart);
++ this.tickSchedule = new Schedule(firstStart == SchedulerThreadPool.DEADLINE_NOT_SET ? firstStart : firstStart - TIME_BETWEEN_TICKS);
++ }
++
++ /**
++ * Subclasses should call this instead of {@link ca.spottedleaf.concurrentutil.scheduler.SchedulerThreadPool.SchedulableTick#setScheduledStart(long)}
++ * so that the tick schedule and scheduled start remain synchronised
++ */
++ protected final void updateScheduledStart(final long to) {
++ this.setScheduledStart(to);
++ this.tickSchedule.setLastPeriod(to == SchedulerThreadPool.DEADLINE_NOT_SET ? to : to - TIME_BETWEEN_TICKS);
++ }
++
++ public final void markNonSchedulable() {
++ this.cancelled.set(true);
++ }
++
++ protected abstract boolean tryMarkTicking();
++
++ protected abstract boolean markNotTicking();
++
++ protected abstract void tickRegion(final int tickCount, final long startTime, final long scheduledEnd);
++
++ protected abstract boolean runRegionTasks(final BooleanSupplier canContinue);
++
++ protected abstract boolean hasIntermediateTasks();
++
++ @Override
++ public final boolean hasTasks() {
++ return this.hasIntermediateTasks();
++ }
++
++ @Override
++ public final Boolean runTasks(final BooleanSupplier canContinue) {
++ if (this.cancelled.get()) {
++ return null;
++ }
++
++ final long cpuStart = MEASURE_CPU_TIME ? THREAD_MX_BEAN.getCurrentThreadCpuTime() : 0L;
++ final long tickStart = System.nanoTime();
++
++ if (!this.tryMarkTicking()) {
++ if (!this.cancelled.get()) {
++ throw new IllegalStateException("Scheduled region should be acquirable");
++ }
++ // region was killed
++ return null;
++ }
++
++ TickRegionScheduler.setTickTask(this);
++ if (this.region != null) {
++ TickRegionScheduler.setTickingRegion(this.region.region);
++ }
++
++ synchronized (this) {
++ this.currentTickData = new TickTime(
++ SchedulerThreadPool.DEADLINE_NOT_SET, SchedulerThreadPool.DEADLINE_NOT_SET, tickStart, cpuStart,
++ SchedulerThreadPool.DEADLINE_NOT_SET, SchedulerThreadPool.DEADLINE_NOT_SET, MEASURE_CPU_TIME,
++ false
++ );
++ this.currentTickingThread = Thread.currentThread();
++ }
++
++ final boolean ret;
++ try {
++ ret = this.runRegionTasks(() -> {
++ return !RegionScheduleHandle.this.cancelled.get() && canContinue.getAsBoolean();
++ });
++ } catch (final Throwable thr) {
++ this.scheduler.regionFailed(this, true, thr);
++ if (thr instanceof ThreadDeath) {
++ throw (ThreadDeath)thr;
++ }
++ // don't release region for another tick
++ return null;
++ } finally {
++ TickRegionScheduler.setTickTask(null);
++ if (this.region != null) {
++ TickRegionScheduler.setTickingRegion(null);
++ }
++ final long tickEnd = System.nanoTime();
++ final long cpuEnd = MEASURE_CPU_TIME ? THREAD_MX_BEAN.getCurrentThreadCpuTime() : 0L;
++
++ final TickTime time = new TickTime(
++ SchedulerThreadPool.DEADLINE_NOT_SET, SchedulerThreadPool.DEADLINE_NOT_SET,
++ tickStart, cpuStart, tickEnd, cpuEnd, MEASURE_CPU_TIME, false
++ );
++
++ this.addTickTime(time);
++ }
++
++ return !this.markNotTicking() || this.cancelled.get() ? null : Boolean.valueOf(ret);
++ }
++
++ @Override
++ public final boolean runTick() {
++ // Remember, we are supposed to use setScheduledStart if we return true here, otherwise
++ // the scheduler will try to schedule for the same time.
++ if (this.cancelled.get()) {
++ return false;
++ }
++
++ final long cpuStart = MEASURE_CPU_TIME ? THREAD_MX_BEAN.getCurrentThreadCpuTime() : 0L;
++ final long tickStart = System.nanoTime();
++
++ // use max(), don't assume that tickStart >= scheduledStart
++ final int tickCount = Math.max(1, this.tickSchedule.getPeriodsAhead(TIME_BETWEEN_TICKS, tickStart));
++
++ if (!this.tryMarkTicking()) {
++ if (!this.cancelled.get()) {
++ throw new IllegalStateException("Scheduled region should be acquirable");
++ }
++ // region was killed
++ return false;
++ }
++ if (this.cancelled.get()) {
++ this.markNotTicking();
++ // region should be killed
++ return false;
++ }
++
++ TickRegionScheduler.setTickTask(this);
++ if (this.region != null) {
++ TickRegionScheduler.setTickingRegion(this.region.region);
++ }
++ this.incrementTickCount();
++ final long lastTickStart = this.lastTickStart;
++ this.lastTickStart = tickStart;
++
++ final long scheduledStart = this.getScheduledStart();
++ final long scheduledEnd = scheduledStart + TIME_BETWEEN_TICKS;
++
++ synchronized (this) {
++ this.currentTickData = new TickTime(
++ lastTickStart, scheduledStart, tickStart, cpuStart,
++ SchedulerThreadPool.DEADLINE_NOT_SET, SchedulerThreadPool.DEADLINE_NOT_SET, MEASURE_CPU_TIME,
++ true
++ );
++ this.currentTickingThread = Thread.currentThread();
++ }
++
++ try {
++ // next start isn't updated until the end of this tick
++ this.tickRegion(tickCount, tickStart, scheduledEnd);
++ } catch (final Throwable thr) {
++ this.scheduler.regionFailed(this, false, thr);
++ if (thr instanceof ThreadDeath) {
++ throw (ThreadDeath)thr;
++ }
++ // regionFailed will schedule a shutdown, so we should avoid letting this region tick further
++ return false;
++ } finally {
++ TickRegionScheduler.setTickTask(null);
++ if (this.region != null) {
++ TickRegionScheduler.setTickingRegion(null);
++ }
++ final long tickEnd = System.nanoTime();
++ final long cpuEnd = MEASURE_CPU_TIME ? THREAD_MX_BEAN.getCurrentThreadCpuTime() : 0L;
++
++ // in order to ensure all regions get their chance at scheduling, we have to ensure that regions
++ // that exceed the max tick time are not always prioritised over everything else. Thus, we use the greatest
++ // of the current time and "ideal" next tick start.
++ this.tickSchedule.advanceBy(tickCount, TIME_BETWEEN_TICKS);
++ this.setScheduledStart(TimeUtil.getGreatestTime(tickEnd, this.tickSchedule.getDeadline(TIME_BETWEEN_TICKS)));
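++ // Worked example (illustrative comment, not part of the original patch): with a
++ // 50ms tick interval, a tick scheduled at t that finishes at t + 75ms has an
++ // ideal next start of t + 50ms, but max(t + 75ms, t + 50ms) = t + 75ms is used,
++ // so other regions can be scheduled during the 25ms this region overran.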
++
++ final TickTime time = new TickTime(
++ lastTickStart, scheduledStart, tickStart, cpuStart, tickEnd, cpuEnd, MEASURE_CPU_TIME, true
++ );
++
++ this.addTickTime(time);
++ }
++
++ // Only AFTER updating the tickStart
++ return this.markNotTicking() && !this.cancelled.get();
++ }
++
++ /**
++ * Only safe to call if this tick data matches the current ticking region.
++ */
++ private void addTickTime(final TickTime time) {
++ synchronized (this) {
++ this.currentTickData = null;
++ this.currentTickingThread = null;
++ this.tickTimes5s.addDataFrom(time);
++ this.tickTimes15s.addDataFrom(time);
++ this.tickTimes1m.addDataFrom(time);
++ this.tickTimes5m.addDataFrom(time);
++ this.tickTimes15m.addDataFrom(time);
++ }
++ }
++
++ private TickTime adjustCurrentTickData(final long tickEnd) {
++ final TickTime currentTickData = this.currentTickData;
++ if (currentTickData == null) {
++ return null;
++ }
++
++ final long cpuEnd = MEASURE_CPU_TIME ? THREAD_MX_BEAN.getThreadCpuTime(this.currentTickingThread.getId()) : 0L;
++
++ return new TickTime(
++ currentTickData.previousTickStart(), currentTickData.scheduledTickStart(),
++ currentTickData.tickStart(), currentTickData.tickStartCPU(),
++ tickEnd, cpuEnd,
++ MEASURE_CPU_TIME, currentTickData.isTickExecution()
++ );
++ }
++
++ public final TickData.TickReportData getTickReport5s(final long currTime) {
++ synchronized (this) {
++ return this.tickTimes5s.generateTickReport(this.adjustCurrentTickData(currTime), currTime);
++ }
++ }
++
++ public final TickData.TickReportData getTickReport15s(final long currTime) {
++ synchronized (this) {
++ return this.tickTimes15s.generateTickReport(this.adjustCurrentTickData(currTime), currTime);
++ }
++ }
++
++ public final TickData.TickReportData getTickReport1m(final long currTime) {
++ synchronized (this) {
++ return this.tickTimes1m.generateTickReport(this.adjustCurrentTickData(currTime), currTime);
++ }
++ }
++
++ public final TickData.TickReportData getTickReport5m(final long currTime) {
++ synchronized (this) {
++ return this.tickTimes5m.generateTickReport(this.adjustCurrentTickData(currTime), currTime);
++ }
++ }
++
++ public final TickData.TickReportData getTickReport15m(final long currTime) {
++ synchronized (this) {
++ return this.tickTimes15m.generateTickReport(this.adjustCurrentTickData(currTime), currTime);
++ }
++ }
++
++ /**
++ * Only safe to call if this tick data matches the current ticking region.
++ */
++ private void incrementTickCount() {
++ ++this.currentTick;
++ }
++
++ /**
++ * Only safe to call if this tick data matches the current ticking region.
++ */
++ public final long getCurrentTick() {
++ return this.currentTick;
++ }
++
++ protected final void setCurrentTick(final long value) {
++ this.currentTick = value;
++ }
++ }
++
++ // All time units are in nanoseconds.
++ public static final record TickTime(
++ long previousTickStart,
++ long scheduledTickStart,
++ long tickStart,
++ long tickStartCPU,
++ long tickEnd,
++ long tickEndCPU,
++ boolean supportCPUTime,
++ boolean isTickExecution
++ ) {
++ /**
++ * The difference between the start tick time and the scheduled start tick time. This value is
++ * < 0 if the tick started before the scheduled tick time.
++ * Only valid when {@link #isTickExecution()} is {@code true}.
++ */
++ public final long startOvershoot() {
++ return this.tickStart - this.scheduledTickStart;
++ }
++
++ /**
++ * The difference between the end tick time and the start tick time. Always >= 0 (unless nanoTime is just wrong).
++ */
++ public final long tickLength() {
++ return this.tickEnd - this.tickStart;
++ }
++
++ /**
++ * The total CPU time from the start tick time to the end tick time. Generally should be equal to the tickLength,
++ * unless there is CPU starvation or the tick thread was blocked by I/O or other tasks. Returns Long.MIN_VALUE
++ * if CPU time measurement is not supported.
++ */
++ public final long tickCpuTime() {
++ if (!this.supportCPUTime()) {
++ return Long.MIN_VALUE;
++ }
++ return this.tickEndCPU - this.tickStartCPU;
++ }
++
++ /**
++ * The difference in time from the start of the last tick to the start of the current tick. If there is no
++ * last tick, then this value is max(TIME_BETWEEN_TICKS, tickLength).
++ * Only valid when {@link #isTickExecution()} is {@code true}.
++ */
++ public final long differenceFromLastTick() {
++ if (this.hasLastTick()) {
++ return this.tickStart - this.previousTickStart;
++ }
++ return Math.max(TIME_BETWEEN_TICKS, this.tickLength());
++ }
++
++ /**
++ * Returns whether there was a tick that occurred before this one.
++ * Only valid when {@link #isTickExecution()} is {@code true}.
++ */
++ public boolean hasLastTick() {
++ return this.previousTickStart != SchedulerThreadPool.DEADLINE_NOT_SET;
++ }
++
++ /*
++ * Remember, this is the expected behavior of the following:
++ *
++ * MSPT: Time per tick. This does not include overshoot time, just the tickLength().
++ *
++ * TPS: The number of ticks per second. It should be ticks / (sum of differenceFromLastTick).
++ */
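++
++ // Minimal sketch (illustrative only, not part of the original patch) of the
++ // MSPT and TPS formulas described above, over a window of completed tick
++ // samples. Method names are hypothetical; results are milliseconds per tick
++ // and ticks per second respectively.
++ public static double windowMsptMillis(final java.util.List<TickTime> window) {
++ long totalLength = 0L;
++ for (final TickTime time : window) {
++ totalLength += time.tickLength();
++ }
++ return window.isEmpty() ? 0.0 : ((double)totalLength / 1.0E6) / (double)window.size();
++ }
++
++ public static double windowTps(final java.util.List<TickTime> window) {
++ long totalBetween = 0L;
++ for (final TickTime time : window) {
++ totalBetween += time.differenceFromLastTick();
++ }
++ return totalBetween == 0L ? 0.0 : (double)window.size() / ((double)totalBetween / 1.0E9);
++ }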
++ }
++}
+diff --git a/src/main/java/io/papermc/paper/threadedregions/TickRegions.java b/src/main/java/io/papermc/paper/threadedregions/TickRegions.java
+new file mode 100644
+index 0000000000000000000000000000000000000000..c17669c1e98cd954643fa3b988c12b4b6c3b174e
+--- /dev/null
++++ b/src/main/java/io/papermc/paper/threadedregions/TickRegions.java
+@@ -0,0 +1,340 @@
++package io.papermc.paper.threadedregions;
++
++import ca.spottedleaf.concurrentutil.scheduler.SchedulerThreadPool;
++import ca.spottedleaf.concurrentutil.util.TimeUtil;
++import com.mojang.logging.LogUtils;
++import io.papermc.paper.chunk.system.scheduling.ChunkHolderManager;
++import io.papermc.paper.configuration.GlobalConfiguration;
++import it.unimi.dsi.fastutil.longs.Long2ReferenceMap;
++import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap;
++import it.unimi.dsi.fastutil.objects.Reference2ReferenceMap;
++import it.unimi.dsi.fastutil.objects.Reference2ReferenceOpenHashMap;
++import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet;
++import net.minecraft.server.MinecraftServer;
++import net.minecraft.server.level.ServerLevel;
++import org.slf4j.Logger;
++import java.util.Iterator;
++import java.util.concurrent.TimeUnit;
++import java.util.concurrent.atomic.AtomicLong;
++import java.util.function.BooleanSupplier;
++
++public final class TickRegions implements ThreadedRegioniser.RegionCallbacks<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> {
++
++ private static final Logger LOGGER = LogUtils.getLogger();
++
++ public static int getRegionChunkShift() {
++ return 4;
++ }
++
++ private static boolean initialised;
++ private static TickRegionScheduler scheduler;
++
++ public static TickRegionScheduler getScheduler() {
++ return scheduler;
++ }
++
++ public static void init(final GlobalConfiguration.ThreadedRegions config) {
++ if (initialised) {
++ return;
++ }
++ initialised = true;
++
++ int tickThreads;
++ if (config.threads <= 0) {
++ tickThreads = Runtime.getRuntime().availableProcessors() / 2;
++ if (tickThreads <= 4) {
++ tickThreads = 1;
++ } else {
++ tickThreads = (2 * tickThreads) / 3;
++ }
++ } else {
++ tickThreads = config.threads;
++ }
++
++ scheduler = new TickRegionScheduler(tickThreads);
++ LOGGER.info("Regionised ticking is enabled with " + tickThreads + " tick threads");
++ }
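++
++ // Worked example of the heuristic above (illustrative comment, not part of
++ // the original patch): with threads <= 0 and 16 available processors,
++ // tickThreads = 16 / 2 = 8; since 8 > 4, tickThreads = (2 * 8) / 3 = 5.
++ // With 8 processors, 8 / 2 = 4 is not > 4, so a single tick thread is used.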
++
++ @Override
++ public TickRegionData createNewData(final ThreadedRegioniser.ThreadedRegion<TickRegionData, TickRegionSectionData> region) {
++ return new TickRegionData(region);
++ }
++
++ @Override
++ public TickRegionSectionData createNewSectionData(final int sectionX, final int sectionZ, final int sectionShift) {
++ return null;
++ }
++
++ @Override
++ public void onRegionCreate(final ThreadedRegioniser.ThreadedRegion<TickRegionData, TickRegionSectionData> region) {
++ // nothing for now
++ }
++
++ @Override
++ public void onRegionDestroy(final ThreadedRegioniser.ThreadedRegion<TickRegionData, TickRegionSectionData> region) {
++ // nothing for now
++ }
++
++ @Override
++ public void onRegionActive(final ThreadedRegioniser.ThreadedRegion<TickRegionData, TickRegionSectionData> region) {
++ final TickRegionData data = region.getData();
++
++ data.tickHandle.checkInitialSchedule();
++ scheduler.scheduleRegion(data.tickHandle);
++ }
++
++ @Override
++ public void onRegionInactive(final ThreadedRegioniser.ThreadedRegion<TickRegionData, TickRegionSectionData> region) {
++ final TickRegionData data = region.getData();
++
++ scheduler.descheduleRegion(data.tickHandle);
++ // old handle cannot be scheduled anymore, copy to a new handle
++ data.tickHandle = data.tickHandle.copy();
++ }
++
++ public static final class TickRegionSectionData implements ThreadedRegioniser.ThreadedRegionSectionData {}
++
++ public static final class TickRegionData implements ThreadedRegioniser.ThreadedRegionData<TickRegionData, TickRegionSectionData> {
++
++ private static final AtomicLong ID_GENERATOR = new AtomicLong();
++ /** Never 0L, since 0L is reserved for the global region. */
++ public final long id = ID_GENERATOR.incrementAndGet();
++
++ public final ThreadedRegioniser.ThreadedRegion<TickRegionData, TickRegionSectionData> region;
++ public final ServerLevel world;
++
++ // generic regionised data
++ private final Reference2ReferenceOpenHashMap<RegionisedData<?>, Object> regionisedData = new Reference2ReferenceOpenHashMap<>();
++
++ // tick data
++ private ConcreteRegionTickHandle tickHandle = new ConcreteRegionTickHandle(this, SchedulerThreadPool.DEADLINE_NOT_SET);
++
++ // queue data
++ private final RegionisedTaskQueue.RegionTaskQueueData taskQueueData;
++
++ // chunk holder manager data
++ private final ChunkHolderManager.HolderManagerRegionData holderManagerRegionData = new ChunkHolderManager.HolderManagerRegionData();
++
++ private TickRegionData(final ThreadedRegioniser.ThreadedRegion region) {
++ this.region = region;
++ this.world = region.regioniser.world;
++ this.taskQueueData = new RegionisedTaskQueue.RegionTaskQueueData(this.world.taskQueueRegionData);
++ }
++
++ public RegionisedTaskQueue.RegionTaskQueueData getTaskQueueData() {
++ return this.taskQueueData;
++ }
++
++ // the value returned can be invalidated at any time, except when the caller
++ // is ticking this region
++ public TickRegionScheduler.RegionScheduleHandle getRegionSchedulingHandle() {
++ return this.tickHandle;
++ }
++
++ public long getCurrentTick() {
++ return this.tickHandle.getCurrentTick();
++ }
++
++ public ChunkHolderManager.HolderManagerRegionData getHolderManagerRegionData() {
++ return this.holderManagerRegionData;
++ }
++
++ <T> T getOrCreateRegionisedData(final RegionisedData<T> regionisedData) {
++ T ret = (T)this.regionisedData.get(regionisedData);
++
++ if (ret != null) {
++ return ret;
++ }
++
++ ret = regionisedData.createNewValue();
++ this.regionisedData.put(regionisedData, ret);
++
++ return ret;
++ }
++
++ @Override
++ public void split(final ThreadedRegioniser<TickRegionData, TickRegionSectionData> regioniser,
++ final Long2ReferenceOpenHashMap<ThreadedRegioniser.ThreadedRegion<TickRegionData, TickRegionSectionData>> into,
++ final ReferenceOpenHashSet<ThreadedRegioniser.ThreadedRegion<TickRegionData, TickRegionSectionData>> regions) {
++ final int shift = regioniser.sectionChunkShift;
++
++ // tick data
++ // note: it is OK to access the tick handle directly here, as this region is owned (and thus not scheduled),
++ // and the other regions to split into are not scheduled yet.
++ for (final ThreadedRegioniser.ThreadedRegion<TickRegionData, TickRegionSectionData> region : regions) {
++ final TickRegionData data = region.getData();
++ data.tickHandle.copyDeadlineAndTickCount(this.tickHandle);
++ }
++
++ // generic regionised data
++ for (final Iterator<Reference2ReferenceMap.Entry<RegionisedData<?>, Object>> dataIterator = this.regionisedData.reference2ReferenceEntrySet().fastIterator();
++ dataIterator.hasNext();) {
++ final Reference2ReferenceMap.Entry<RegionisedData<?>, Object> regionDataEntry = dataIterator.next();
++ final RegionisedData<?> data = regionDataEntry.getKey();
++ final Object from = regionDataEntry.getValue();
++
++ final ReferenceOpenHashSet