Mirror of https://github.com/PaperMC/Paper.git (synced 2024-10-29 23:09:33 +01:00)

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
Date: Thu, 11 Mar 2021 02:32:30 -0800
Subject: [PATCH] Rewrite chunk system

== AT ==
public net.minecraft.server.level.ChunkMap setViewDistance(I)V
public net.minecraft.server.level.ChunkHolder pos
public net.minecraft.server.level.ChunkMap overworldDataStorage
public-f net.minecraft.world.level.chunk.storage.RegionFileStorage
public net.minecraft.server.level.ChunkMap getPoiManager()Lnet/minecraft/world/entity/ai/village/poi/PoiManager;

diff --git a/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java b/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java
index 9a5fa60cb8156fe254a123e237d957ccb82f7195..0f7d36933e34e1d1b9dd27d8b0c35ff883818526 100644
--- a/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java
+++ b/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java
@@ -41,14 +41,14 @@ public final class StarLightInterface {
protected final ArrayDeque<SkyStarLightEngine> cachedSkyPropagators;
protected final ArrayDeque<BlockStarLightEngine> cachedBlockPropagators;

- protected final LightQueue lightQueue = new LightQueue(this);
+ public final io.papermc.paper.chunk.system.light.LightQueue lightQueue; // Paper - replace light queue

protected final LayerLightEventListener skyReader;
protected final LayerLightEventListener blockReader;
protected final boolean isClientSide;

- protected final int minSection;
- protected final int maxSection;
+ public final int minSection; // Paper - public
+ public final int maxSection; // Paper - public
protected final int minLightSection;
protected final int maxLightSection;

@@ -182,6 +182,7 @@ public final class StarLightInterface {
StarLightInterface.this.sectionChange(pos, notReady);
}
};
+ this.lightQueue = new io.papermc.paper.chunk.system.light.LightQueue(this); // Paper - replace light queue
}

protected int getSkyLightValue(final BlockPos blockPos, final ChunkAccess chunk) {
@@ -325,7 +326,7 @@ public final class StarLightInterface {
return this.lightAccess;
}

- protected final SkyStarLightEngine getSkyLightEngine() {
+ public final SkyStarLightEngine getSkyLightEngine() { // Paper - public
if (this.cachedSkyPropagators == null) {
return null;
}
@@ -340,7 +341,7 @@ public final class StarLightInterface {
return ret;
}

- protected final void releaseSkyLightEngine(final SkyStarLightEngine engine) {
+ public final void releaseSkyLightEngine(final SkyStarLightEngine engine) { // Paper - public
if (this.cachedSkyPropagators == null) {
return;
}
@@ -349,7 +350,7 @@ public final class StarLightInterface {
}
}

- protected final BlockStarLightEngine getBlockLightEngine() {
+ public final BlockStarLightEngine getBlockLightEngine() { // Paper - public
if (this.cachedBlockPropagators == null) {
return null;
}
@@ -364,7 +365,7 @@ public final class StarLightInterface {
return ret;
}

- protected final void releaseBlockLightEngine(final BlockStarLightEngine engine) {
+ public final void releaseBlockLightEngine(final BlockStarLightEngine engine) { // Paper - public
if (this.cachedBlockPropagators == null) {
return;
}
@@ -511,57 +512,15 @@ public final class StarLightInterface {
}

public void scheduleChunkLight(final ChunkPos pos, final Runnable run) {
- this.lightQueue.queueChunkLighting(pos, run);
+ throw new UnsupportedOperationException("No longer implemented, use the new lightQueue field to queue tasks"); // Paper - replace light queue
}

public void removeChunkTasks(final ChunkPos pos) {
- this.lightQueue.removeChunk(pos);
+ throw new UnsupportedOperationException("No longer implemented, use the new lightQueue field to queue tasks"); // Paper - replace light queue
}

public void propagateChanges() {
- if (this.lightQueue.isEmpty()) {
- return;
- }
-
- final SkyStarLightEngine skyEngine = this.getSkyLightEngine();
- final BlockStarLightEngine blockEngine = this.getBlockLightEngine();
-
- try {
- LightQueue.ChunkTasks task;
- while ((task = this.lightQueue.removeFirstTask()) != null) {
- if (task.lightTasks != null) {
- for (final Runnable run : task.lightTasks) {
- run.run();
- }
- }
-
- final long coordinate = task.chunkCoordinate;
- final int chunkX = CoordinateUtils.getChunkX(coordinate);
- final int chunkZ = CoordinateUtils.getChunkZ(coordinate);
-
- final Set<BlockPos> positions = task.changedPositions;
- final Boolean[] sectionChanges = task.changedSectionSet;
-
- if (skyEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
- skyEngine.blocksChangedInChunk(this.lightAccess, chunkX, chunkZ, positions, sectionChanges);
- }
- if (blockEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
- blockEngine.blocksChangedInChunk(this.lightAccess, chunkX, chunkZ, positions, sectionChanges);
- }
-
- if (skyEngine != null && task.queuedEdgeChecksSky != null) {
- skyEngine.checkChunkEdges(this.lightAccess, chunkX, chunkZ, task.queuedEdgeChecksSky);
- }
- if (blockEngine != null && task.queuedEdgeChecksBlock != null) {
- blockEngine.checkChunkEdges(this.lightAccess, chunkX, chunkZ, task.queuedEdgeChecksBlock);
- }
-
- task.onComplete.complete(null);
- }
- } finally {
- this.releaseSkyLightEngine(skyEngine);
- this.releaseBlockLightEngine(blockEngine);
- }
+ throw new UnsupportedOperationException("No longer implemented, task draining is now performed by the light thread"); // Paper - replace light queue
}

protected static final class LightQueue {
diff --git a/src/main/java/co/aikar/timings/TimingsExport.java b/src/main/java/co/aikar/timings/TimingsExport.java
index 46297ac0a19fd2398ab777a381eff4d0a256161e..98171f6c8e23f6ef89b897e4b80e3afb2a1950a0 100644
--- a/src/main/java/co/aikar/timings/TimingsExport.java
+++ b/src/main/java/co/aikar/timings/TimingsExport.java
@@ -162,7 +162,11 @@ public class TimingsExport extends Thread {
pair("gamerules", toObjectMapper(world.getWorld().getGameRules(), rule -> {
return pair(rule, world.getWorld().getGameRuleValue(rule));
})),
- pair("ticking-distance", world.getChunkSource().chunkMap.getEffectiveViewDistance())
+ // Paper start - replace chunk loader system
+ pair("ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance()),
+ pair("no-ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance()),
+ pair("sending-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance())
+ // Paper end - replace chunk loader system
));
}));

diff --git a/src/main/java/co/aikar/timings/WorldTimingsHandler.java b/src/main/java/co/aikar/timings/WorldTimingsHandler.java
index 0fda52841b5e1643efeda92106124998abc4e0aa..fe79c0add4f7cb18d487c5bb9415c40c5b551ea2 100644
--- a/src/main/java/co/aikar/timings/WorldTimingsHandler.java
+++ b/src/main/java/co/aikar/timings/WorldTimingsHandler.java
@@ -58,6 +58,16 @@ public class WorldTimingsHandler {

public final Timing miscMobSpawning;

+ public final Timing poiUnload;
+ public final Timing chunkUnload;
+ public final Timing poiSaveDataSerialization;
+ public final Timing chunkSave;
+ public final Timing chunkSaveDataSerialization;
+ public final Timing chunkSaveIOWait;
+ public final Timing chunkUnloadPrepareSave;
+ public final Timing chunkUnloadPOISerialization;
+ public final Timing chunkUnloadDataSave;
+
public WorldTimingsHandler(Level server) {
String name = ((PrimaryLevelData) server.getLevelData()).getLevelName() + " - ";

@@ -111,6 +121,16 @@ public class WorldTimingsHandler {


miscMobSpawning = Timings.ofSafe(name + "Mob spawning - Misc");
+
+ poiUnload = Timings.ofSafe(name + "Chunk unload - POI");
+ chunkUnload = Timings.ofSafe(name + "Chunk unload - Chunk");
+ poiSaveDataSerialization = Timings.ofSafe(name + "Chunk save - POI Data serialization");
+ chunkSave = Timings.ofSafe(name + "Chunk save - Chunk");
+ chunkSaveDataSerialization = Timings.ofSafe(name + "Chunk save - Chunk Data serialization");
+ chunkSaveIOWait = Timings.ofSafe(name + "Chunk save - Chunk IO Wait");
+ chunkUnloadPrepareSave = Timings.ofSafe(name + "Chunk unload - Async Save Prepare");
+ chunkUnloadPOISerialization = Timings.ofSafe(name + "Chunk unload - POI Data Serialization");
+ chunkUnloadDataSave = Timings.ofSafe(name + "Chunk unload - Data Serialization");
}

public static Timing getTickList(ServerLevel worldserver, String timingsType) {
diff --git a/src/main/java/com/destroystokyo/paper/io/IOUtil.java b/src/main/java/com/destroystokyo/paper/io/IOUtil.java
new file mode 100644
index 0000000000000000000000000000000000000000..e064f96c90afd1a4890060baa055cfd0469b6a6f
--- /dev/null
+++ b/src/main/java/com/destroystokyo/paper/io/IOUtil.java
@@ -0,0 +1,63 @@
+package com.destroystokyo.paper.io;
+
+import org.bukkit.Bukkit;
+
+@Deprecated(forRemoval = true)
+public final class IOUtil {
+
+ /* Copied from concrete or concurrentutil */
+
+ public static long getCoordinateKey(final int x, final int z) {
+ return ((long)z << 32) | (x & 0xFFFFFFFFL);
+ }
+
+ public static int getCoordinateX(final long key) {
+ return (int)key;
+ }
+
+ public static int getCoordinateZ(final long key) {
+ return (int)(key >>> 32);
+ }
+
+ public static int getRegionCoordinate(final int chunkCoordinate) {
+ return chunkCoordinate >> 5;
+ }
+
+ public static int getChunkInRegion(final int chunkCoordinate) {
+ return chunkCoordinate & 31;
+ }
+
+ public static String genericToString(final Object object) {
+ return object == null ? "null" : object.getClass().getName() + ":" + object.toString();
+ }
+
+ public static <T> T notNull(final T obj) {
+ if (obj == null) {
+ throw new NullPointerException();
+ }
+ return obj;
+ }
+
+ public static <T> T notNull(final T obj, final String msgIfNull) {
+ if (obj == null) {
+ throw new NullPointerException(msgIfNull);
+ }
+ return obj;
+ }
+
+ public static void arrayBounds(final int off, final int len, final int arrayLength, final String msgPrefix) {
+ if (off < 0 || len < 0 || (arrayLength - off) < len) {
+ throw new ArrayIndexOutOfBoundsException(msgPrefix + ": off: " + off + ", len: " + len + ", array length: " + arrayLength);
+ }
+ }
+
+ public static int getPriorityForCurrentThread() {
+ return Bukkit.isPrimaryThread() ? PrioritizedTaskQueue.HIGHEST_PRIORITY : PrioritizedTaskQueue.NORMAL_PRIORITY;
+ }
+
+ @SuppressWarnings("unchecked")
+ public static <T extends Throwable> void rethrow(final Throwable throwable) throws T {
+ throw (T)throwable;
+ }
+
+}
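
The coordinate helpers in IOUtil above pack a chunk (x, z) pair into one long (z in the high 32 bits, x in the low 32 bits) and derive region-file coordinates by shifting, since a region file covers 32x32 chunks. A minimal standalone sketch of the round-trip (class and names here are illustrative, not part of the patch):

```java
// Demonstrates the packing scheme used by IOUtil above: z occupies the high
// 32 bits of the key and x the low 32 bits, so negative coordinates round-trip.
public final class CoordinateKeyDemo {
    static long key(final int x, final int z) {
        return ((long) z << 32) | (x & 0xFFFFFFFFL);
    }

    public static void main(final String[] args) {
        final int x = -3, z = 70;
        final long key = key(x, z);
        System.out.println((int) key == x);          // true: low 32 bits -> x
        System.out.println((int) (key >>> 32) == z); // true: high 32 bits -> z
        // Region files hold 32x32 chunks: the region coordinate is chunk >> 5
        // (arithmetic shift, so negatives work), and chunk & 31 is the index
        // of the chunk inside its region.
        System.out.println((x >> 5) == -1 && (x & 31) == 29); // true
    }
}
```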
diff --git a/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java b/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java
new file mode 100644
index 0000000000000000000000000000000000000000..f2c27e0ac65be4b75c1d86ef6fd45fdb538d96ac
--- /dev/null
+++ b/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java
@@ -0,0 +1,474 @@
+package com.destroystokyo.paper.io;
+
+import com.mojang.logging.LogUtils;
+import net.minecraft.nbt.CompoundTag;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.world.level.ChunkPos;
+import net.minecraft.world.level.chunk.storage.RegionFile;
+import org.slf4j.Logger;
+
+import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+/**
+ * Prioritized singleton thread responsible for all chunk IO that occurs in a minecraft server.
+ *
+ * <p>
+ * Singleton access: {@link Holder#INSTANCE}
+ * </p>
+ *
+ * <p>
+ * All functions provided are MT-Safe, however certain ordering constraints are required (but not enforced):
+ * <li>
+ * Chunk saves may not occur for unloaded chunks.
+ * </li>
+ * <li>
+ * Tasks must be scheduled on the main thread.
+ * </li>
+ * </p>
+ *
+ * @see Holder#INSTANCE
+ * @see #scheduleSave(ServerLevel, int, int, CompoundTag, CompoundTag, int)
+ * @see #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean)
+ * @deprecated
+ */
+@Deprecated(forRemoval = true)
+public final class PaperFileIOThread extends QueueExecutorThread {
+
+ public static final Logger LOGGER = LogUtils.getLogger();
+ public static final CompoundTag FAILURE_VALUE = new CompoundTag();
+
+ public static final class Holder {
+
+ public static final PaperFileIOThread INSTANCE = new PaperFileIOThread();
+
+ static {
+ // Paper - fail hard on usage
+ }
+ }
+
+ private final AtomicLong writeCounter = new AtomicLong();
+
+ private PaperFileIOThread() {
+ super(new PrioritizedTaskQueue<>(), (int)(1.0e6)); // 1.0ms spinwait time
+ this.setName("Paper RegionFile IO Thread");
+ this.setPriority(Thread.NORM_PRIORITY - 1); // we keep priority close to normal because threads can wait on us
+ this.setUncaughtExceptionHandler((final Thread unused, final Throwable thr) -> {
+ LOGGER.error("Uncaught exception thrown from IO thread, report this!", thr);
+ });
+ }
+
+ /* run() is implemented by superclass */
+
+ /*
+ *
+ * IO thread will perform reads before writes
+ *
+ * How reads/writes are scheduled:
+ *
+ * If read in progress while scheduling write, ignore read and schedule write
+ * If read in progress while scheduling read (no write in progress), chain the read task
+ *
+ *
+ * If write in progress while scheduling read, use the pending write data and ret immediately
+ * If write in progress while scheduling write (ignore read in progress), overwrite the write in progress data
+ *
+ * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them; however,
+ * it fails to properly propagate write failures. When a write fails the data is kept, so future reads will actually
+ * read the failed write data. This should hopefully act as a way to prevent data loss from spurious write failures.
+ *
+ */
+
+ /**
+ * Attempts to bump the priority of all IO tasks for the given chunk coordinates. This has no effect if no tasks are queued.
+ * @param world Chunk's world
+ * @param chunkX Chunk's x coordinate
+ * @param chunkZ Chunk's z coordinate
+ * @param priority Priority level to try to bump to
+ */
+ public void bumpPriority(final ServerLevel world, final int chunkX, final int chunkZ, final int priority) {
+ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
+ }
+
+ public CompoundTag getPendingWrite(final ServerLevel world, final int chunkX, final int chunkZ, final boolean poiData) {
+ // Paper start - rewrite chunk system
+ return io.papermc.paper.chunk.system.io.RegionFileIOThread.getPendingWrite(
+ world, chunkX, chunkZ, poiData ? io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA :
+ io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA
+ );
+ // Paper end - rewrite chunk system
+ }
+
+ /**
+ * Sets the priority of all IO tasks for the given chunk coordinates. This has no effect if no tasks are queued.
+ * @param world Chunk's world
+ * @param chunkX Chunk's x coordinate
+ * @param chunkZ Chunk's z coordinate
+ * @param priority Priority level to set to
+ */
+ public void setPriority(final ServerLevel world, final int chunkX, final int chunkZ, final int priority) {
+ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
+ }
+
+ /**
+ * Schedules the chunk data to be written asynchronously.
+ * <p>
+ * Impl notes:
+ * </p>
+ * <li>
+ * This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means
+ * saves must be scheduled before a chunk is unloaded.
+ * </li>
+ * <li>
+ * Writes may be called concurrently, although only the "later" write will go through.
+ * </li>
+ * @param world Chunk's world
+ * @param chunkX Chunk's x coordinate
+ * @param chunkZ Chunk's z coordinate
+ * @param poiData Chunk point of interest data. If {@code null}, then no poi data is saved.
+ * @param chunkData Chunk data. If {@code null}, then no chunk data is saved.
+ * @param priority Priority level for this task. See {@link PrioritizedTaskQueue}
+ * @throws IllegalArgumentException If both {@code poiData} and {@code chunkData} are {@code null}.
+ * @throws IllegalStateException If the file io thread has shutdown.
+ */
+ public void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ,
+ final CompoundTag poiData, final CompoundTag chunkData,
+ final int priority) throws IllegalArgumentException {
+ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
+ }
+
+ private void scheduleWrite(final ChunkDataController dataController, final ServerLevel world,
+ final int chunkX, final int chunkZ, final CompoundTag data, final int priority, final long writeCounter) {
+ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
+ }
+
+ /**
+ * Same as {@link #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean)}, except this function returns
+ * a {@link CompletableFuture} which is potentially completed <b>ASYNCHRONOUSLY ON THE FILE IO THREAD</b> when the load task
+ * has completed.
+ * <p>
+ * Note that if the chunk fails to load the returned future is completed with {@code null}.
+ * </p>
+ */
+ public CompletableFuture<ChunkData> loadChunkDataAsyncFuture(final ServerLevel world, final int chunkX, final int chunkZ,
+ final int priority, final boolean readPoiData, final boolean readChunkData,
+ final boolean intendingToBlock) {
+ final CompletableFuture<ChunkData> future = new CompletableFuture<>();
+ this.loadChunkDataAsync(world, chunkX, chunkZ, priority, future::complete, readPoiData, readChunkData, intendingToBlock);
+ return future;
+ }
+
+ /**
+ * Schedules a load to be executed asynchronously.
+ * <p>
+ * Impl notes:
+ * </p>
+ * <li>
+ * If a chunk fails to load, the {@code onComplete} parameter is completed with {@code null}.
+ * </li>
+ * <li>
+ * It is possible for the {@code onComplete} parameter to be given {@link ChunkData} containing data
+ * this call did not request.
+ * </li>
+ * <li>
+ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
+ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
+ * data is undefined behaviour, and can cause deadlock.
+ * </li>
+ * @param world Chunk's world
+ * @param chunkX Chunk's x coordinate
+ * @param chunkZ Chunk's z coordinate
+ * @param priority Priority level for this task. See {@link PrioritizedTaskQueue}
+ * @param onComplete Consumer to execute once this task has completed
+ * @param readPoiData Whether to read point of interest data. If {@code false}, the {@code NBTTagCompound} will be {@code null}.
+ * @param readChunkData Whether to read chunk data. If {@code false}, the {@code NBTTagCompound} will be {@code null}.
+ * @return The {@link PrioritizedTaskQueue.PrioritizedTask} associated with this task. Note that this task does not support
+ * cancellation.
+ */
+ public void loadChunkDataAsync(final ServerLevel world, final int chunkX, final int chunkZ,
+ final int priority, final Consumer<ChunkData> onComplete,
+ final boolean readPoiData, final boolean readChunkData,
+ final boolean intendingToBlock) {
+ if (!PrioritizedTaskQueue.validPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority: " + priority);
+ }
+
+ if (!(readPoiData | readChunkData)) {
+ throw new IllegalArgumentException("Must read chunk data or poi data");
+ }
+
+ final ChunkData complete = new ChunkData();
+ // Paper start - rewrite chunk system
+ final java.util.List<io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType> types = new java.util.ArrayList<>();
+ if (readPoiData) {
+ types.add(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA);
+ }
+ if (readChunkData) {
+ types.add(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA);
+ }
+ final ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority newPriority;
+ switch (priority) {
+ case PrioritizedTaskQueue.HIGHEST_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.BLOCKING;
+ case PrioritizedTaskQueue.HIGHER_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.HIGHEST;
+ case PrioritizedTaskQueue.HIGH_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.HIGH;
+ case PrioritizedTaskQueue.NORMAL_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.NORMAL;
+ case PrioritizedTaskQueue.LOW_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.LOW;
+ case PrioritizedTaskQueue.LOWEST_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.IDLE;
+ default -> throw new IllegalStateException("Legacy priority " + priority + " should be valid");
+ }
+ final Consumer<io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileData> transformComplete = (io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileData data) -> {
+ if (readPoiData) {
+ if (data.getThrowable(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA) != null) {
+ complete.poiData = FAILURE_VALUE;
+ } else {
+ complete.poiData = data.getData(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA);
+ }
+ }
+
+ if (readChunkData) {
+ if (data.getThrowable(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA) != null) {
+ complete.chunkData = FAILURE_VALUE;
+ } else {
+ complete.chunkData = data.getData(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA);
+ }
+ }
+
+ onComplete.accept(complete);
+ };
+ io.papermc.paper.chunk.system.io.RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, transformComplete, intendingToBlock, newPriority, types.toArray(new io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType[0]));
+ // Paper end - rewrite chunk system
+
+ }
+
+ // Note: the onComplete may be called asynchronously or synchronously here.
+ private void scheduleRead(final ChunkDataController dataController, final ServerLevel world,
+ final int chunkX, final int chunkZ, final Consumer<CompoundTag> onComplete, final int priority,
+ final boolean intendingToBlock) {
+ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
+ }
+
+ /**
+ * Same as {@link #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean)}, except this function returns
+ * the {@link ChunkData} associated with the specified chunk when the task is complete.
+ * @return The chunk data, or {@code null} if the chunk failed to load.
+ */
+ public ChunkData loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ, final int priority,
+ final boolean readPoiData, final boolean readChunkData) {
+ return this.loadChunkDataAsyncFuture(world, chunkX, chunkZ, priority, readPoiData, readChunkData, true).join();
+ }
+
+ /**
+ * Schedules the given task at the specified priority to be executed on the IO thread.
+ * <p>
+ * Internal api. Do not use.
+ * </p>
+ */
+ public void runTask(final int priority, final Runnable runnable) {
+ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
+ }
+
+ static final class GeneralTask extends PrioritizedTaskQueue.PrioritizedTask implements Runnable {
+
+ private final Runnable run;
+
+ public GeneralTask(final int priority, final Runnable run) {
+ super(priority);
+ this.run = IOUtil.notNull(run, "Task may not be null");
+ }
+
+ @Override
+ public void run() {
+ try {
+ this.run.run();
+ } catch (final Throwable throwable) {
+ if (throwable instanceof ThreadDeath) {
+ throw (ThreadDeath)throwable;
+ }
+ LOGGER.error("Failed to execute general task on IO thread " + IOUtil.genericToString(this.run), throwable);
+ }
+ }
+ }
+
+ public static final class ChunkData {
+
+ public CompoundTag poiData;
+ public CompoundTag chunkData;
+
+ public ChunkData() {}
+
+ public ChunkData(final CompoundTag poiData, final CompoundTag chunkData) {
+ this.poiData = poiData;
+ this.chunkData = chunkData;
+ }
+ }
+
+ public static abstract class ChunkDataController {
+
+ // ConcurrentHashMap synchronizes per chain, so reduce the chance of task's hashes colliding.
+ public final ConcurrentHashMap<Long, ChunkDataTask> tasks = new ConcurrentHashMap<>(64, 0.5f);
+
+ public abstract void writeData(final int x, final int z, final CompoundTag compound) throws IOException;
+ public abstract CompoundTag readData(final int x, final int z) throws IOException;
+
+ public abstract <T> T computeForRegionFile(final int chunkX, final int chunkZ, final Function<RegionFile, T> function);
+ public abstract <T> T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function<RegionFile, T> function);
+
+ public static final class InProgressWrite {
+ public long writeCounter;
+ public CompoundTag data;
+ }
+
+ public static final class InProgressRead {
+ public final CompletableFuture<CompoundTag> readFuture = new CompletableFuture<>();
+ }
+ }
+
+ public static final class ChunkDataTask extends PrioritizedTaskQueue.PrioritizedTask implements Runnable {
+
+ public ChunkDataController.InProgressWrite inProgressWrite;
+ public ChunkDataController.InProgressRead inProgressRead;
+
+ private final ServerLevel world;
+ private final int x;
+ private final int z;
+ private final ChunkDataController taskController;
+
+ public ChunkDataTask(final int priority, final ServerLevel world, final int x, final int z, final ChunkDataController taskController) {
+ super(priority);
+ this.world = world;
+ this.x = x;
+ this.z = z;
+ this.taskController = taskController;
+ }
+
+ @Override
+ public String toString() {
+ return "Task for world: '" + this.world.getWorld().getName() + "' at " + this.x + "," + this.z +
+ " poi: " + (this.taskController == null) + ", hash: " + this.hashCode(); // Paper - TODO rewrite chunk system
+ }
+
+ /*
+ *
+ * IO thread will perform reads before writes
+ *
+ * How reads/writes are scheduled:
+ *
+ * If read in progress while scheduling write, ignore read and schedule write
+ * If read in progress while scheduling read (no write in progress), chain the read task
+ *
+ *
+ * If write in progress while scheduling read, use the pending write data and ret immediately
+ * If write in progress while scheduling write (ignore read in progress), overwrite the write in progress data
+ *
+ * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them, however
+ * it fails to properly propagate write failures
+ *
+ */
+
+ void reschedule(final int priority) {
+ // priority is checked before this stage // TODO what
+ this.queue.lazySet(null);
+ this.priority.lazySet(priority);
+ PaperFileIOThread.Holder.INSTANCE.queueTask(this);
+ }
+
+ @Override
+ public void run() {
+ if (true) throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage
+ ChunkDataController.InProgressRead read = this.inProgressRead;
+ if (read != null) {
+ CompoundTag compound = PaperFileIOThread.FAILURE_VALUE;
+ try {
+ compound = this.taskController.readData(this.x, this.z);
+ } catch (final Throwable thr) {
+ if (thr instanceof ThreadDeath) {
+ throw (ThreadDeath)thr;
+ }
+ LOGGER.error("Failed to read chunk data for task: " + this.toString(), thr);
+ // fall through to complete with null data
+ }
+ read.readFuture.complete(compound);
+ }
+
+ final Long chunkKey = Long.valueOf(IOUtil.getCoordinateKey(this.x, this.z));
+
+ ChunkDataController.InProgressWrite write = this.inProgressWrite;
+
+ if (write == null) {
+ // IntelliJ warns this is invalid, however it does not consider that writes to the task map & the inProgress field can occur concurrently.
+ ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final Long keyInMap, final ChunkDataTask valueInMap) -> {
+ if (valueInMap == null) {
+ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
+ }
+ if (valueInMap != ChunkDataTask.this) {
+ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
+ }
+ return valueInMap.inProgressWrite == null ? null : valueInMap;
+ });
+
+ if (inMap == null) {
+ return; // set the task value to null, indicating we're done
+ }
+
+ // not null, which means there was a concurrent write
+ write = this.inProgressWrite;
+ }
+
+ for (;;) {
+ final long writeCounter;
+ final CompoundTag data;
+
+ //noinspection SynchronizationOnLocalVariableOrMethodParameter
+ synchronized (write) {
+ writeCounter = write.writeCounter;
+ data = write.data;
+ }
+
+ boolean failedWrite = false;
+
+ try {
+ this.taskController.writeData(this.x, this.z, data);
+ } catch (final Throwable thr) {
+ if (thr instanceof ThreadDeath) {
+ throw (ThreadDeath)thr;
+ }
+ LOGGER.error("Failed to write chunk data for task: " + this.toString(), thr);
+ failedWrite = true;
+ }
+
+ boolean finalFailWrite = failedWrite;
+
+ ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final Long keyInMap, final ChunkDataTask valueInMap) -> {
+ if (valueInMap == null) {
+ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
+ }
+ if (valueInMap != ChunkDataTask.this) {
+ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
+ }
+ if (valueInMap.inProgressWrite.writeCounter == writeCounter) {
+ if (finalFailWrite) {
+ valueInMap.inProgressWrite.writeCounter = -1L;
+ }
+
+ return null;
+ }
+ return valueInMap;
+ // Hack end
+ });
+
+ if (inMap == null) {
+ // write counter matched, so we wrote the most up-to-date pending data, we're done here
+ // or we failed to write and successfully set the write counter to -1
+ return; // we're done here
+ }
+
+ // fetch & write new data
+ continue;
+ }
+ }
+ }
+}
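
The scheduling comment near the top of PaperFileIOThread describes a per-chunk coalescing discipline: at most one pending write per chunk, a newer write simply overwrites the pending one, and a read scheduled while a write is pending is answered from the pending data. A stripped-down sketch of that idea follows; this is illustrative only, not Paper's implementation (the real ChunkDataTask additionally uses a write counter so it can detect a write that races in while a save is already on disk):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// One pending write per key; later writes win, reads prefer pending data.
final class CoalescingStore<K, V> {
    private final ConcurrentHashMap<K, V> pending = new ConcurrentHashMap<>();

    void scheduleWrite(final K key, final V data) {
        this.pending.put(key, data); // overwrite-in-progress semantics
    }

    V read(final K key, final Function<K, V> backingRead) {
        final V inFlight = this.pending.get(key);
        // a pending write acts as the authoritative copy until it hits disk
        return inFlight != null ? inFlight : backingRead.apply(key);
    }

    V takeForSave(final K key) {
        // the IO thread drains the pending slot and persists the result;
        // a write arriving after this point simply re-populates the slot
        return this.pending.remove(key);
    }
}
```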
diff --git a/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java b/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java
new file mode 100644
index 0000000000000000000000000000000000000000..7844a3515430472bd829ff246396bceb0797de1b
--- /dev/null
+++ b/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java
@@ -0,0 +1,299 @@
+package com.destroystokyo.paper.io;
+
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+@Deprecated(forRemoval = true)
+public class PrioritizedTaskQueue<T extends PrioritizedTaskQueue.PrioritizedTask> {
+
+ // lower numbers are a higher priority (except < 0)
+ // higher priorities are always executed before lower priorities
+
+ /**
+ * Priority value indicating the task has completed or is being completed.
+ */
+ public static final int COMPLETING_PRIORITY = -1;
+
+ /**
+ * Highest priority, should only be used for main thread tasks or tasks that are blocking the main thread.
+ */
+ public static final int HIGHEST_PRIORITY = 0;
+
+ /**
+ * Should be only used in an IO task so that chunk loads do not wait on other IO tasks.
+ * This only exists because IO tasks are scheduled before chunk load tasks to decrease IO waiting times.
+ */
+ public static final int HIGHER_PRIORITY = 1;
+
+ /**
+ * Should be used for scheduling chunk loads/generation that would increase response times to users.
+ */
+ public static final int HIGH_PRIORITY = 2;
+
+ /**
+ * Default priority.
+ */
+ public static final int NORMAL_PRIORITY = 3;
+
+ /**
+ * Use for tasks that are not at all critical and can potentially be delayed.
+ */
+ public static final int LOW_PRIORITY = 4;
+
+ /**
+ * Use for tasks that should "eventually" execute.
+ */
+ public static final int LOWEST_PRIORITY = 5;
+
+ private static final int TOTAL_PRIORITIES = 6;
+
+ final ConcurrentLinkedQueue<T>[] queues = (ConcurrentLinkedQueue<T>[])new ConcurrentLinkedQueue[TOTAL_PRIORITIES];
+
+ private final AtomicBoolean shutdown = new AtomicBoolean();
+
+ {
+ for (int i = 0; i < TOTAL_PRIORITIES; ++i) {
+ this.queues[i] = new ConcurrentLinkedQueue<>();
+ }
+ }
+
+ /**
+ * Returns whether the specified priority is valid
+ */
+ public static boolean validPriority(final int priority) {
+ return priority >= 0 && priority < TOTAL_PRIORITIES;
+ }
+
+ /**
+ * Queues a task.
+ * @throws IllegalStateException If the task has already been queued. Use {@link PrioritizedTask#raisePriority(int)} to
+ * raise a task's priority.
+ * This can also be thrown if the queue has shutdown.
+ */
+ public void add(final T task) throws IllegalStateException {
+ int priority = task.getPriority();
+ if (priority != COMPLETING_PRIORITY) {
+ task.setQueue(this);
+ this.queues[priority].add(task);
+ }
+ if (this.shutdown.get()) {
+ // note: we're not actually sure at this point if our task will go through
+ throw new IllegalStateException("Queue has shutdown, refusing to execute task " + IOUtil.genericToString(task));
+ }
+ }
+
+ /**
+ * Polls the highest priority task currently available. {@code null} if none.
+ */
+ public T poll() {
+ T task;
+ for (int i = 0; i < TOTAL_PRIORITIES; ++i) {
+ final ConcurrentLinkedQueue<T> queue = this.queues[i];
+
+ while ((task = queue.poll()) != null) {
+ final int prevPriority = task.tryComplete(i);
+ if (prevPriority != COMPLETING_PRIORITY && prevPriority <= i) {
+ // if the prev priority was greater-than or equal to our current priority
+ return task;
+ }
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Polls the highest priority task currently available, ignoring tasks below the specified lowest priority. {@code null} if none.
+ */
+ public T poll(final int lowestPriority) {
+ T task;
+ final int max = Math.min(LOWEST_PRIORITY, lowestPriority);
+ for (int i = 0; i <= max; ++i) {
+ final ConcurrentLinkedQueue<T> queue = this.queues[i];
+
+ while ((task = queue.poll()) != null) {
+ final int prevPriority = task.tryComplete(i);
+ if (prevPriority != COMPLETING_PRIORITY && prevPriority <= i) {
+ // if the prev priority was greater-than or equal to our current priority
+ return task;
+ }
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Returns whether this queue may have tasks queued.
+ * <p>
+ * This operation is not atomic, but is MT-Safe.
+ * </p>
+ * @return {@code true} if tasks may be queued, {@code false} otherwise
+ */
+ public boolean hasTasks() {
+ for (int i = 0; i < TOTAL_PRIORITIES; ++i) {
+ final ConcurrentLinkedQueue<T> queue = this.queues[i];
+
+ if (queue.peek() != null) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Prevent further additions to this queue. Attempts to add after this call has completed (potentially during) will
+ * result in {@link IllegalStateException} being thrown.
+ * <p>
+ * This operation is atomic with respect to other shutdown calls
+ * </p>
+ * <p>
+ * After this call has completed, regardless of return value, this queue will be shutdown.
+ * </p>
+ * @return {@code true} if the queue was shutdown, {@code false} if it has shut down already
+ */
+ public boolean shutdown() {
+ return this.shutdown.getAndSet(false);
+ }
+
+ public abstract static class PrioritizedTask {
+
+ protected final AtomicReference<PrioritizedTaskQueue> queue = new AtomicReference<>();
+
+ protected final AtomicInteger priority;
+
+ protected PrioritizedTask() {
+ this(PrioritizedTaskQueue.NORMAL_PRIORITY);
+ }
+
+ protected PrioritizedTask(final int priority) {
+ if (!PrioritizedTaskQueue.validPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+ this.priority = new AtomicInteger(priority);
+ }
+
+ /**
+ * Returns the current priority. Note that {@link PrioritizedTaskQueue#COMPLETING_PRIORITY} will be returned
+ * if this task is completing or has completed.
+ */
+ public final int getPriority() {
+ return this.priority.get();
+ }
+
+ /**
+ * Returns whether this task is scheduled to execute, or has been already executed.
+ */
+ public boolean isScheduled() {
+ return this.queue.get() != null;
+ }
+
+ final int tryComplete(final int minPriority) {
+ for (int curr = this.getPriorityVolatile();;) {
+ if (curr == COMPLETING_PRIORITY) {
+ return COMPLETING_PRIORITY;
+ }
+ if (curr > minPriority) {
+ // curr is lower priority
+ return curr;
+ }
+
+ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, COMPLETING_PRIORITY))) {
+ return curr;
+ }
+ continue;
+ }
+ }
+
+ /**
+ * Forces this task to be completed.
+ * @return {@code true} if the task was cancelled, {@code false} if the task has already completed or is being completed.
+ */
+ public boolean cancel() {
+ return this.exchangePriorityVolatile(PrioritizedTaskQueue.COMPLETING_PRIORITY) != PrioritizedTaskQueue.COMPLETING_PRIORITY;
+ }
+
+ /**
+ * Attempts to raise the priority to the priority level specified.
+ * @param priority Priority specified
+ * @return {@code true} if successful, {@code false} otherwise.
+ */
+ public boolean raisePriority(final int priority) {
+ if (!PrioritizedTaskQueue.validPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority");
+ }
+
+ for (int curr = this.getPriorityVolatile();;) {
+ if (curr == COMPLETING_PRIORITY) {
+ return false;
+ }
+ if (priority >= curr) {
+ return true;
+ }
+
+ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority))) {
+ PrioritizedTaskQueue queue = this.queue.get();
+ if (queue != null) {
+ //noinspection unchecked
+ queue.queues[priority].add(this); // silently fail on shutdown
+ }
+ return true;
+ }
+ continue;
+ }
+ }
+
+ /**
+ * Attempts to set this task's priority level to the level specified.
+ * @param priority Specified priority level.
+ * @return {@code true} if successful, {@code false} if this task is completing or has completed.
+ */
+ public boolean updatePriority(final int priority) {
+ if (!PrioritizedTaskQueue.validPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority");
+ }
+
+ for (int curr = this.getPriorityVolatile();;) {
+ if (curr == COMPLETING_PRIORITY) {
+ return false;
+ }
+ if (curr == priority) {
+ return true;
+ }
+
+ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority))) {
+ PrioritizedTaskQueue queue = this.queue.get();
+ if (queue != null) {
+ //noinspection unchecked
+ queue.queues[priority].add(this); // silently fail on shutdown
+ }
+ return true;
+ }
+ continue;
+ }
+ }
+
+ void setQueue(final PrioritizedTaskQueue queue) {
+ this.queue.set(queue);
+ }
+
+ /* priority */
+
+ protected final int getPriorityVolatile() {
+ return this.priority.get();
+ }
+
+ protected final int compareAndExchangePriorityVolatile(final int expect, final int update) {
+ if (this.priority.compareAndSet(expect, update)) {
+ return expect;
+ }
+ return this.priority.get();
+ }
+
+ protected final int exchangePriorityVolatile(final int value) {
+ return this.priority.getAndSet(value);
+ }
+ }
+}
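
Two details of PrioritizedTaskQueue are worth calling out. First, raisePriority/updatePriority re-add the task to the new priority's queue without removing the stale entry; poll() later discards stale entries because tryComplete reports the task's real (or COMPLETING) priority. Second, the `curr == (curr = compareAndExchange...)` idiom retries with the freshly witnessed value instead of re-reading. A small self-contained sketch of that CAS loop (illustrative only, built on compareAndSet rather than the class's compare-and-exchange helper):

```java
import java.util.concurrent.atomic.AtomicInteger;

final class TryCompleteDemo {
    static final int COMPLETING = -1;
    final AtomicInteger priority = new AtomicInteger(3);

    /** Returns true only for the single caller that wins completion. */
    boolean tryComplete(final int minPriority) {
        for (int curr = this.priority.get();;) {
            if (curr == COMPLETING) {
                return false; // another poller already claimed the task
            }
            if (curr > minPriority) {
                return false; // stale queue entry: task lives at another priority
            }
            if (this.priority.compareAndSet(curr, COMPLETING)) {
                return true;  // we claimed it
            }
            curr = this.priority.get(); // lost the race: retry with the new value
        }
    }
}
```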
diff --git a/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java b/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java
new file mode 100644
index 0000000000000000000000000000000000000000..99f49b5625cf51d6c97640553cf5c420bb6fdd36
--- /dev/null
+++ b/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java
@@ -0,0 +1,255 @@
+package com.destroystokyo.paper.io;
+
+import com.mojang.logging.LogUtils;
+import org.slf4j.Logger;
+
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.LockSupport;
+
+@Deprecated(forRemoval = true)
+public class QueueExecutorThread<T extends PrioritizedTaskQueue.PrioritizedTask & Runnable> extends Thread {
+
+ private static final Logger LOGGER = LogUtils.getLogger();
+
+ protected final PrioritizedTaskQueue<T> queue;
+ protected final long spinWaitTime;
+
+ protected volatile boolean closed;
+
+ protected final AtomicBoolean parked = new AtomicBoolean();
+
+ protected volatile ConcurrentLinkedQueue<Thread> flushQueue = new ConcurrentLinkedQueue<>();
+ protected volatile long flushCycles;
+
+ protected int lowestPriorityToPoll = PrioritizedTaskQueue.LOWEST_PRIORITY;
+
+ public int getLowestPriorityToPoll() {
+ return this.lowestPriorityToPoll;
+ }
+
+ public void setLowestPriorityToPoll(final int lowestPriorityToPoll) {
+ if (this.isAlive()) {
+ throw new IllegalStateException("Cannot set after starting");
+ }
+ this.lowestPriorityToPoll = lowestPriorityToPoll;
+ }
+
+ public QueueExecutorThread(final PrioritizedTaskQueue<T> queue) {
+ this(queue, (int)(1.e6)); // 1.0ms
+ }
+
+ public QueueExecutorThread(final PrioritizedTaskQueue<T> queue, final long spinWaitTime) { // in ns
+ this.queue = queue;
+ this.spinWaitTime = spinWaitTime;
+ }
+
+ @Override
+ public void run() {
+ final long spinWaitTime = this.spinWaitTime;
+ main_loop:
+ for (;;) {
+ this.pollTasks(true);
+
+ // spinwait
+
+ final long start = System.nanoTime();
+
+ for (;;) {
+ // If we are interrupted for any reason, park() will always return immediately. Clear so that we don't needlessly use cpu in such an event.
+ Thread.interrupted();
+ LockSupport.parkNanos("Spinwaiting on tasks", 1000L); // 1us
+
+ if (this.pollTasks(true)) {
+ // restart loop, found tasks
+ continue main_loop;
+ }
+
+ if (this.handleClose()) {
+ return; // we're done
+ }
+
+ if ((System.nanoTime() - start) >= spinWaitTime) {
+ break;
+ }
+ }
+
+ if (this.handleClose()) {
+ return;
+ }
+
+ this.parked.set(true);
+
+ // We need to poll here to avoid a race condition where a thread queues a task before we set parked to true
+ // (i.e. it will not notify us)
+ if (this.pollTasks(true)) {
+ this.parked.set(false);
+ continue;
+ }
+
+ if (this.handleClose()) {
+ return;
+ }
+
+ // we don't need to check parked before sleeping, but we do need to check parked in a do-while loop
+ // LockSupport.park() can fail for any reason
+ do {
+ Thread.interrupted();
+ LockSupport.park("Waiting on tasks");
+ } while (this.parked.get());
+ }
+ }
+
+ protected boolean handleClose() {
+ if (this.closed) {
+ this.pollTasks(true); // this ensures we've emptied the queue
+ this.handleFlushThreads(true);
+ return true;
+ }
+ return false;
+ }
+
+ protected boolean pollTasks(boolean flushTasks) {
+ Runnable task;
+ boolean ret = false;
+
+ while ((task = this.queue.poll(this.lowestPriorityToPoll)) != null) {
+ ret = true;
+ try {
+ task.run();
+ } catch (final Throwable throwable) {
+ if (throwable instanceof ThreadDeath) {
+ throw (ThreadDeath)throwable;
+ }
+ LOGGER.error("Exception thrown from prioritized runnable task in thread '" + this.getName() + "': " + IOUtil.genericToString(task), throwable);
+ }
+ }
+
+ if (flushTasks) {
+ this.handleFlushThreads(false);
+ }
+
+ return ret;
+ }
+
+ protected void handleFlushThreads(final boolean shutdown) {
+ Thread parking;
+ ConcurrentLinkedQueue<Thread> flushQueue = this.flushQueue;
+ do {
+ ++flushCycles; // may be plain read opaque write
+ while ((parking = flushQueue.poll()) != null) {
+ LockSupport.unpark(parking);
+ }
+ } while (this.pollTasks(false));
+
+ if (shutdown) {
+ this.flushQueue = null;
+
+ // defend against a race condition where a flush thread double-checks right before we set to null
+ while ((parking = flushQueue.poll()) != null) {
+ LockSupport.unpark(parking);
+ }
+ }
+ }
+
+ /**
+ * Notifies this thread that a task has been added to its queue
+ * @return {@code true} if this thread was waiting for tasks, {@code false} if it is executing tasks
+ */
+ public boolean notifyTasks() {
+ if (this.parked.get() && this.parked.getAndSet(false)) {
+ LockSupport.unpark(this);
+ return true;
+ }
+ return false;
+ }
+
+ protected void queueTask(final T task) {
+ this.queue.add(task);
+ this.notifyTasks();
+ }
+
+ /**
+ * Waits until this thread's queue is empty.
+ *
+ * @throws IllegalStateException If the current thread is {@code this} thread.
+ */
+ public void flush() {
+ final Thread currentThread = Thread.currentThread();
+
+ if (currentThread == this) {
+ // avoid deadlock
+ throw new IllegalStateException("Cannot flush the queue executor thread while on the queue executor thread");
+ }
+
+ // order is important
+
+ int successes = 0;
+ long lastCycle = -1L;
+
+ do {
+ final ConcurrentLinkedQueue<Thread> flushQueue = this.flushQueue;
+ if (flushQueue == null) {
+ return;
+ }
+
+ flushQueue.add(currentThread);
+
+ // double check flush queue
+ if (this.flushQueue == null) {
+ return;
+ }
+
+ final long currentCycle = this.flushCycles; // may be opaque read
+
+ if (currentCycle == lastCycle) {
+ Thread.yield();
+ continue;
+ }
+
+ // force response
+ this.parked.set(false);
+ LockSupport.unpark(this);
+
+ LockSupport.park("flushing queue executor thread");
+
+ // hasTasks() returns whether there are tasks queued, it does not return whether there are tasks executing
+ // this is why we cycle twice through flush (we know a pollTask call is made after a flush cycle)
+ // we really only need to guarantee that the tasks this thread has queued have gone through, and can leave
+ // tasks queued concurrently that are unsynchronized with this thread as undefined behavior
+ if (this.queue.hasTasks()) {
+ successes = 0;
+ } else {
+ ++successes;
+ }
+
+ } while (successes != 2);
+
+ }
+
+ /**
+ * Closes this queue executor's queue and optionally waits for it to empty.
+ * <p>
+ * If wait is {@code true}, then the queue will be empty by the time this call completes.
+ * </p>
+ * <p>
+ * This function is MT-Safe.
+ * </p>
+ * @param wait If this call is to wait until the queue is empty
+ * @param killQueue Whether to shutdown this thread's queue
+ * @return whether this thread shut down the queue
+ */
+ public boolean close(final boolean wait, final boolean killQueue) {
+ boolean ret = !killQueue ? false : this.queue.shutdown();
+ this.closed = true;
+
+ // force thread to respond to the shutdown
+ this.parked.set(false);
+ LockSupport.unpark(this);
+
+ if (wait) {
+ this.flush();
+ }
+ return ret;
+ }
+}
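
QueueExecutorThread's wakeup protocol hinges on ordering: the worker publishes parked = true and only then does a final poll, while producers enqueue first and only then check parked. Either the worker's final poll sees the task, or the producer sees parked and unparks. A condensed sketch of just that handshake (illustrative only; it omits the priority queue, the flush machinery, and shutdown):

```java
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.LockSupport;

final class ParkHandshake {
    final ConcurrentLinkedQueue<Runnable> queue = new ConcurrentLinkedQueue<>();
    final AtomicBoolean parked = new AtomicBoolean();
    volatile Thread worker;

    void submit(final Runnable task) {
        this.queue.add(task);                 // 1) publish the task
        if (this.parked.get() && this.parked.getAndSet(false)) {
            LockSupport.unpark(this.worker);  // 2) wake a parked worker
        }
    }

    void runWorker() {
        this.worker = Thread.currentThread();
        for (;;) {
            Runnable task;
            while ((task = this.queue.poll()) != null) {
                task.run();
            }
            this.parked.set(true);                    // 1) announce intent to park
            if ((task = this.queue.poll()) != null) { // 2) final poll closes the race
                this.parked.set(false);
                task.run();                           // a task slipped in: keep going
                continue;
            }
            do {                                      // park() may return spuriously
                LockSupport.park();
            } while (this.parked.get());
        }
    }
}
```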
|
|
diff --git a/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java b/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..0b060183429f4c72ec767075538477b4302bbf0d
|
|
--- /dev/null
|
|
+++ b/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java
|
|
@@ -0,0 +1,1128 @@
|
|
+package io.papermc.paper.chunk;
|
|
+
|
|
+import com.destroystokyo.paper.util.misc.PlayerAreaMap;
|
|
+import com.destroystokyo.paper.util.misc.PooledLinkedHashSets;
|
|
+import io.papermc.paper.configuration.GlobalConfiguration;
|
|
+import io.papermc.paper.util.CoordinateUtils;
|
|
+import io.papermc.paper.util.IntervalledCounter;
|
|
+import io.papermc.paper.util.TickThread;
|
|
+import it.unimi.dsi.fastutil.longs.LongOpenHashSet;
|
|
+import it.unimi.dsi.fastutil.objects.Reference2IntOpenHashMap;
|
|
+import it.unimi.dsi.fastutil.objects.Reference2ObjectLinkedOpenHashMap;
|
|
+import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet;
|
|
+import net.minecraft.network.protocol.game.ClientboundSetChunkCacheCenterPacket;
|
|
+import net.minecraft.network.protocol.game.ClientboundSetChunkCacheRadiusPacket;
|
|
+import net.minecraft.network.protocol.game.ClientboundSetSimulationDistancePacket;
|
|
+import io.papermc.paper.util.MCUtil;
|
|
+import net.minecraft.server.MinecraftServer;
|
|
+import net.minecraft.server.level.*;
|
|
+import net.minecraft.util.Mth;
|
|
+import net.minecraft.world.level.ChunkPos;
|
|
+import net.minecraft.world.level.chunk.LevelChunk;
|
|
+import org.apache.commons.lang3.mutable.MutableObject;
|
|
+import org.bukkit.craftbukkit.entity.CraftPlayer;
|
|
+import org.bukkit.entity.Player;
|
|
+import java.util.ArrayDeque;
|
|
+import java.util.ArrayList;
|
|
+import java.util.List;
|
|
+import java.util.TreeSet;
|
|
+import java.util.concurrent.atomic.AtomicInteger;
|
|
+
|
|
+public final class PlayerChunkLoader {
|
|
+
|
|
+ public static final int MIN_VIEW_DISTANCE = 2;
|
|
+ public static final int MAX_VIEW_DISTANCE = 32;
|
|
+
|
|
+ public static final int TICK_TICKET_LEVEL = 31;
|
|
+ public static final int LOADED_TICKET_LEVEL = 33;
|
|
+
|
|
+ public static int getTickViewDistance(final Player player) {
|
|
+ return getTickViewDistance(((CraftPlayer)player).getHandle());
|
|
+ }
|
|
+
|
|
+ public static int getTickViewDistance(final ServerPlayer player) {
|
|
+ final ServerLevel level = (ServerLevel)player.level;
|
|
+ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player);
|
|
+ if (data == null) {
|
|
+ return level.chunkSource.chunkMap.playerChunkManager.getTargetTickViewDistance();
|
|
+ }
|
|
+ return data.getTargetTickViewDistance();
|
|
+ }
|
|
+
|
|
+ public static int getLoadViewDistance(final Player player) {
|
|
+ return getLoadViewDistance(((CraftPlayer)player).getHandle());
|
|
+ }
|
|
+
|
|
+ public static int getLoadViewDistance(final ServerPlayer player) {
|
|
+ final ServerLevel level = (ServerLevel)player.level;
|
|
+ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player);
|
|
+ if (data == null) {
|
|
+ return level.chunkSource.chunkMap.playerChunkManager.getLoadDistance();
|
|
+ }
|
|
+ return data.getLoadDistance();
|
|
+ }
|
|
+
|
|
+ public static int getSendViewDistance(final Player player) {
|
|
+ return getSendViewDistance(((CraftPlayer)player).getHandle());
|
|
+ }
|
|
+
|
|
+ public static int getSendViewDistance(final ServerPlayer player) {
|
|
+ final ServerLevel level = (ServerLevel)player.level;
|
|
+ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player);
|
|
+ if (data == null) {
|
|
+ return level.chunkSource.chunkMap.playerChunkManager.getTargetSendDistance();
|
|
+ }
|
|
+ return data.getTargetSendViewDistance();
|
|
+ }
|
|
+
|
|
+ protected final ChunkMap chunkMap;
|
|
+ protected final Reference2ObjectLinkedOpenHashMap<ServerPlayer, PlayerLoaderData> playerMap = new Reference2ObjectLinkedOpenHashMap<>(512, 0.7f);
|
|
+ protected final ReferenceLinkedOpenHashSet<PlayerLoaderData> chunkSendQueue = new ReferenceLinkedOpenHashSet<>(512, 0.7f);
|
|
+
|
|
+ protected final TreeSet<PlayerLoaderData> chunkLoadQueue = new TreeSet<>((final PlayerLoaderData p1, final PlayerLoaderData p2) -> {
|
|
+ if (p1 == p2) {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ final ChunkPriorityHolder holder1 = p1.loadQueue.peekFirst();
|
|
+ final ChunkPriorityHolder holder2 = p2.loadQueue.peekFirst();
|
|
+
|
|
+ final int priorityCompare = Double.compare(holder1 == null ? Double.MAX_VALUE : holder1.priority, holder2 == null ? Double.MAX_VALUE : holder2.priority);
|
|
+
|
|
+ final int lastLoadTimeCompare = Long.compare(p1.lastChunkLoad, p2.lastChunkLoad);
|
|
+
|
|
+ if ((holder1 == null || holder2 == null || lastLoadTimeCompare == 0 || holder1.priority < 0.0 || holder2.priority < 0.0) && priorityCompare != 0) {
|
|
+ return priorityCompare;
|
|
+ }
|
|
+
|
|
+ if (lastLoadTimeCompare != 0) {
|
|
+ return lastLoadTimeCompare;
|
|
+ }
|
|
+
|
|
+ final int idCompare = Integer.compare(p1.player.getId(), p2.player.getId());
|
|
+
|
|
+ if (idCompare != 0) {
|
|
+ return idCompare;
|
|
+ }
|
|
+
|
|
+ // last resort
|
|
+ return Integer.compare(System.identityHashCode(p1), System.identityHashCode(p2));
|
|
+ });
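+ // note (illustrative): this TreeSet is a priority queue whose comparator reads mutable state
+ // (each player's loadQueue head and lastChunkLoad). TreeSet only re-sorts on remove/add, so
+ // callers must remove a PlayerLoaderData *before* mutating that state and re-add it afterwards,
+ // as tryLoadChunks() and PlayerLoaderData.update() below do.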
+
|
|
+ protected final TreeSet<PlayerLoaderData> chunkSendWaitQueue = new TreeSet<>((final PlayerLoaderData p1, final PlayerLoaderData p2) -> {
|
|
+ if (p1 == p2) {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ final int timeCompare = Long.compare(p1.nextChunkSendTarget, p2.nextChunkSendTarget);
|
|
+ if (timeCompare != 0) {
|
|
+ return timeCompare;
|
|
+ }
|
|
+
|
|
+ final int idCompare = Integer.compare(p1.player.getId(), p2.player.getId());
|
|
+
|
|
+ if (idCompare != 0) {
|
|
+ return idCompare;
|
|
+ }
|
|
+
|
|
+ // last resort
|
|
+ return Integer.compare(System.identityHashCode(p1), System.identityHashCode(p2));
|
|
+ });
|
|
+
|
|
+
|
|
+ // no load throttling is applied below the configured min load radius (chunks there get negative priority; see tryLoadChunks)
+
|
|
+ /**
|
|
+ * The chunks to be sent to players, provided they're send-ready. Send-ready means the chunk and its 1 radius neighbours are loaded.
|
|
+ */
|
|
+ public final PlayerAreaMap broadcastMap;
|
|
+
|
|
+ /**
|
|
+ * The chunks to be brought up to send-ready status. Send-ready means the chunk and its 1 radius neighbours are loaded.
|
|
+ */
|
|
+ public final PlayerAreaMap loadMap;
|
|
+
|
|
+ /**
|
|
+ * Areamap used only to remove tickets for send-ready chunks. View distance is always + 1 of load view distance. Thus,
|
|
+ * this map is always representing the chunks we are actually going to load.
|
|
+ */
|
|
+ public final PlayerAreaMap loadTicketCleanup;
|
|
+
|
|
+ /**
|
|
+ * The chunks to be brought to ticking level. Each chunk must have 2 radius neighbours loaded before this can happen.
+ */
|
|
+ public final PlayerAreaMap tickMap;
|
|
+
|
|
+ /**
|
|
+ * -1 if defaulting to [load distance], else always in [2, load distance]
|
|
+ */
|
|
+ protected int rawSendDistance = -1;
|
|
+
|
|
+ /**
|
|
+ * -1 if defaulting to [tick view distance + 1], else always in [tick view distance + 1, 32 + 1]
|
|
+ */
|
|
+ protected int rawLoadDistance = -1;
|
|
+
|
|
+ /**
|
|
+ * Never -1, always in [2, 32]
|
|
+ */
|
|
+ protected int rawTickDistance = -1;
|
|
+
|
|
+ // methods to bridge for API
|
|
+
|
|
+ public int getTargetTickViewDistance() {
|
|
+ return this.getTickDistance();
|
|
+ }
|
|
+
|
|
+ public void setTargetTickViewDistance(final int distance) {
|
|
+ this.setTickDistance(distance);
|
|
+ }
|
|
+
|
|
+ public int getTargetNoTickViewDistance() {
|
|
+ return this.getLoadDistance() - 1;
|
|
+ }
|
|
+
|
|
+ public void setTargetNoTickViewDistance(final int distance) {
|
|
+ this.setLoadDistance(distance == -1 ? -1 : distance + 1);
|
|
+ }
|
|
+
|
|
+ public int getTargetSendDistance() {
|
|
+ return this.rawSendDistance == -1 ? this.getLoadDistance() : this.rawSendDistance;
|
|
+ }
|
|
+
|
|
+ public void setTargetSendDistance(final int distance) {
|
|
+ this.setSendDistance(distance);
|
|
+ }
|
|
+
|
|
+ // internal methods
|
|
+
|
|
+ public int getSendDistance() {
|
|
+ final int loadDistance = this.getLoadDistance();
|
|
+ return this.rawSendDistance == -1 ? loadDistance : Math.min(this.rawSendDistance, loadDistance);
|
|
+ }
|
|
+
|
|
+ public void setSendDistance(final int distance) {
|
|
+ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) {
|
|
+ throw new IllegalArgumentException("Send distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + ", or -1, got: " + distance);
|
|
+ }
|
|
+ this.rawSendDistance = distance;
|
|
+ }
|
|
+
|
|
+ public int getLoadDistance() {
|
|
+ final int tickDistance = this.getTickDistance();
|
|
+ return this.rawLoadDistance == -1 ? tickDistance + 1 : Math.max(tickDistance + 1, this.rawLoadDistance);
|
|
+ }
|
|
+
|
|
+ public void setLoadDistance(final int distance) {
|
|
+ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) {
|
|
+ throw new IllegalArgumentException("Load distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + ", or -1, got: " + distance);
|
|
+ }
|
|
+ this.rawLoadDistance = distance;
|
|
+ }
|
|
+
|
|
+ public int getTickDistance() {
|
|
+ return this.rawTickDistance;
|
|
+ }
|
|
+
|
|
+ public void setTickDistance(final int distance) {
|
|
+ if (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE) {
|
|
+ throw new IllegalArgumentException("View distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + ", got: " + distance);
|
|
+ }
|
|
+ this.rawTickDistance = distance;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ Players have 3 different types of view distance:
|
|
+ 1. Sending view distance
|
|
+ 2. Loading view distance
|
|
+ 3. Ticking view distance
|
|
+
|
|
+ But for configuration purposes (and API) there are:
|
|
+ 1. No-tick view distance
|
|
+ 2. Tick view distance
|
|
+ 3. Broadcast view distance
|
|
+
|
|
+ These aren't always the same as the types we represent internally.
|
|
+
|
|
+ Loading view distance is always max(no-tick + 1, tick + 1)
|
|
+ - no-tick has 1 added because clients need an extra radius to render chunks
|
|
+ - tick has 1 added because it needs an extra radius of chunks to load before they can be marked ticking
|
|
+
|
|
+ Loading view distance is defined as the radius of chunks that will be brought to send-ready status, which means
|
|
+ it loads chunks in radius load-view-distance + 1.
|
|
+
|
|
+ The maximum value for send view distance is the load view distance. API can set it lower.
|
|
+ */
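+ // worked example (illustrative): with tick distance 10 and no overrides, load distance
+ // defaults to 10 + 1 = 11, send distance defaults to the load distance (11), and player
+ // tickets are added out to radius 11 + 1 = 12 (see loadTicketCleanup above)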
+
|
|
+ public PlayerChunkLoader(final ChunkMap chunkMap, final PooledLinkedHashSets<ServerPlayer> pooledHashSets) {
|
|
+ this.chunkMap = chunkMap;
|
|
+ this.broadcastMap = new PlayerAreaMap(pooledHashSets,
|
|
+ null,
|
|
+ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
|
|
+ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> newState) -> {
|
|
+ PlayerChunkLoader.this.onChunkLeave(player, rangeX, rangeZ);
|
|
+ });
|
|
+ this.loadMap = new PlayerAreaMap(pooledHashSets,
|
|
+ null,
|
|
+ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
|
|
+ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> newState) -> {
|
|
+ if (newState != null) {
|
|
+ return;
|
|
+ }
|
|
+ PlayerChunkLoader.this.isTargetedForPlayerLoad.remove(CoordinateUtils.getChunkKey(rangeX, rangeZ));
|
|
+ });
|
|
+ this.loadTicketCleanup = new PlayerAreaMap(pooledHashSets,
|
|
+ null,
|
|
+ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
|
|
+ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> newState) -> {
|
|
+ if (newState != null) {
|
|
+ return;
|
|
+ }
|
|
+ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ);
|
|
+ PlayerChunkLoader.this.chunkMap.level.getChunkSource().removeTicketAtLevel(TicketType.PLAYER, chunkPos, LOADED_TICKET_LEVEL, chunkPos);
|
|
+ if (PlayerChunkLoader.this.chunkTicketTracker.remove(chunkPos.toLong())) {
|
|
+ --PlayerChunkLoader.this.concurrentChunkLoads;
|
|
+ }
|
|
+ });
|
|
+ this.tickMap = new PlayerAreaMap(pooledHashSets,
|
|
+ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
|
|
+ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> newState) -> {
|
|
+ if (newState.size() != 1) {
|
|
+ return;
|
|
+ }
|
|
+ LevelChunk chunk = PlayerChunkLoader.this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(rangeX, rangeZ);
|
|
+ if (chunk == null || !chunk.areNeighboursLoaded(2)) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ);
|
|
+ PlayerChunkLoader.this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos);
|
|
+ },
|
|
+ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ,
|
|
+ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> newState) -> {
|
|
+ if (newState != null) {
|
|
+ return;
|
|
+ }
|
|
+ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ);
|
|
+ PlayerChunkLoader.this.chunkMap.level.getChunkSource().removeTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos);
|
|
+ });
|
|
+ }
|
|
+
|
|
+ protected final LongOpenHashSet isTargetedForPlayerLoad = new LongOpenHashSet();
|
|
+ protected final LongOpenHashSet chunkTicketTracker = new LongOpenHashSet();
|
|
+
|
|
+ public boolean isChunkNearPlayers(final int chunkX, final int chunkZ) {
|
|
+ final PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> playersInSendRange = this.broadcastMap.getObjectsInRange(chunkX, chunkZ);
|
|
+
|
|
+ return playersInSendRange != null;
|
|
+ }
|
|
+
|
|
+ public void onChunkPostProcessing(final int chunkX, final int chunkZ) {
|
|
+ this.onChunkSendReady(chunkX, chunkZ);
|
|
+ }
|
|
+
|
|
+ private boolean chunkNeedsPostProcessing(final int chunkX, final int chunkZ) {
|
|
+ final long key = CoordinateUtils.getChunkKey(chunkX, chunkZ);
|
|
+ final ChunkHolder chunk = this.chunkMap.getVisibleChunkIfPresent(key);
|
|
+
|
|
+ if (chunk == null) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final LevelChunk levelChunk = chunk.getSendingChunk();
|
|
+
|
|
+ return levelChunk != null && !levelChunk.isPostProcessingDone;
|
|
+ }
|
|
+
|
|
+ // returns whether the chunk is at a loaded stage that is ready to be sent to players
+ public boolean isChunkPlayerLoaded(final int chunkX, final int chunkZ) {
|
|
+ final long key = CoordinateUtils.getChunkKey(chunkX, chunkZ);
|
|
+ final ChunkHolder chunk = this.chunkMap.getVisibleChunkIfPresent(key);
|
|
+
|
|
+ if (chunk == null) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final LevelChunk levelChunk = chunk.getSendingChunk();
|
|
+
|
|
+ return levelChunk != null && levelChunk.isPostProcessingDone && this.isTargetedForPlayerLoad.contains(key);
|
|
+ }
|
|
+
|
|
+ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ, final boolean borderOnly) {
|
|
+ return borderOnly ? this.isChunkSentBorderOnly(player, chunkX, chunkZ) : this.isChunkSent(player, chunkX, chunkZ);
|
|
+ }
|
|
+
|
|
+ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ) {
|
|
+ final PlayerLoaderData data = this.playerMap.get(player);
|
|
+ if (data == null) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return data.hasSentChunk(chunkX, chunkZ);
|
|
+ }
|
|
+
|
|
+ public boolean isChunkSentBorderOnly(final ServerPlayer player, final int chunkX, final int chunkZ) {
|
|
+ final PlayerLoaderData data = this.playerMap.get(player);
|
|
+ if (data == null) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final boolean center = data.hasSentChunk(chunkX, chunkZ);
|
|
+ if (!center) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return !(data.hasSentChunk(chunkX - 1, chunkZ) && data.hasSentChunk(chunkX + 1, chunkZ) &&
|
|
+ data.hasSentChunk(chunkX, chunkZ - 1) && data.hasSentChunk(chunkX, chunkZ + 1));
|
|
+ }
|
|
+
|
|
+ protected int getMaxConcurrentChunkSends() {
|
|
+ return GlobalConfiguration.get().chunkLoading.maxConcurrentSends;
|
|
+ }
|
|
+
|
|
+ protected int getMaxChunkLoads() {
|
|
+ double config = GlobalConfiguration.get().chunkLoading.playerMaxConcurrentLoads;
|
|
+ double max = GlobalConfiguration.get().chunkLoading.globalMaxConcurrentLoads;
|
|
+ return (int)Math.ceil(Math.min(config * MinecraftServer.getServer().getPlayerCount(), max <= 1.0 ? Double.MAX_VALUE : max));
|
|
+ }
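+ // e.g. (illustrative): playerMaxConcurrentLoads = 4.0 with 10 players and
+ // globalMaxConcurrentLoads = 30.0 gives ceil(min(40.0, 30.0)) = 30; a global value
+ // <= 1.0 is treated as "no global cap" via the Double.MAX_VALUE branch above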
+
|
|
+ protected long getTargetSendPerPlayerAddend() {
|
|
+ return GlobalConfiguration.get().chunkLoading.targetPlayerChunkSendRate <= 1.0 ? 0L : (long)Math.round(1.0e9 / GlobalConfiguration.get().chunkLoading.targetPlayerChunkSendRate);
|
|
+ }
|
|
+
|
|
+ protected long getMaxSendAddend() {
|
|
+ return GlobalConfiguration.get().chunkLoading.globalMaxChunkSendRate <= 1.0 ? 0L : (long)Math.round(1.0e9 / GlobalConfiguration.get().chunkLoading.globalMaxChunkSendRate);
|
|
+ }
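+ // both addends convert a configured rate in chunks/second into the minimum delay in
+ // nanoseconds between operations, e.g. (illustrative) a rate of 100.0 ->
+ // round(1.0e9 / 100.0) = 10_000_000ns = 10ms; rates <= 1.0 disable throttling (addend 0)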
+
|
|
+ public void onChunkPlayerTickReady(final int chunkX, final int chunkZ) {
|
|
+ final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ);
|
|
+ this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos);
|
|
+ }
|
|
+
|
|
+ public void onChunkSendReady(final int chunkX, final int chunkZ) {
|
|
+ final PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> playersInSendRange = this.broadcastMap.getObjectsInRange(chunkX, chunkZ);
|
|
+
|
|
+ if (playersInSendRange == null) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final Object[] rawData = playersInSendRange.getBackingSet();
|
|
+ for (int i = 0, len = rawData.length; i < len; ++i) {
|
|
+ final Object raw = rawData[i];
|
|
+
|
|
+ if (!(raw instanceof ServerPlayer)) {
|
|
+ continue;
|
|
+ }
|
|
+ this.onChunkSendReady((ServerPlayer)raw, chunkX, chunkZ);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public void onChunkSendReady(final ServerPlayer player, final int chunkX, final int chunkZ) {
|
|
+ final PlayerLoaderData data = this.playerMap.get(player);
|
|
+
|
|
+ if (data == null) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (data.hasSentChunk(chunkX, chunkZ) || !this.isChunkPlayerLoaded(chunkX, chunkZ)) {
|
|
+ // if we don't have player tickets, then the load logic will pick this up and queue to send
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (!data.chunksToBeSent.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
|
|
+ // don't queue to send, we don't want the chunk
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final long playerPos = this.broadcastMap.getLastCoordinate(player);
|
|
+ final int playerChunkX = CoordinateUtils.getChunkX(playerPos);
|
|
+ final int playerChunkZ = CoordinateUtils.getChunkZ(playerPos);
|
|
+ final int manhattanDistance = Math.abs(playerChunkX - chunkX) + Math.abs(playerChunkZ - chunkZ);
|
|
+
|
|
+ final ChunkPriorityHolder holder = new ChunkPriorityHolder(chunkX, chunkZ, manhattanDistance, 0.0);
|
|
+ data.sendQueue.add(holder);
|
|
+ }
|
|
+
|
|
+ public void onChunkLoad(final int chunkX, final int chunkZ) {
|
|
+ if (this.chunkTicketTracker.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
|
|
+ --this.concurrentChunkLoads;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public void onChunkLeave(final ServerPlayer player, final int chunkX, final int chunkZ) {
|
|
+ final PlayerLoaderData data = this.playerMap.get(player);
|
|
+
|
|
+ if (data == null) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ data.unloadChunk(chunkX, chunkZ);
|
|
+ }
|
|
+
|
|
+ public void addPlayer(final ServerPlayer player) {
|
|
+ TickThread.ensureTickThread("Cannot add player async");
|
|
+ if (!player.isRealPlayer) {
|
|
+ return;
|
|
+ }
|
|
+ final PlayerLoaderData data = new PlayerLoaderData(player, this);
|
|
+ if (this.playerMap.putIfAbsent(player, data) == null) {
|
|
+ data.update();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public void removePlayer(final ServerPlayer player) {
|
|
+ TickThread.ensureTickThread("Cannot remove player async");
|
|
+ if (!player.isRealPlayer) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final PlayerLoaderData loaderData = this.playerMap.remove(player);
|
|
+ if (loaderData == null) {
|
|
+ return;
|
|
+ }
|
|
+ loaderData.remove();
|
|
+ this.chunkLoadQueue.remove(loaderData);
|
|
+ this.chunkSendQueue.remove(loaderData);
|
|
+ this.chunkSendWaitQueue.remove(loaderData);
|
|
+ synchronized (this.sendingChunkCounts) {
|
|
+ final int count = this.sendingChunkCounts.removeInt(loaderData);
|
|
+ if (count != 0) {
|
|
+ concurrentChunkSends.getAndAdd(-count);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public void updatePlayer(final ServerPlayer player) {
|
|
+ TickThread.ensureTickThread("Cannot update player async");
|
|
+ if (!player.isRealPlayer) {
|
|
+ return;
|
|
+ }
|
|
+ final PlayerLoaderData loaderData = this.playerMap.get(player);
|
|
+ if (loaderData != null) {
|
|
+ loaderData.update();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public PlayerLoaderData getData(final ServerPlayer player) {
|
|
+ return this.playerMap.get(player);
|
|
+ }
|
|
+
|
|
+ public void tick() {
|
|
+ TickThread.ensureTickThread("Cannot tick async");
|
|
+ for (final PlayerLoaderData data : this.playerMap.values()) {
|
|
+ data.update();
|
|
+ }
|
|
+ this.tickMidTick();
|
|
+ }
|
|
+
|
|
+ protected static final AtomicInteger concurrentChunkSends = new AtomicInteger();
|
|
+ protected final Reference2IntOpenHashMap<PlayerLoaderData> sendingChunkCounts = new Reference2IntOpenHashMap<>();
|
|
+ private static long nextChunkSend;
|
|
+ private void trySendChunks() {
|
|
+ final long time = System.nanoTime();
|
|
+ if (time < nextChunkSend) {
|
|
+ return;
|
|
+ }
|
|
+ // drain entries from wait queue
|
|
+ while (!this.chunkSendWaitQueue.isEmpty()) {
|
|
+ final PlayerLoaderData data = this.chunkSendWaitQueue.first();
|
|
+
|
|
+ if (data.nextChunkSendTarget > time) {
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ this.chunkSendWaitQueue.pollFirst();
|
|
+
|
|
+ this.chunkSendQueue.add(data);
|
|
+ }
|
|
+
|
|
+ if (this.chunkSendQueue.isEmpty()) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final int maxSends = this.getMaxConcurrentChunkSends();
|
|
+ final long nextPlayerDeadline = this.getTargetSendPerPlayerAddend() + time;
|
|
+ for (;;) {
|
|
+ if (this.chunkSendQueue.isEmpty()) {
|
|
+ break;
|
|
+ }
|
|
+ final int currSends = concurrentChunkSends.get();
|
|
+ if (currSends >= maxSends) {
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (!concurrentChunkSends.compareAndSet(currSends, currSends + 1)) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ // send chunk
|
|
+
|
|
+ final PlayerLoaderData data = this.chunkSendQueue.removeFirst();
|
|
+
|
|
+ final ChunkPriorityHolder queuedSend = data.sendQueue.pollFirst();
|
|
+ if (queuedSend == null) {
|
|
+ concurrentChunkSends.getAndDecrement(); // we never sent, so decrease
|
|
+ // stop iterating over players who have nothing to send
|
|
+ if (this.chunkSendQueue.isEmpty()) {
|
|
+ // nothing left
|
|
+ break;
|
|
+ }
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (!this.isChunkPlayerLoaded(queuedSend.chunkX, queuedSend.chunkZ)) {
|
|
+ throw new IllegalStateException();
|
|
+ }
|
|
+
|
|
+ data.nextChunkSendTarget = nextPlayerDeadline;
|
|
+ this.chunkSendWaitQueue.add(data);
|
|
+
|
|
+ synchronized (this.sendingChunkCounts) {
|
|
+ this.sendingChunkCounts.addTo(data, 1);
|
|
+ }
|
|
+
|
|
+ data.sendChunk(queuedSend.chunkX, queuedSend.chunkZ, () -> {
|
|
+ synchronized (this.sendingChunkCounts) {
|
|
+ final int count = this.sendingChunkCounts.getInt(data);
|
|
+ if (count == 0) {
|
|
+ // disconnected, so we don't need to decrement: it will be decremented for us
|
|
+ return;
|
|
+ }
|
|
+ if (count == 1) {
|
|
+ this.sendingChunkCounts.removeInt(data);
|
|
+ } else {
|
|
+ this.sendingChunkCounts.put(data, count - 1);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ concurrentChunkSends.getAndDecrement();
|
|
+ });
|
|
+
|
|
+ nextChunkSend = this.getMaxSendAddend() + time;
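+ // note: 'time' was captured before this loop, so a positive addend (i.e. a configured
+ // global send rate) makes the break below trigger immediately - at most one send is
+ // started per invocation in that case, and the rest wait for the next tickMidTick()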
+ if (time < nextChunkSend) {
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected int concurrentChunkLoads;
|
|
+ // this interval prevents bursting a lot of chunk loads
|
|
+ protected static final IntervalledCounter TICKET_ADDITION_COUNTER_SHORT = new IntervalledCounter((long)(1.0e6 * 50.0)); // 50ms
|
|
+ // this interval ensures the rate is kept between ticks correctly
|
|
+ protected static final IntervalledCounter TICKET_ADDITION_COUNTER_LONG = new IntervalledCounter((long)(1.0e6 * 1000.0)); // 1000ms
|
|
+ private void tryLoadChunks() {
|
|
+ if (this.chunkLoadQueue.isEmpty()) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final int maxLoads = this.getMaxChunkLoads();
|
|
+ final long time = System.nanoTime();
|
|
+ boolean updatedCounters = false;
|
|
+ for (;;) {
|
|
+ final PlayerLoaderData data = this.chunkLoadQueue.pollFirst();
|
|
+
|
|
+ data.lastChunkLoad = time;
|
|
+
|
|
+ final ChunkPriorityHolder queuedLoad = data.loadQueue.peekFirst();
|
|
+ if (queuedLoad == null) {
|
|
+ if (this.chunkLoadQueue.isEmpty()) {
|
|
+ break;
|
|
+ }
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (!updatedCounters) {
|
|
+ updatedCounters = true;
|
|
+ TICKET_ADDITION_COUNTER_SHORT.updateCurrentTime(time);
|
|
+ TICKET_ADDITION_COUNTER_LONG.updateCurrentTime(time);
|
|
+ data.ticketAdditionCounterShort.updateCurrentTime(time);
|
|
+ data.ticketAdditionCounterLong.updateCurrentTime(time);
|
|
+ }
|
|
+
|
|
+ if (this.isChunkPlayerLoaded(queuedLoad.chunkX, queuedLoad.chunkZ)) {
|
|
+ // already loaded!
|
|
+ data.loadQueue.pollFirst(); // already loaded so we just skip
|
|
+ this.chunkLoadQueue.add(data);
|
|
+
|
|
+ // ensure the chunk is queued to send
|
|
+ this.onChunkSendReady(queuedLoad.chunkX, queuedLoad.chunkZ);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ final long chunkKey = CoordinateUtils.getChunkKey(queuedLoad.chunkX, queuedLoad.chunkZ);
|
|
+
|
|
+ final double priority = queuedLoad.priority;
|
|
+ // while we do need to rate limit chunk loads, the logic for sending chunks requires that tickets are present.
|
|
+ // when chunks are loaded (i.e. spawn) but do not have this player's tickets, they have to wait behind the
+ // load queue. To avoid this problem, we check early here if tickets are required to load the chunk - if they
|
|
+ // aren't required, it bypasses the limiter system.
|
|
+ boolean unloadedTargetChunk = false;
|
|
+ unloaded_check:
|
|
+ for (int dz = -1; dz <= 1; ++dz) {
|
|
+ for (int dx = -1; dx <= 1; ++dx) {
|
|
+ final int offX = queuedLoad.chunkX + dx;
|
|
+ final int offZ = queuedLoad.chunkZ + dz;
|
|
+ if (this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(offX, offZ) == null) {
|
|
+ unloadedTargetChunk = true;
|
|
+ break unloaded_check;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (unloadedTargetChunk && priority >= 0.0) {
|
|
+ // priority >= 0.0 implies rate limited chunks
|
|
+
|
|
+ final int currentChunkLoads = this.concurrentChunkLoads;
|
|
+ if (currentChunkLoads >= maxLoads || (GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate > 0 && (TICKET_ADDITION_COUNTER_SHORT.getRate() >= GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate || TICKET_ADDITION_COUNTER_LONG.getRate() >= GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate))
|
|
+ || (GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate > 0.0 && (data.ticketAdditionCounterShort.getRate() >= GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate || data.ticketAdditionCounterLong.getRate() >= GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate))) {
|
|
+ // don't poll, we didn't load it
|
|
+ this.chunkLoadQueue.add(data);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // can only poll after we decide to load
|
|
+ data.loadQueue.pollFirst();
|
|
+
|
|
+ // now that we've polled we can re-add to load queue
|
|
+ this.chunkLoadQueue.add(data);
|
|
+
|
|
+ // add necessary tickets to load chunk up to send-ready
|
|
+ for (int dz = -1; dz <= 1; ++dz) {
|
|
+ for (int dx = -1; dx <= 1; ++dx) {
|
|
+ final int offX = queuedLoad.chunkX + dx;
|
|
+ final int offZ = queuedLoad.chunkZ + dz;
|
|
+ final ChunkPos chunkPos = new ChunkPos(offX, offZ);
|
|
+
|
|
+ this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, LOADED_TICKET_LEVEL, chunkPos);
|
|
+ if (this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(offX, offZ) != null) {
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (priority > 0.0 && this.chunkTicketTracker.add(CoordinateUtils.getChunkKey(offX, offZ))) {
|
|
+ // won't reach here if unloadedTargetChunk is false
|
|
+ ++this.concurrentChunkLoads;
|
|
+ TICKET_ADDITION_COUNTER_SHORT.addTime(time);
|
|
+ TICKET_ADDITION_COUNTER_LONG.addTime(time);
|
|
+ data.ticketAdditionCounterShort.addTime(time);
|
|
+ data.ticketAdditionCounterLong.addTime(time);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // mark that we've added tickets here
|
|
+ this.isTargetedForPlayerLoad.add(chunkKey);
|
|
+
|
|
+ // it's possible all we needed was the player tickets to queue up the send.
|
|
+ if (this.isChunkPlayerLoaded(queuedLoad.chunkX, queuedLoad.chunkZ)) {
|
|
+ // yup, all we needed.
|
|
+ this.onChunkSendReady(queuedLoad.chunkX, queuedLoad.chunkZ);
|
|
+ } else if (this.chunkNeedsPostProcessing(queuedLoad.chunkX, queuedLoad.chunkZ)) {
|
|
+ // requires post processing
|
|
+ this.chunkMap.mainThreadExecutor.execute(() -> {
|
|
+ final long key = CoordinateUtils.getChunkKey(queuedLoad.chunkX, queuedLoad.chunkZ);
|
|
+ final ChunkHolder holder = PlayerChunkLoader.this.chunkMap.getVisibleChunkIfPresent(key);
|
|
+
|
|
+ if (holder == null) {
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final LevelChunk chunk = holder.getSendingChunk();
|
|
+
|
|
+ if (chunk != null && !chunk.isPostProcessingDone) {
|
|
+ chunk.postProcessGeneration();
|
|
+ }
|
|
+ });
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public void tickMidTick() {
|
|
+ // try to send more chunks
|
|
+ this.trySendChunks();
|
|
+
|
|
+ // try to queue more chunks to load
|
|
+ this.tryLoadChunks();
|
|
+ }
|
|
+
|
|
+ static final class ChunkPriorityHolder {
|
|
+ public final int chunkX;
|
|
+ public final int chunkZ;
|
|
+ public final int manhattanDistanceToPlayer;
|
|
+ public final double priority;
|
|
+
|
|
+ public ChunkPriorityHolder(final int chunkX, final int chunkZ, final int manhattanDistanceToPlayer, final double priority) {
|
|
+ this.chunkX = chunkX;
|
|
+ this.chunkZ = chunkZ;
|
|
+ this.manhattanDistanceToPlayer = manhattanDistanceToPlayer;
|
|
+ this.priority = priority;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public static final class PlayerLoaderData {
|
|
+
|
|
+ protected static final float FOV = 110.0f;
|
|
+ protected static final double PRIORITISED_DISTANCE = 12.0 * 16.0;
|
|
+
|
|
+ // Player max sprint speed is approximately 8m/s
|
|
+ protected static final double LOOK_PRIORITY_SPEED_THRESHOLD = (10.0/20.0) * (10.0/20.0);
|
|
+ protected static final double LOOK_PRIORITY_YAW_DELTA_RECALC_THRESHOLD = 3.0f;
|
|
+
|
|
+ protected double lastLocX = Double.NEGATIVE_INFINITY;
|
|
+ protected double lastLocZ = Double.NEGATIVE_INFINITY;
|
|
+
|
|
+ protected int lastChunkX = Integer.MIN_VALUE;
|
|
+ protected int lastChunkZ = Integer.MIN_VALUE;
|
|
+
|
|
+ // this is corrected so that 0 is along the positive x-axis
|
|
+ protected float lastYaw = Float.NEGATIVE_INFINITY;
|
|
+
|
|
+ protected int lastSendDistance = Integer.MIN_VALUE;
|
|
+ protected int lastLoadDistance = Integer.MIN_VALUE;
|
|
+ protected int lastTickDistance = Integer.MIN_VALUE;
|
|
+ protected boolean usingLookingPriority;
|
|
+
|
|
+ protected final ServerPlayer player;
|
|
+ protected final PlayerChunkLoader loader;
|
|
+
|
|
+ // warning: modifications of this field must be aware that the loadQueue inside PlayerChunkLoader uses this field
|
|
+ // in a comparator!
|
|
+ protected final ArrayDeque<ChunkPriorityHolder> loadQueue = new ArrayDeque<>();
|
|
+ protected final LongOpenHashSet sentChunks = new LongOpenHashSet();
|
|
+ protected final LongOpenHashSet chunksToBeSent = new LongOpenHashSet();
|
|
+
|
|
+ protected final TreeSet<ChunkPriorityHolder> sendQueue = new TreeSet<>((final ChunkPriorityHolder p1, final ChunkPriorityHolder p2) -> {
|
|
+ final int distanceCompare = Integer.compare(p1.manhattanDistanceToPlayer, p2.manhattanDistanceToPlayer);
|
|
+ if (distanceCompare != 0) {
|
|
+ return distanceCompare;
|
|
+ }
|
|
+
|
|
+ final int coordinateXCompare = Integer.compare(p1.chunkX, p2.chunkX);
|
|
+ if (coordinateXCompare != 0) {
|
|
+ return coordinateXCompare;
|
|
+ }
|
|
+
|
|
+ return Integer.compare(p1.chunkZ, p2.chunkZ);
|
|
+ });
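+ // nearest chunks first; the chunkX/chunkZ tie-breakers keep compare() from returning 0 for
+ // distinct holders, which TreeSet requires - entries comparing equal would be treated as
+ // duplicates and silently dropped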
+
|
|
+ protected int sendViewDistance = -1;
|
|
+ protected int loadViewDistance = -1;
|
|
+ protected int tickViewDistance = -1;
|
|
+
|
|
+ protected long nextChunkSendTarget;
|
|
+
|
|
+ // this interval prevents bursting a lot of chunk loads
|
|
+ protected final IntervalledCounter ticketAdditionCounterShort = new IntervalledCounter((long)(1.0e6 * 50.0)); // 50ms
|
|
+ // this ensures the rate is kept between ticks correctly
|
|
+ protected final IntervalledCounter ticketAdditionCounterLong = new IntervalledCounter((long)(1.0e6 * 1000.0)); // 1000ms
|
|
+
|
|
+ public long lastChunkLoad;
|
|
+
|
|
+ public PlayerLoaderData(final ServerPlayer player, final PlayerChunkLoader loader) {
|
|
+ this.player = player;
|
|
+ this.loader = loader;
|
|
+ }
|
|
+
|
|
+ // these view distance methods are for api
|
|
+ public int getTargetSendViewDistance() {
|
|
+ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
|
|
+ final int loadViewDistance = Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance);
|
|
+ final int clientViewDistance = this.getClientViewDistance();
|
|
+ final int sendViewDistance = Math.min(loadViewDistance, this.sendViewDistance == -1 ? (!GlobalConfiguration.get().chunkLoading.autoconfigSendDistance || clientViewDistance == -1 ? this.loader.getSendDistance() : clientViewDistance + 1) : this.sendViewDistance);
|
|
+ return sendViewDistance;
|
|
+ }
|
|
+
|
|
+ public void setTargetSendViewDistance(final int distance) {
|
|
+ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) {
|
|
+ throw new IllegalArgumentException("Send view distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance);
|
|
+ }
|
|
+ this.sendViewDistance = distance;
|
|
+ }
|
|
+
|
|
+ public int getTargetNoTickViewDistance() {
|
|
+ return (this.loadViewDistance == -1 ? this.getLoadDistance() : this.loadViewDistance) - 1;
|
|
+ }
|
|
+
|
|
+ public void setTargetNoTickViewDistance(final int distance) {
|
|
+ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE)) {
|
|
+ throw new IllegalArgumentException("Simulation distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + " or -1, got: " + distance);
|
|
+ }
|
|
+ this.loadViewDistance = distance == -1 ? -1 : distance + 1;
|
|
+ }
|
|
+
|
|
+ public int getTargetTickViewDistance() {
|
|
+ return this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
|
|
+ }
|
|
+
|
|
+ public void setTargetTickViewDistance(final int distance) {
|
|
+ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE)) {
|
|
+ throw new IllegalArgumentException("View distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + " or -1, got: " + distance);
|
|
+ }
|
|
+ this.tickViewDistance = distance;
|
|
+ }
|
|
+
|
|
+ protected int getLoadDistance() {
|
|
+ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
|
|
+
|
|
+ return Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance);
|
|
+ }
|
|
+
|
|
+ public boolean hasSentChunk(final int chunkX, final int chunkZ) {
|
|
+ return this.sentChunks.contains(CoordinateUtils.getChunkKey(chunkX, chunkZ));
|
|
+ }
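+ // CoordinateUtils.getChunkKey packs both 32-bit chunk coordinates into a single long
+ // (conventionally (z << 32) | (x & 0xFFFFFFFFL)), letting sentChunks/chunksToBeSent use
+ // primitive LongOpenHashSets instead of boxing a ChunkPos per lookup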
+
|
|
+ public void sendChunk(final int chunkX, final int chunkZ, final Runnable onChunkSend) {
|
|
+ if (this.sentChunks.add(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
|
|
+ this.player.getLevel().getChunkSource().chunkMap.updateChunkTracking(this.player,
|
|
+ new ChunkPos(chunkX, chunkZ), new MutableObject<>(), false, true); // unloaded, loaded
|
|
+ this.player.connection.connection.execute(onChunkSend);
|
|
+ } else {
|
|
+ throw new IllegalStateException();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public void unloadChunk(final int chunkX, final int chunkZ) {
|
|
+ if (this.sentChunks.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) {
|
|
+ this.player.getLevel().getChunkSource().chunkMap.updateChunkTracking(this.player,
|
|
+ new ChunkPos(chunkX, chunkZ), null, true, false); // unloaded, loaded
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected static boolean wantChunkLoaded(final int centerX, final int centerZ, final int chunkX, final int chunkZ,
|
|
+ final int sendRadius) {
|
|
+ // expect sendRadius to be = 1 + target viewable radius
|
|
+ return ChunkMap.isChunkInRange(chunkX, chunkZ, centerX, centerZ, sendRadius);
|
|
+ }
|
|
+
|
|
+ protected static boolean triangleIntersects(final double p1x, final double p1z, // triangle point
|
|
+ final double p2x, final double p2z, // triangle point
|
|
+ final double p3x, final double p3z, // triangle point
|
|
+
|
|
+ final double targetX, final double targetZ) { // point
|
|
+ // from barycentric coordinates:
|
|
+ // targetX = a*p1x + b*p2x + c*p3x
|
|
+ // targetZ = a*p1z + b*p2z + c*p3z
|
|
+ // 1.0 = a*1.0 + b*1.0 + c*1.0
|
|
+ // where a, b, c >= 0.0
|
|
+ // so, if any of a, b, c are less-than zero then there is no intersection.
|
|
+
|
|
+ // d = ((p2z - p3z)(p1x - p3x) + (p3x - p2x)(p1z - p3z))
|
|
+ // a = ((p2z - p3z)(targetX - p3x) + (p3x - p2x)(targetZ - p3z)) / d
|
|
+ // b = ((p3z - p1z)(targetX - p3x) + (p1x - p3x)(targetZ - p3z)) / d
|
|
+ // c = 1.0 - a - b
|
|
+
|
|
+ final double d = (p2z - p3z)*(p1x - p3x) + (p3x - p2x)*(p1z - p3z);
|
|
+ final double a = ((p2z - p3z)*(targetX - p3x) + (p3x - p2x)*(targetZ - p3z)) / d;
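+ // note: a degenerate (collinear) triangle makes d zero, so a, b and c come out NaN or
+ // infinite; the range checks below can then never all pass, and the method returns false
+ // rather than failing on the division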
+
|
|
+ if (a < 0.0 || a > 1.0) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final double b = ((p3z - p1z)*(targetX - p3x) + (p1x - p3x)*(targetZ - p3z)) / d;
|
|
+ if (b < 0.0 || b > 1.0) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ final double c = 1.0 - a - b;
|
|
+
|
|
+ return c >= 0.0 && c <= 1.0;
|
|
+ }
|
|
+
|
|
+ public void remove() {
|
|
+ this.loader.broadcastMap.remove(this.player);
|
|
+ this.loader.loadMap.remove(this.player);
|
|
+ this.loader.loadTicketCleanup.remove(this.player);
|
|
+ this.loader.tickMap.remove(this.player);
|
|
+ }
|
|
+
|
|
+ protected int getClientViewDistance() {
|
|
+ return this.player.clientViewDistance == null ? -1 : Math.max(0, this.player.clientViewDistance.intValue());
|
|
+ }
|
|
+
|
|
+ public void update() {
|
|
+ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance;
|
|
+ // load view cannot be less-than tick view + 1
|
|
+ final int loadViewDistance = Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance);
|
|
+ // send view cannot be greater-than load view
|
|
+ final int clientViewDistance = this.getClientViewDistance();
|
|
+ final int sendViewDistance = Math.min(loadViewDistance, this.sendViewDistance == -1 ? (!GlobalConfiguration.get().chunkLoading.autoconfigSendDistance || clientViewDistance == -1 ? this.loader.getSendDistance() : clientViewDistance + 1) : this.sendViewDistance);
|
|
+
|
|
+ final double posX = this.player.getX();
|
|
+ final double posZ = this.player.getZ();
|
|
+ final float yaw = MCUtil.normalizeYaw(this.player.getYRot() + 90.0f); // mc yaw 0 is along the positive z axis, but obviously this is really dumb - offset so we are at positive x-axis
|
|
+
|
|
+ // in general, we really only want to prioritise chunks in front if we know we're moving pretty fast into them.
|
|
+ final boolean useLookPriority = GlobalConfiguration.get().chunkLoading.enableFrustumPriority && (this.player.getDeltaMovement().horizontalDistanceSqr() > LOOK_PRIORITY_SPEED_THRESHOLD ||
|
|
+ this.player.getAbilities().flying);
|
|
+
|
|
+ // make sure we're in the send queue
|
|
+ this.loader.chunkSendWaitQueue.add(this);
|
|
+
|
|
+ if (
|
|
+ // has view distance stayed the same?
|
|
+ sendViewDistance == this.lastSendDistance
|
|
+ && loadViewDistance == this.lastLoadDistance
|
|
+ && tickViewDistance == this.lastTickDistance
|
|
+
|
|
+ && (this.usingLookingPriority ? (
|
|
+ // has our block stayed the same (this also accounts for chunk change)?
|
|
+ Mth.floor(this.lastLocX) == Mth.floor(posX)
|
|
+ && Mth.floor(this.lastLocZ) == Mth.floor(posZ)
|
|
+ ) : (
|
|
+ // has our chunk stayed the same
|
|
+ (Mth.floor(this.lastLocX) >> 4) == (Mth.floor(posX) >> 4)
|
|
+ && (Mth.floor(this.lastLocZ) >> 4) == (Mth.floor(posZ) >> 4)
|
|
+ ))
|
|
+
|
|
+ // has our decision about look priority changed?
|
|
+ && this.usingLookingPriority == useLookPriority
|
|
+
|
|
+ // if we are currently using look priority, has our yaw stayed within recalc threshold?
|
|
+ && (!this.usingLookingPriority || Math.abs(yaw - this.lastYaw) <= LOOK_PRIORITY_YAW_DELTA_RECALC_THRESHOLD)
|
|
+ ) {
|
|
+ // nothing we care about changed, so we're not re-calculating
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ final int centerChunkX = Mth.floor(posX) >> 4;
|
|
+ final int centerChunkZ = Mth.floor(posZ) >> 4;
|
|
+
|
|
+ final boolean needsChunkCenterUpdate = (centerChunkX != this.lastChunkX) || (centerChunkZ != this.lastChunkZ);
|
|
+ this.loader.broadcastMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, sendViewDistance);
|
|
+ this.loader.loadMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, loadViewDistance);
|
|
+ this.loader.loadTicketCleanup.addOrUpdate(this.player, centerChunkX, centerChunkZ, loadViewDistance + 1);
|
|
+ this.loader.tickMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, tickViewDistance);
|
|
+
|
|
+ if (sendViewDistance != this.lastSendDistance) {
|
|
+ // update the view radius for client
|
|
+ // note that this should be after the map calls because the client won't expect unload calls not in its VD
+ // and it's possible we decreased VD here
|
|
+ this.player.connection.send(new ClientboundSetChunkCacheRadiusPacket(sendViewDistance));
|
|
+ }
|
|
+ if (tickViewDistance != this.lastTickDistance) {
|
|
+ this.player.connection.send(new ClientboundSetSimulationDistancePacket(tickViewDistance));
|
|
+ }
|
|
+
|
|
+ this.lastLocX = posX;
|
|
+ this.lastLocZ = posZ;
|
|
+ this.lastYaw = yaw;
|
|
+ this.lastSendDistance = sendViewDistance;
|
|
+ this.lastLoadDistance = loadViewDistance;
|
|
+ this.lastTickDistance = tickViewDistance;
|
|
+ this.usingLookingPriority = useLookPriority;
|
|
+
|
|
+ this.lastChunkX = centerChunkX;
|
|
+ this.lastChunkZ = centerChunkZ;
|
|
+
|
|
+ // points for player "view" triangle:
|
|
+
|
|
+ // obviously, the player pos is a vertex
|
|
+ final double p1x = posX;
|
|
+ final double p1z = posZ;
|
|
+
|
|
+ // to the left of the looking direction
|
|
+ final double p2x = PRIORITISED_DISTANCE * Math.cos(Math.toRadians(yaw + (double)(FOV / 2.0))) // calculate rotated vector
|
|
+ + p1x; // offset vector
|
|
+ final double p2z = PRIORITISED_DISTANCE * Math.sin(Math.toRadians(yaw + (double)(FOV / 2.0))) // calculate rotated vector
|
|
+ + p1z; // offset vector
|
|
+
|
|
+ // to the right of the looking direction
|
|
+ final double p3x = PRIORITISED_DISTANCE * Math.cos(Math.toRadians(yaw - (double)(FOV / 2.0))) // calculate rotated vector
|
|
+ + p1x; // offset vector
|
|
+ final double p3z = PRIORITISED_DISTANCE * Math.sin(Math.toRadians(yaw - (double)(FOV / 2.0))) // calculate rotated vector
|
|
+ + p1z; // offset vector
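+
+ // e.g. (illustrative): each far vertex sits PRIORITISED_DISTANCE = 192 blocks from the
+ // player, rotated FOV/2 = 55 degrees to either side of the view direction; at yaw 0 that
+ // is roughly (p1x + 110, p1z +- 157), since cos(55deg) ~ 0.574 and sin(55deg) ~ 0.819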
+
|
|
+ // now that we have all of our points, we can recalculate the load queue
|
|
+
|
|
+ final List<ChunkPriorityHolder> loadQueue = new ArrayList<>();
|
|
+
|
|
+ // clear send queue, we are re-sorting
|
|
+ this.sendQueue.clear();
|
|
+ // clear chunk want set, vd/position might have changed
|
|
+ this.chunksToBeSent.clear();
|
|
+
|
|
+ final int searchViewDistance = Math.max(loadViewDistance, sendViewDistance);
|
|
+
|
|
+ for (int dx = -searchViewDistance; dx <= searchViewDistance; ++dx) {
|
|
+ for (int dz = -searchViewDistance; dz <= searchViewDistance; ++dz) {
|
|
+ final int chunkX = dx + centerChunkX;
|
|
+ final int chunkZ = dz + centerChunkZ;
|
|
+ final int squareDistance = Math.max(Math.abs(dx), Math.abs(dz));
|
|
+ final boolean sendChunk = squareDistance <= sendViewDistance && wantChunkLoaded(centerChunkX, centerChunkZ, chunkX, chunkZ, sendViewDistance);
|
|
+
|
|
+ if (this.hasSentChunk(chunkX, chunkZ)) {
|
|
+ // already sent (which means it is also loaded)
|
|
+ if (!sendChunk) {
|
|
+ // have sent the chunk, but don't want it anymore
|
|
+ // unload it now
|
|
+ this.unloadChunk(chunkX, chunkZ);
|
|
+ }
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ final boolean loadChunk = squareDistance <= loadViewDistance;
|
|
+
|
|
+ final boolean prioritised = useLookPriority && triangleIntersects(
|
|
+ // prioritisation triangle
|
|
+ p1x, p1z, p2x, p2z, p3x, p3z,
|
|
+
|
|
+ // center of chunk
|
|
+ (double)((chunkX << 4) | 8), (double)((chunkZ << 4) | 8)
|
|
+ );
|
|
+
|
|
+ final int manhattanDistance = Math.abs(dx) + Math.abs(dz);
|
|
+
|
|
+ final double priority;
|
|
+
|
|
+ if (squareDistance <= GlobalConfiguration.get().chunkLoading.minLoadRadius) {
|
|
+ // priority should be negative, and we also want to order it from center outwards
|
|
+ // so we want (0,0) to be the smallest, and (minLoadRadius,minLoadRadius) to be the greatest
+ priority = -((2 * GlobalConfiguration.get().chunkLoading.minLoadRadius + 1) - manhattanDistance);
|
|
+ } else {
|
|
+ if (prioritised) {
|
|
+ // we don't prioritise these chunks above others because we also want to make sure some chunks
|
|
+ // will be loaded if the player changes direction
|
|
+ priority = (double)manhattanDistance / 6.0;
|
|
+ } else {
|
|
+ priority = (double)manhattanDistance;
|
|
+ }
|
|
+ }
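+ // e.g. (illustrative) with minLoadRadius = 2: the center chunk gets -((2*2 + 1) - 0) = -5
+ // (negative, so it bypasses the load rate limiter), while a chunk at manhattan distance 3
+ // outside that radius gets 3.0, or 3.0 / 6.0 = 0.5 when inside the look-priority triangle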
+
|
|
+ final ChunkPriorityHolder holder = new ChunkPriorityHolder(chunkX, chunkZ, manhattanDistance, priority);
|
|
+
|
|
+ if (!this.loader.isChunkPlayerLoaded(chunkX, chunkZ)) {
|
|
+ if (loadChunk) {
|
|
+ loadQueue.add(holder);
|
|
+ if (sendChunk) {
|
|
+ this.chunksToBeSent.add(CoordinateUtils.getChunkKey(chunkX, chunkZ));
|
|
+ }
|
|
+ }
|
|
+ } else {
|
|
+ // loaded but not sent: so queue it!
|
|
+ if (sendChunk) {
|
|
+ this.sendQueue.add(holder);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ loadQueue.sort((final ChunkPriorityHolder p1, final ChunkPriorityHolder p2) -> {
|
|
+ return Double.compare(p1.priority, p2.priority);
|
|
+ });
|
|
+
|
|
+ // we're modifying loadQueue, must remove
|
|
+ this.loader.chunkLoadQueue.remove(this);
|
|
+
|
|
+ this.loadQueue.clear();
|
|
+ this.loadQueue.addAll(loadQueue);
|
|
+
|
|
+ // must re-add
|
|
+ this.loader.chunkLoadQueue.add(this);
|
|
+
|
|
+ // update the chunk center
|
|
+ // this must be done last so that the client does not ignore any of our unload chunk packets
|
|
+ if (needsChunkCenterUpdate) {
|
|
+ this.player.connection.send(new ClientboundSetChunkCacheCenterPacket(centerChunkX, centerChunkZ));
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
diff --git a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
|
|
index 8a5e93961dac4d87c81c0e70b6f4124a1f1d2556..0dc94dec1317b3f86d38074c6cbe41ab828cab1d 100644
|
|
--- a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
|
|
+++ b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java
|
|
@@ -31,191 +31,41 @@ public final class ChunkSystem {
|
|
}
|
|
|
|
public static void scheduleChunkTask(final ServerLevel level, final int chunkX, final int chunkZ, final Runnable run, final PrioritisedExecutor.Priority priority) {
|
|
- level.chunkSource.mainThreadProcessor.execute(run);
|
|
+ level.chunkTaskScheduler.scheduleChunkTask(chunkX, chunkZ, run, priority); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public static void scheduleChunkLoad(final ServerLevel level, final int chunkX, final int chunkZ, final boolean gen,
|
|
final ChunkStatus toStatus, final boolean addTicket, final PrioritisedExecutor.Priority priority,
|
|
final Consumer<ChunkAccess> onComplete) {
|
|
- if (gen) {
|
|
- scheduleChunkLoad(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
|
|
- return;
|
|
- }
|
|
- scheduleChunkLoad(level, chunkX, chunkZ, ChunkStatus.EMPTY, addTicket, priority, (final ChunkAccess chunk) -> {
|
|
- if (chunk == null) {
|
|
- onComplete.accept(null);
|
|
- } else {
|
|
- if (chunk.getStatus().isOrAfter(toStatus)) {
|
|
- scheduleChunkLoad(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
|
|
- } else {
|
|
- onComplete.accept(null);
|
|
- }
|
|
- }
|
|
- });
|
|
+ level.chunkTaskScheduler.scheduleChunkLoad(chunkX, chunkZ, gen, toStatus, addTicket, priority, onComplete); // Paper - rewrite chunk system
|
|
}
|
|
|
|
- static final TicketType<Long> CHUNK_LOAD = TicketType.create("chunk_load", Long::compareTo);
|
|
-
|
|
- private static long chunkLoadCounter = 0L;
|
|
+ // Paper - rewrite chunk system
|
|
public static void scheduleChunkLoad(final ServerLevel level, final int chunkX, final int chunkZ, final ChunkStatus toStatus,
|
|
final boolean addTicket, final PrioritisedExecutor.Priority priority, final Consumer<ChunkAccess> onComplete) {
|
|
- if (!Bukkit.isPrimaryThread()) {
|
|
- scheduleChunkTask(level, chunkX, chunkZ, () -> {
|
|
- scheduleChunkLoad(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
|
|
- }, priority);
|
|
- return;
|
|
- }
|
|
-
|
|
- final int minLevel = 33 + ChunkStatus.getDistance(toStatus);
|
|
- final Long chunkReference = addTicket ? Long.valueOf(++chunkLoadCounter) : null;
|
|
- final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ);
|
|
-
|
|
- if (addTicket) {
|
|
- level.chunkSource.addTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference);
|
|
- }
|
|
- level.chunkSource.runDistanceManagerUpdates();
|
|
-
|
|
- final Consumer<ChunkAccess> loadCallback = (final ChunkAccess chunk) -> {
|
|
- try {
|
|
- if (onComplete != null) {
|
|
- onComplete.accept(chunk);
|
|
- }
|
|
- } catch (final ThreadDeath death) {
|
|
- throw death;
|
|
- } catch (final Throwable thr) {
|
|
- LOGGER.error("Exception handling chunk load callback", thr);
|
|
- SneakyThrow.sneaky(thr);
|
|
- } finally {
|
|
- if (addTicket) {
|
|
- level.chunkSource.addTicketAtLevel(TicketType.UNKNOWN, chunkPos, minLevel, chunkPos);
|
|
- level.chunkSource.removeTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference);
|
|
- }
|
|
- }
|
|
- };
|
|
-
|
|
- final ChunkHolder holder = level.chunkSource.chunkMap.getUpdatingChunkIfPresent(CoordinateUtils.getChunkKey(chunkX, chunkZ));
|
|
-
|
|
- if (holder == null || holder.getTicketLevel() > minLevel) {
|
|
- loadCallback.accept(null);
|
|
- return;
|
|
- }
|
|
-
|
|
- final CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> loadFuture = holder.getOrScheduleFuture(toStatus, level.chunkSource.chunkMap);
|
|
-
|
|
- if (loadFuture.isDone()) {
|
|
- loadCallback.accept(loadFuture.join().left().orElse(null));
|
|
- return;
|
|
- }
|
|
-
|
|
- loadFuture.whenCompleteAsync((final Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either, final Throwable thr) -> {
|
|
- if (thr != null) {
|
|
- loadCallback.accept(null);
|
|
- return;
|
|
- }
|
|
- loadCallback.accept(either.left().orElse(null));
|
|
- }, (final Runnable r) -> {
|
|
- scheduleChunkTask(level, chunkX, chunkZ, r, PrioritisedExecutor.Priority.HIGHEST);
|
|
- });
|
|
+ level.chunkTaskScheduler.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public static void scheduleTickingState(final ServerLevel level, final int chunkX, final int chunkZ,
|
|
final ChunkHolder.FullChunkStatus toStatus, final boolean addTicket,
|
|
final PrioritisedExecutor.Priority priority, final Consumer<LevelChunk> onComplete) {
|
|
- if (toStatus == ChunkHolder.FullChunkStatus.INACCESSIBLE) {
|
|
- throw new IllegalArgumentException("Cannot wait for INACCESSIBLE status");
|
|
- }
|
|
-
|
|
- if (!Bukkit.isPrimaryThread()) {
|
|
- scheduleChunkTask(level, chunkX, chunkZ, () -> {
|
|
- scheduleTickingState(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
|
|
- }, priority);
|
|
- return;
|
|
- }
|
|
-
|
|
- final int minLevel = 33 - (toStatus.ordinal() - 1);
|
|
- final int radius = toStatus.ordinal() - 1;
|
|
- final Long chunkReference = addTicket ? Long.valueOf(++chunkLoadCounter) : null;
|
|
- final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ);
|
|
-
|
|
- if (addTicket) {
|
|
- level.chunkSource.addTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference);
|
|
- }
|
|
- level.chunkSource.runDistanceManagerUpdates();
|
|
-
|
|
- final Consumer<LevelChunk> loadCallback = (final LevelChunk chunk) -> {
|
|
- try {
|
|
- if (onComplete != null) {
|
|
- onComplete.accept(chunk);
|
|
- }
|
|
- } catch (final ThreadDeath death) {
|
|
- throw death;
|
|
- } catch (final Throwable thr) {
|
|
- LOGGER.error("Exception handling chunk load callback", thr);
|
|
- SneakyThrow.sneaky(thr);
|
|
- } finally {
|
|
- if (addTicket) {
|
|
- level.chunkSource.addTicketAtLevel(TicketType.UNKNOWN, chunkPos, minLevel, chunkPos);
|
|
- level.chunkSource.removeTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference);
|
|
- }
|
|
- }
|
|
- };
|
|
-
|
|
- final ChunkHolder holder = level.chunkSource.chunkMap.getUpdatingChunkIfPresent(CoordinateUtils.getChunkKey(chunkX, chunkZ));
|
|
-
|
|
- if (holder == null || holder.getTicketLevel() > minLevel) {
|
|
- loadCallback.accept(null);
|
|
- return;
|
|
- }
|
|
-
|
|
- final CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> tickingState;
|
|
- switch (toStatus) {
|
|
- case BORDER: {
|
|
- tickingState = holder.getFullChunkFuture();
|
|
- break;
|
|
- }
|
|
- case TICKING: {
|
|
- tickingState = holder.getTickingChunkFuture();
|
|
- break;
|
|
- }
|
|
- case ENTITY_TICKING: {
|
|
- tickingState = holder.getEntityTickingChunkFuture();
|
|
- break;
|
|
- }
|
|
- default: {
|
|
- throw new IllegalStateException("Cannot reach here");
|
|
- }
|
|
- }
|
|
-
|
|
- if (tickingState.isDone()) {
|
|
- loadCallback.accept(tickingState.join().left().orElse(null));
|
|
- return;
|
|
- }
|
|
-
|
|
- tickingState.whenCompleteAsync((final Either<LevelChunk, ChunkHolder.ChunkLoadingFailure> either, final Throwable thr) -> {
|
|
- if (thr != null) {
|
|
- loadCallback.accept(null);
|
|
- return;
|
|
- }
|
|
- loadCallback.accept(either.left().orElse(null));
|
|
- }, (final Runnable r) -> {
|
|
- scheduleChunkTask(level, chunkX, chunkZ, r, PrioritisedExecutor.Priority.HIGHEST);
|
|
- });
|
|
+ level.chunkTaskScheduler.scheduleTickingState(chunkX, chunkZ, toStatus, addTicket, priority, onComplete); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public static List<ChunkHolder> getVisibleChunkHolders(final ServerLevel level) {
|
|
- return new ArrayList<>(level.chunkSource.chunkMap.visibleChunkMap.values());
|
|
+ return level.chunkTaskScheduler.chunkHolderManager.getOldChunkHolders(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public static List<ChunkHolder> getUpdatingChunkHolders(final ServerLevel level) {
|
|
- return new ArrayList<>(level.chunkSource.chunkMap.updatingChunkMap.values());
|
|
+ return level.chunkTaskScheduler.chunkHolderManager.getOldChunkHolders(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public static int getVisibleChunkHolderCount(final ServerLevel level) {
|
|
- return level.chunkSource.chunkMap.visibleChunkMap.size();
|
|
+ return level.chunkTaskScheduler.chunkHolderManager.size(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public static int getUpdatingChunkHolderCount(final ServerLevel level) {
|
|
- return level.chunkSource.chunkMap.updatingChunkMap.size();
|
|
+ return level.chunkTaskScheduler.chunkHolderManager.size(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public static boolean hasAnyChunkHolders(final ServerLevel level) {
|
|
@@ -269,23 +119,15 @@ public final class ChunkSystem {
|
|
}
|
|
|
|
public static int getSendViewDistance(final ServerPlayer player) {
|
|
- return getLoadViewDistance(player);
|
|
+ return io.papermc.paper.chunk.PlayerChunkLoader.getSendViewDistance(player);
|
|
}
|
|
|
|
public static int getLoadViewDistance(final ServerPlayer player) {
|
|
- final ServerLevel level = player.getLevel();
|
|
- if (level == null) {
|
|
- return Bukkit.getViewDistance() + 1;
|
|
- }
|
|
- return level.chunkSource.chunkMap.getEffectiveViewDistance() + 1;
|
|
+ return io.papermc.paper.chunk.PlayerChunkLoader.getLoadViewDistance(player);
|
|
}
|
|
|
|
public static int getTickViewDistance(final ServerPlayer player) {
|
|
- final ServerLevel level = player.getLevel();
|
|
- if (level == null) {
|
|
- return Bukkit.getSimulationDistance();
|
|
- }
|
|
- return level.chunkSource.chunkMap.distanceManager.getSimulationDistance();
|
|
+ return io.papermc.paper.chunk.PlayerChunkLoader.getTickViewDistance(player);
|
|
}
|
|
|
|
private ChunkSystem() {
|
|
diff --git a/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java b/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..61c170555c8854b102c640b0b6a615f9f732edbf
|
|
--- /dev/null
|
|
+++ b/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java
|
|
@@ -0,0 +1,839 @@
|
|
+package io.papermc.paper.chunk.system.entity;
|
|
+
|
|
+import com.destroystokyo.paper.util.maplist.EntityList;
|
|
+import com.mojang.logging.LogUtils;
|
|
+import io.papermc.paper.util.CoordinateUtils;
|
|
+import io.papermc.paper.util.TickThread;
|
|
+import io.papermc.paper.util.WorldUtil;
|
|
+import io.papermc.paper.world.ChunkEntitySlices;
|
|
+import it.unimi.dsi.fastutil.ints.Int2ReferenceOpenHashMap;
|
|
+import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
|
|
+import it.unimi.dsi.fastutil.objects.Object2ReferenceOpenHashMap;
|
|
+import net.minecraft.core.BlockPos;
|
|
+import io.papermc.paper.chunk.system.ChunkSystem;
|
|
+import net.minecraft.server.level.ChunkHolder;
|
|
+import net.minecraft.server.level.ServerLevel;
|
|
+import net.minecraft.util.AbortableIterationConsumer;
|
|
+import net.minecraft.util.Mth;
|
|
+import net.minecraft.world.entity.Entity;
|
|
+import net.minecraft.world.entity.EntityType;
|
|
+import net.minecraft.world.level.entity.EntityInLevelCallback;
|
|
+import net.minecraft.world.level.entity.EntityTypeTest;
|
|
+import net.minecraft.world.level.entity.LevelCallback;
|
|
+import net.minecraft.world.level.entity.LevelEntityGetter;
|
|
+import net.minecraft.world.level.entity.Visibility;
|
|
+import net.minecraft.world.phys.AABB;
|
|
+import org.jetbrains.annotations.NotNull;
|
|
+import org.jetbrains.annotations.Nullable;
|
|
+import org.slf4j.Logger;
|
|
+import java.util.ArrayList;
|
|
+import java.util.Iterator;
|
|
+import java.util.List;
|
|
+import java.util.NoSuchElementException;
|
|
+import java.util.UUID;
|
|
+import java.util.concurrent.locks.StampedLock;
|
|
+import java.util.function.Consumer;
|
|
+import java.util.function.Predicate;
|
|
+
|
|
+public final class EntityLookup implements LevelEntityGetter<Entity> {
|
|
+
|
|
+ private static final Logger LOGGER = LogUtils.getClassLogger();
|
|
+
|
|
+ protected static final int REGION_SHIFT = 5;
|
|
+ protected static final int REGION_MASK = (1 << REGION_SHIFT) - 1;
|
|
+ protected static final int REGION_SIZE = 1 << REGION_SHIFT;
|
|
+
+    public final ServerLevel world;
+
+    private final StampedLock stateLock = new StampedLock();
+    protected final Long2ObjectOpenHashMap<ChunkSlicesRegion> regions = new Long2ObjectOpenHashMap<>(128, 0.5f);
+
+    private final int minSection; // inclusive
+    private final int maxSection; // inclusive
+    private final LevelCallback<Entity> worldCallback;
+
+    private final StampedLock entityByLock = new StampedLock();
+    private final Int2ReferenceOpenHashMap<Entity> entityById = new Int2ReferenceOpenHashMap<>();
+    private final Object2ReferenceOpenHashMap<UUID, Entity> entityByUUID = new Object2ReferenceOpenHashMap<>();
+    private final EntityList accessibleEntities = new EntityList();
+
+    public EntityLookup(final ServerLevel world, final LevelCallback<Entity> worldCallback) {
+        this.world = world;
+        this.minSection = WorldUtil.getMinSection(world);
+        this.maxSection = WorldUtil.getMaxSection(world);
+        this.worldCallback = worldCallback;
+    }
+
+    private static Entity maskNonAccessible(final Entity entity) {
+        if (entity == null) {
+            return null;
+        }
+        final Visibility visibility = EntityLookup.getEntityStatus(entity);
+        return visibility.isAccessible() ? entity : null;
+    }
+
+    @Nullable
+    @Override
+    public Entity get(final int id) {
+        final long attempt = this.entityByLock.tryOptimisticRead();
+        if (attempt != 0L) {
+            try {
+                final Entity ret = this.entityById.get(id);
+
+                if (this.entityByLock.validate(attempt)) {
+                    return maskNonAccessible(ret);
+                }
+            } catch (final Error error) {
+                throw error;
+            } catch (final Throwable thr) {
+                // ignore
+            }
+        }
+
+        this.entityByLock.readLock();
+        try {
+            return maskNonAccessible(this.entityById.get(id));
+        } finally {
+            this.entityByLock.tryUnlockRead();
+        }
+    }
+
+    @Nullable
+    @Override
+    public Entity get(final UUID id) {
+        final long attempt = this.entityByLock.tryOptimisticRead();
+        if (attempt != 0L) {
+            try {
+                final Entity ret = this.entityByUUID.get(id);
+
+                if (this.entityByLock.validate(attempt)) {
+                    return maskNonAccessible(ret);
+                }
+            } catch (final Error error) {
+                throw error;
+            } catch (final Throwable thr) {
+                // ignore
+            }
+        }
+
+        this.entityByLock.readLock();
+        try {
+            return maskNonAccessible(this.entityByUUID.get(id));
+        } finally {
+            this.entityByLock.tryUnlockRead();
+        }
+    }
+
+    public boolean hasEntity(final UUID uuid) {
+        return this.get(uuid) != null;
+    }
+
+    public String getDebugInfo() {
+        return "count_id:" + this.entityById.size() + ",count_uuid:" + this.entityByUUID.size() + ",region_count:" + this.regions.size();
+    }
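+    // Illustrative sketch (not part of the original patch): the lookups above follow the standard
+    // StampedLock optimistic-read pattern. An optimistic stamp is taken, the map is read (possibly
+    // observing a torn state, hence the broad Throwable catch), and the stamp is validated; only if
+    // a writer raced the read does it fall back to a full read lock. The methods above ignore the
+    // read stamp and use tryUnlockRead(), but the shape is the usual one:
+    //
+    //     final long stamp = lock.tryOptimisticRead();
+    //     Entity ret = map.get(id);
+    //     if (stamp == 0L || !lock.validate(stamp)) {
+    //         final long readStamp = lock.readLock();
+    //         try {
+    //             ret = map.get(id); // consistent read under the lock
+    //         } finally {
+    //             lock.unlockRead(readStamp);
+    //         }
+    //     }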
+
+    static final class ArrayIterable<T> implements Iterable<T> {
+
+        private final T[] array;
+        private final int off;
+        private final int length;
+
+        public ArrayIterable(final T[] array, final int off, final int length) {
+            this.array = array;
+            this.off = off;
+            this.length = length;
+            if (length > array.length) {
+                throw new IllegalArgumentException("Length must be no greater than the array length");
+            }
+        }
+
+        @NotNull
+        @Override
+        public Iterator<T> iterator() {
+            return new ArrayIterator<>(this.array, this.off, this.length);
+        }
+
+        static final class ArrayIterator<T> implements Iterator<T> {
+
+            private final T[] array;
+            private int off;
+            private final int length;
+
+            public ArrayIterator(final T[] array, final int off, final int length) {
+                this.array = array;
+                this.off = off;
+                this.length = length;
+            }
+
+            @Override
+            public boolean hasNext() {
+                return this.off < this.length;
+            }
+
+            @Override
+            public T next() {
+                if (this.off >= this.length) {
+                    throw new NoSuchElementException();
+                }
+                return this.array[this.off++];
+            }
+
+            @Override
+            public void remove() {
+                throw new UnsupportedOperationException();
+            }
+        }
+    }
+
+    @Override
+    public Iterable<Entity> getAll() {
+        return new ArrayIterable<>(this.accessibleEntities.getRawData(), 0, this.accessibleEntities.size());
+    }
+
+    @Override
+    public <U extends Entity> void get(final EntityTypeTest<Entity, U> filter, final AbortableIterationConsumer<U> action) {
+        for (final Entity entity : this.entityById.values()) {
+            final Visibility visibility = EntityLookup.getEntityStatus(entity);
+            if (!visibility.isAccessible()) {
+                continue;
+            }
+            final U casted = filter.tryCast(entity);
+            if (casted != null && action.accept(casted).shouldAbort()) {
+                break;
+            }
+        }
+    }
+
+    @Override
+    public void get(final AABB box, final Consumer<Entity> action) {
+        List<Entity> entities = new ArrayList<>();
+        this.getEntitiesWithoutDragonParts(null, box, entities, null);
+        for (int i = 0, len = entities.size(); i < len; ++i) {
+            action.accept(entities.get(i));
+        }
+    }
+
+    @Override
+    public <U extends Entity> void get(final EntityTypeTest<Entity, U> filter, final AABB box, final AbortableIterationConsumer<U> action) {
+        List<Entity> entities = new ArrayList<>();
+        this.getEntitiesWithoutDragonParts(null, box, entities, null);
+        for (int i = 0, len = entities.size(); i < len; ++i) {
+            final U casted = filter.tryCast(entities.get(i));
+            if (casted != null && action.accept(casted).shouldAbort()) {
+                break;
+            }
+        }
+    }
+
+    public void entityStatusChange(final Entity entity, final ChunkEntitySlices slices, final Visibility oldVisibility, final Visibility newVisibility, final boolean moved,
+                                   final boolean created, final boolean destroyed) {
+        TickThread.ensureTickThread(entity, "Entity status change must only happen on the main thread");
+
+        if (entity.updatingSectionStatus) {
+            // recursive status update
+            LOGGER.error("Cannot recursively update entity chunk status for entity " + entity, new Throwable());
+            return;
+        }
+
+        final boolean entityStatusUpdateBefore = slices == null ? false : slices.startPreventingStatusUpdates();
+
+        if (entityStatusUpdateBefore) {
+            LOGGER.error("Cannot update chunk status for entity " + entity + " since entity chunk (" + slices.chunkX + "," + slices.chunkZ + ") is receiving update", new Throwable());
+            return;
+        }
+
+        try {
+            final Boolean ticketBlockBefore = this.world.chunkTaskScheduler.chunkHolderManager.blockTicketUpdates();
+            try {
+                entity.updatingSectionStatus = true;
+                try {
+                    if (created) {
+                        EntityLookup.this.worldCallback.onCreated(entity);
+                    }
+
+                    if (oldVisibility == newVisibility) {
+                        if (moved && newVisibility.isAccessible()) {
+                            EntityLookup.this.worldCallback.onSectionChange(entity);
+                        }
+                        return;
+                    }
+
+                    if (newVisibility.ordinal() > oldVisibility.ordinal()) {
+                        // status upgrade
+                        if (!oldVisibility.isAccessible() && newVisibility.isAccessible()) {
+                            this.accessibleEntities.add(entity);
+                            EntityLookup.this.worldCallback.onTrackingStart(entity);
+                        }
+
+                        if (!oldVisibility.isTicking() && newVisibility.isTicking()) {
+                            EntityLookup.this.worldCallback.onTickingStart(entity);
+                        }
+                    } else {
+                        // status downgrade
+                        if (oldVisibility.isTicking() && !newVisibility.isTicking()) {
+                            EntityLookup.this.worldCallback.onTickingEnd(entity);
+                        }
+
+                        if (oldVisibility.isAccessible() && !newVisibility.isAccessible()) {
+                            this.accessibleEntities.remove(entity);
+                            EntityLookup.this.worldCallback.onTrackingEnd(entity);
+                        }
+                    }
+
+                    if (moved && newVisibility.isAccessible()) {
+                        EntityLookup.this.worldCallback.onSectionChange(entity);
+                    }
+
+                    if (destroyed) {
+                        EntityLookup.this.worldCallback.onDestroyed(entity);
+                    }
+                } finally {
+                    entity.updatingSectionStatus = false;
+                }
+            } finally {
+                this.world.chunkTaskScheduler.chunkHolderManager.unblockTicketUpdates(ticketBlockBefore);
+            }
+        } finally {
+            if (slices != null) {
+                slices.stopPreventingStatusUpdates(false);
+            }
+        }
+    }
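+    // Illustrative note (not part of the original patch): the transitions above are driven by enum
+    // ordinal order, so callbacks fire in nested pairs. For example, an upgrade from HIDDEN to
+    // TICKING invokes onTrackingStart then onTickingStart, while the matching downgrade invokes
+    // onTickingEnd then onTrackingEnd, and a section move only fires onSectionChange when the entity
+    // is still accessible afterwards.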
+
+    public void chunkStatusChange(final int x, final int z, final ChunkHolder.FullChunkStatus newStatus) {
+        this.getChunk(x, z).updateStatus(newStatus, this);
+    }
+
+    public void addLegacyChunkEntities(final List<Entity> entities) {
+        for (int i = 0, len = entities.size(); i < len; ++i) {
+            this.addEntity(entities.get(i), true);
+        }
+    }
+
+    public void addEntityChunkEntities(final List<Entity> entities) {
+        for (int i = 0, len = entities.size(); i < len; ++i) {
+            this.addEntity(entities.get(i), true);
+        }
+    }
+
+    public void addWorldGenChunkEntities(final List<Entity> entities) {
+        for (int i = 0, len = entities.size(); i < len; ++i) {
+            this.addEntity(entities.get(i), false);
+        }
+    }
+
+    public boolean addNewEntity(final Entity entity) {
+        return this.addEntity(entity, false);
+    }
+
+    public static Visibility getEntityStatus(final Entity entity) {
+        if (entity.isAlwaysTicking()) {
+            return Visibility.TICKING;
+        }
+        final ChunkHolder.FullChunkStatus entityStatus = entity.chunkStatus;
+        return Visibility.fromFullChunkStatus(entityStatus == null ? ChunkHolder.FullChunkStatus.INACCESSIBLE : entityStatus);
+    }
+
+    private boolean addEntity(final Entity entity, final boolean fromDisk) {
+        final BlockPos pos = entity.blockPosition();
+        final int sectionX = pos.getX() >> 4;
+        final int sectionY = Mth.clamp(pos.getY() >> 4, this.minSection, this.maxSection);
+        final int sectionZ = pos.getZ() >> 4;
+        TickThread.ensureTickThread(this.world, sectionX, sectionZ, "Cannot add entity off-main thread");
+
+        if (entity.isRemoved()) {
+            LOGGER.warn("Refusing to add removed entity: " + entity);
+            return false;
+        }
+
+        if (entity.updatingSectionStatus) {
+            LOGGER.warn("Entity " + entity + " is currently prevented from being added/removed to world since it is processing section status updates", new Throwable());
+            return false;
+        }
+
+        if (fromDisk) {
+            ChunkSystem.onEntityPreAdd(this.world, entity);
+            if (entity.isRemoved()) {
+                // removed from checkDupeUUID call
+                return false;
+            }
+        }
+
+        this.entityByLock.writeLock();
+        try {
+            if (this.entityById.containsKey(entity.getId())) {
+                LOGGER.warn("Entity id already exists: " + entity.getId() + ", mapped to " + this.entityById.get(entity.getId()) + ", can't add " + entity);
+                return false;
+            }
+            if (this.entityByUUID.containsKey(entity.getUUID())) {
+                LOGGER.warn("Entity uuid already exists: " + entity.getUUID() + ", mapped to " + this.entityByUUID.get(entity.getUUID()) + ", can't add " + entity);
+                return false;
+            }
+            this.entityById.put(entity.getId(), entity);
+            this.entityByUUID.put(entity.getUUID(), entity);
+        } finally {
+            this.entityByLock.tryUnlockWrite();
+        }
+
+        entity.sectionX = sectionX;
+        entity.sectionY = sectionY;
+        entity.sectionZ = sectionZ;
+        final ChunkEntitySlices slices = this.getOrCreateChunk(sectionX, sectionZ);
+        if (!slices.addEntity(entity, sectionY)) {
+            LOGGER.warn("Entity " + entity + " added to world '" + this.world.getWorld().getName() + "', but was already contained in entity chunk (" + sectionX + "," + sectionZ + ")");
+        }
+
+        entity.setLevelCallback(new EntityCallback(entity));
+
+        this.entityStatusChange(entity, slices, Visibility.HIDDEN, getEntityStatus(entity), false, !fromDisk, false);
+
+        return true;
+    }
+
+    private void removeEntity(final Entity entity) {
+        final int sectionX = entity.sectionX;
+        final int sectionY = entity.sectionY;
+        final int sectionZ = entity.sectionZ;
+        TickThread.ensureTickThread(this.world, sectionX, sectionZ, "Cannot remove entity off-main");
+        if (!entity.isRemoved()) {
+            throw new IllegalStateException("Only call Entity#setRemoved to remove an entity");
+        }
+        final ChunkEntitySlices slices = this.getChunk(sectionX, sectionZ);
+        // all entities should be in a chunk
+        if (slices == null) {
+            LOGGER.warn("Cannot remove entity " + entity + " from null entity slices (" + sectionX + "," + sectionZ + ")");
+        } else {
+            if (!slices.removeEntity(entity, sectionY)) {
+                LOGGER.warn("Failed to remove entity " + entity + " from entity slices (" + sectionX + "," + sectionZ + ")");
+            }
+        }
+        entity.sectionX = entity.sectionY = entity.sectionZ = Integer.MIN_VALUE;
+
+        this.entityByLock.writeLock();
+        try {
+            if (!this.entityById.remove(entity.getId(), entity)) {
+                LOGGER.warn("Failed to remove entity " + entity + " by id, current entity mapped: " + this.entityById.get(entity.getId()));
+            }
+            if (!this.entityByUUID.remove(entity.getUUID(), entity)) {
+                LOGGER.warn("Failed to remove entity " + entity + " by uuid, current entity mapped: " + this.entityByUUID.get(entity.getUUID()));
+            }
+        } finally {
+            this.entityByLock.tryUnlockWrite();
+        }
+    }
+
+    private ChunkEntitySlices moveEntity(final Entity entity) {
+        // ensure we own the entity
+        TickThread.ensureTickThread(entity, "Cannot move entity off-main");
+
+        final BlockPos newPos = entity.blockPosition();
+        final int newSectionX = newPos.getX() >> 4;
+        final int newSectionY = Mth.clamp(newPos.getY() >> 4, this.minSection, this.maxSection);
+        final int newSectionZ = newPos.getZ() >> 4;
+
+        if (newSectionX == entity.sectionX && newSectionY == entity.sectionY && newSectionZ == entity.sectionZ) {
+            return null;
+        }
+
+        // ensure the new section is owned by this tick thread
+        TickThread.ensureTickThread(this.world, newSectionX, newSectionZ, "Cannot move entity off-main");
+
+        // ensure the old section is owned by this tick thread
+        TickThread.ensureTickThread(this.world, entity.sectionX, entity.sectionZ, "Cannot move entity off-main");
+
+        final ChunkEntitySlices old = this.getChunk(entity.sectionX, entity.sectionZ);
+        final ChunkEntitySlices slices = this.getOrCreateChunk(newSectionX, newSectionZ);
+
+        if (!old.removeEntity(entity, entity.sectionY)) {
+            LOGGER.warn("Could not remove entity " + entity + " from its old chunk section (" + entity.sectionX + "," + entity.sectionY + "," + entity.sectionZ + ") since it was not contained in the section");
+        }
+
+        if (!slices.addEntity(entity, newSectionY)) {
+            LOGGER.warn("Could not add entity " + entity + " to its new chunk section (" + newSectionX + "," + newSectionY + "," + newSectionZ + ") as it is already contained in the section");
+        }
+
+        entity.sectionX = newSectionX;
+        entity.sectionY = newSectionY;
+        entity.sectionZ = newSectionZ;
+
+        return slices;
+    }
+
+    public void getEntitiesWithoutDragonParts(final Entity except, final AABB box, final List<Entity> into, final Predicate<? super Entity> predicate) {
+        final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
+        final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
+        final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
+        final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
+
+        final int minRegionX = minChunkX >> REGION_SHIFT;
+        final int minRegionZ = minChunkZ >> REGION_SHIFT;
+        final int maxRegionX = maxChunkX >> REGION_SHIFT;
+        final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
+
+        for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
+            final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
+            final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
+
+            for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
+                final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
+
+                if (region == null) {
+                    continue;
+                }
+
+                final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
+                final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
+
+                for (int currZ = minZ; currZ <= maxZ; ++currZ) {
+                    for (int currX = minX; currX <= maxX; ++currX) {
+                        final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
+                        if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
+                            continue;
+                        }
+
+                        chunk.getEntitiesWithoutDragonParts(except, box, into, predicate);
+                    }
+                }
+            }
+        }
+    }
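+    // Illustrative usage (not part of the original patch): collision code typically queries a box
+    // slightly larger than an entity's bounding box; the -2/+2 block padding above keeps large
+    // entities whose positions lie in a neighbouring chunk inside the search window. For example:
+    //
+    //     final List<Entity> nearby = new ArrayList<>();
+    //     lookup.getEntitiesWithoutDragonParts(player, player.getBoundingBox().inflate(4.0), nearby, null);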
+
+    public void getEntities(final Entity except, final AABB box, final List<Entity> into, final Predicate<? super Entity> predicate) {
+        final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
+        final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
+        final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
+        final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
+
+        final int minRegionX = minChunkX >> REGION_SHIFT;
+        final int minRegionZ = minChunkZ >> REGION_SHIFT;
+        final int maxRegionX = maxChunkX >> REGION_SHIFT;
+        final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
+
+        for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
+            final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
+            final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
+
+            for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
+                final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
+
+                if (region == null) {
+                    continue;
+                }
+
+                final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
+                final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
+
+                for (int currZ = minZ; currZ <= maxZ; ++currZ) {
+                    for (int currX = minX; currX <= maxX; ++currX) {
+                        final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
+                        if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
+                            continue;
+                        }
+
+                        chunk.getEntities(except, box, into, predicate);
+                    }
+                }
+            }
+        }
+    }
+
+    public void getHardCollidingEntities(final Entity except, final AABB box, final List<Entity> into, final Predicate<? super Entity> predicate) {
+        final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
+        final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
+        final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
+        final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
+
+        final int minRegionX = minChunkX >> REGION_SHIFT;
+        final int minRegionZ = minChunkZ >> REGION_SHIFT;
+        final int maxRegionX = maxChunkX >> REGION_SHIFT;
+        final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
+
+        for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
+            final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
+            final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
+
+            for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
+                final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
+
+                if (region == null) {
+                    continue;
+                }
+
+                final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
+                final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
+
+                for (int currZ = minZ; currZ <= maxZ; ++currZ) {
+                    for (int currX = minX; currX <= maxX; ++currX) {
+                        final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
+                        if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
+                            continue;
+                        }
+
+                        chunk.getHardCollidingEntities(except, box, into, predicate);
+                    }
+                }
+            }
+        }
+    }
+
+    public <T extends Entity> void getEntities(final EntityType<?> type, final AABB box, final List<? super T> into,
+                                               final Predicate<? super T> predicate) {
+        final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
+        final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
+        final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
+        final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
+
+        final int minRegionX = minChunkX >> REGION_SHIFT;
+        final int minRegionZ = minChunkZ >> REGION_SHIFT;
+        final int maxRegionX = maxChunkX >> REGION_SHIFT;
+        final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
+
+        for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
+            final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
+            final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
+
+            for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
+                final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
+
+                if (region == null) {
+                    continue;
+                }
+
+                final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
+                final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
+
+                for (int currZ = minZ; currZ <= maxZ; ++currZ) {
+                    for (int currX = minX; currX <= maxX; ++currX) {
+                        final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
+                        if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
+                            continue;
+                        }
+
+                        chunk.getEntities(type, box, (List)into, (Predicate)predicate);
+                    }
+                }
+            }
+        }
+    }
+
+    public <T extends Entity> void getEntities(final Class<? extends T> clazz, final Entity except, final AABB box, final List<? super T> into,
+                                               final Predicate<? super T> predicate) {
+        final int minChunkX = (Mth.floor(box.minX) - 2) >> 4;
+        final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4;
+        final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4;
+        final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4;
+
+        final int minRegionX = minChunkX >> REGION_SHIFT;
+        final int minRegionZ = minChunkZ >> REGION_SHIFT;
+        final int maxRegionX = maxChunkX >> REGION_SHIFT;
+        final int maxRegionZ = maxChunkZ >> REGION_SHIFT;
+
+        for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) {
+            final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0;
+            final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK;
+
+            for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) {
+                final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ);
+
+                if (region == null) {
+                    continue;
+                }
+
+                final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0;
+                final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK;
+
+                for (int currZ = minZ; currZ <= maxZ; ++currZ) {
+                    for (int currX = minX; currX <= maxX; ++currX) {
+                        final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT));
+                        if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
+                            continue;
+                        }
+
+                        chunk.getEntities(clazz, except, box, into, predicate);
+                    }
+                }
+            }
+        }
+    }
+
+    public void entitySectionLoad(final int chunkX, final int chunkZ, final ChunkEntitySlices slices) {
+        TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot load in entity section off-main");
+        synchronized (this) {
+            final ChunkEntitySlices curr = this.getChunk(chunkX, chunkZ);
+            if (curr != null) {
+                this.removeChunk(chunkX, chunkZ);
+
+                curr.mergeInto(slices);
+
+                this.addChunk(chunkX, chunkZ, slices);
+            } else {
+                this.addChunk(chunkX, chunkZ, slices);
+            }
+        }
+    }
+
+    public void entitySectionUnload(final int chunkX, final int chunkZ) {
+        TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot unload entity section off-main");
+        this.removeChunk(chunkX, chunkZ);
+    }
+
+    public ChunkEntitySlices getChunk(final int chunkX, final int chunkZ) {
+        final ChunkSlicesRegion region = this.getRegion(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
+        if (region == null) {
+            return null;
+        }
+
+        return region.get((chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT));
+    }
+
+    public ChunkEntitySlices getOrCreateChunk(final int chunkX, final int chunkZ) {
+        final ChunkSlicesRegion region = this.getRegion(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
+        ChunkEntitySlices ret;
+        if (region == null || (ret = region.get((chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT))) == null) {
+            // loadInEntityChunk will call addChunk for us
+            return this.world.chunkTaskScheduler.chunkHolderManager.getOrCreateEntityChunk(chunkX, chunkZ, true);
+        }
+
+        return ret;
+    }
+
+    public ChunkSlicesRegion getRegion(final int regionX, final int regionZ) {
+        final long key = CoordinateUtils.getChunkKey(regionX, regionZ);
+        final long attempt = this.stateLock.tryOptimisticRead();
+        if (attempt != 0L) {
+            try {
+                final ChunkSlicesRegion ret = this.regions.get(key);
+
+                if (this.stateLock.validate(attempt)) {
+                    return ret;
+                }
+            } catch (final Error error) {
+                throw error;
+            } catch (final Throwable thr) {
+                // ignore
+            }
+        }
+
+        this.stateLock.readLock();
+        try {
+            return this.regions.get(key);
+        } finally {
+            this.stateLock.tryUnlockRead();
+        }
+    }
+
+    private synchronized void removeChunk(final int chunkX, final int chunkZ) {
+        final long key = CoordinateUtils.getChunkKey(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
+        final int relIndex = (chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT);
+
+        final ChunkSlicesRegion region = this.regions.get(key);
+        final int remaining = region.remove(relIndex);
+
+        if (remaining == 0) {
+            this.stateLock.writeLock();
+            try {
+                this.regions.remove(key);
+            } finally {
+                this.stateLock.tryUnlockWrite();
+            }
+        }
+    }
+
+    public synchronized void addChunk(final int chunkX, final int chunkZ, final ChunkEntitySlices slices) {
+        final long key = CoordinateUtils.getChunkKey(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT);
+        final int relIndex = (chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT);
+
+        ChunkSlicesRegion region = this.regions.get(key);
+        if (region != null) {
+            region.add(relIndex, slices);
+        } else {
+            region = new ChunkSlicesRegion();
+            region.add(relIndex, slices);
+            this.stateLock.writeLock();
+            try {
+                this.regions.put(key, region);
+            } finally {
+                this.stateLock.tryUnlockWrite();
+            }
+        }
+    }
+
+    public static final class ChunkSlicesRegion {
+
+        protected final ChunkEntitySlices[] slices = new ChunkEntitySlices[REGION_SIZE * REGION_SIZE];
+        protected int sliceCount;
+
+        public ChunkEntitySlices get(final int index) {
+            return this.slices[index];
+        }
+
+        public int remove(final int index) {
+            final ChunkEntitySlices slices = this.slices[index];
+            if (slices == null) {
+                throw new IllegalStateException();
+            }
+
+            this.slices[index] = null;
+
+            return --this.sliceCount;
+        }
+
+        public void add(final int index, final ChunkEntitySlices slices) {
+            final ChunkEntitySlices curr = this.slices[index];
+            if (curr != null) {
+                throw new IllegalStateException();
+            }
+
+            this.slices[index] = slices;
+
+            ++this.sliceCount;
+        }
+    }
+
+    private final class EntityCallback implements EntityInLevelCallback {
+
+        public final Entity entity;
+
+        public EntityCallback(final Entity entity) {
+            this.entity = entity;
+        }
+
+        @Override
+        public void onMove() {
+            final Entity entity = this.entity;
+            final Visibility oldVisibility = getEntityStatus(entity);
+            final ChunkEntitySlices newSlices = EntityLookup.this.moveEntity(this.entity);
+            if (newSlices == null) {
+                // no new section, so didn't change sections
+                return;
+            }
+            final Visibility newVisibility = getEntityStatus(entity);
+
+            EntityLookup.this.entityStatusChange(entity, newSlices, oldVisibility, newVisibility, true, false, false);
+        }
+
+        @Override
+        public void onRemove(final Entity.RemovalReason reason) {
+            final Entity entity = this.entity;
+            TickThread.ensureTickThread(entity, "Cannot remove entity off-main"); // Paper - rewrite chunk system
+            final Visibility tickingState = EntityLookup.getEntityStatus(entity);
+
+            EntityLookup.this.removeEntity(entity);
+
+            EntityLookup.this.entityStatusChange(entity, null, tickingState, Visibility.HIDDEN, false, false, reason.shouldDestroy());
+
+            this.entity.setLevelCallback(NoOpCallback.INSTANCE);
+        }
+    }
+
+    private static final class NoOpCallback implements EntityInLevelCallback {
+
+        public static final NoOpCallback INSTANCE = new NoOpCallback();
+
+        @Override
+        public void onMove() {}
+
+        @Override
+        public void onRemove(final Entity.RemovalReason reason) {}
+    }
+}
diff --git a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
new file mode 100644
index 0000000000000000000000000000000000000000..a08cde4eefe879adcee7c4118bc38f98c5097ed0
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
@@ -0,0 +1,1328 @@
+package io.papermc.paper.chunk.system.io;
+
+import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
+import ca.spottedleaf.concurrentutil.executor.Cancellable;
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedQueueExecutorThread;
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadedTaskQueue;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import com.mojang.logging.LogUtils;
+import io.papermc.paper.util.CoordinateUtils;
+import io.papermc.paper.util.TickThread;
+import it.unimi.dsi.fastutil.HashCommon;
+import net.minecraft.nbt.CompoundTag;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.world.level.ChunkPos;
+import net.minecraft.world.level.chunk.storage.RegionFile;
+import net.minecraft.world.level.chunk.storage.RegionFileStorage;
+import org.slf4j.Logger;
+import java.io.IOException;
+import java.lang.invoke.VarHandle;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+/**
+ * Prioritised RegionFile I/O executor, responsible for all RegionFile access.
+ * <p>
+ * All functions provided are MT-Safe; however, certain ordering constraints are recommended:
+ * <li>
+ * Chunk saves may not occur for unloaded chunks.
+ * </li>
+ * <li>
+ * Tasks must be scheduled on the chunk scheduler thread.
+ * </li>
+ * By following these constraints, no chunk data loss should occur with the exception of underlying I/O problems.
+ * </p>
+ */
+public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
+
+    private static final Logger LOGGER = LogUtils.getClassLogger();
+
+    /**
+     * The kinds of region files controlled by the region file thread. Add more when needed, and ensure
+     * getControllerFor is updated.
+     */
+    public static enum RegionFileType {
+        CHUNK_DATA,
+        POI_DATA,
+        ENTITY_DATA;
+    }
+
+    protected static final RegionFileType[] CACHED_REGIONFILE_TYPES = RegionFileType.values();
+
+    private ChunkDataController getControllerFor(final ServerLevel world, final RegionFileType type) {
+        switch (type) {
+            case CHUNK_DATA:
+                return world.chunkDataControllerNew;
+            case POI_DATA:
+                return world.poiDataControllerNew;
+            case ENTITY_DATA:
+                return world.entityDataControllerNew;
+            default:
+                throw new IllegalStateException("Unknown controller type " + type);
+        }
+    }
+
+    /**
+     * Collects regionfile data for a certain chunk.
+     */
+    public static final class RegionFileData {
+
+        private final boolean[] hasResult = new boolean[CACHED_REGIONFILE_TYPES.length];
+        private final CompoundTag[] data = new CompoundTag[CACHED_REGIONFILE_TYPES.length];
+        private final Throwable[] throwables = new Throwable[CACHED_REGIONFILE_TYPES.length];
+
+        /**
+         * Sets the result associated with the specified regionfile type. Note that
+         * results can only be set once per regionfile type.
+         *
+         * @param type The regionfile type.
+         * @param data The result to set.
+         */
+        public void setData(final RegionFileType type, final CompoundTag data) {
+            final int index = type.ordinal();
+
+            if (this.hasResult[index]) {
+                throw new IllegalArgumentException("Result already exists for type " + type);
+            }
+            this.hasResult[index] = true;
+            this.data[index] = data;
+        }
+
+        /**
+         * Sets the result associated with the specified regionfile type. Note that
+         * results can only be set once per regionfile type.
+         *
+         * @param type The regionfile type.
+         * @param throwable The result to set.
+         */
+        public void setThrowable(final RegionFileType type, final Throwable throwable) {
+            final int index = type.ordinal();
+
+            if (this.hasResult[index]) {
+                throw new IllegalArgumentException("Result already exists for type " + type);
+            }
+            this.hasResult[index] = true;
+            this.throwables[index] = throwable;
+        }
+
+        /**
+         * Returns whether there is a result for the specified regionfile type.
+         *
+         * @param type Specified regionfile type.
+         *
+         * @return Whether a result exists for {@code type}.
+         */
+        public boolean hasResult(final RegionFileType type) {
+            return this.hasResult[type.ordinal()];
+        }
+
+        /**
+         * Returns the data result for the regionfile type.
+         *
+         * @param type Specified regionfile type.
+         *
+         * @throws IllegalArgumentException If the result has not been set for {@code type}.
+         * @return The data result for the specified type. If the result is a {@code Throwable},
+         *         then returns {@code null}.
+         */
+        public CompoundTag getData(final RegionFileType type) {
+            final int index = type.ordinal();
+
+            if (!this.hasResult[index]) {
+                throw new IllegalArgumentException("Result does not exist for type " + type);
+            }
+
+            return this.data[index];
+        }
+
+        /**
+         * Returns the throwable result for the regionfile type.
+         *
+         * @param type Specified regionfile type.
+         *
+         * @throws IllegalArgumentException If the result has not been set for {@code type}.
+         * @return The throwable result for the specified type. If the result is a {@code CompoundTag},
+         *         then returns {@code null}.
+         */
+        public Throwable getThrowable(final RegionFileType type) {
+            final int index = type.ordinal();
+
+            if (!this.hasResult[index]) {
+                throw new IllegalArgumentException("Result does not exist for type " + type);
+            }
+
+            return this.throwables[index];
+        }
+    }
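+    // Illustrative usage (not part of the original patch): a bulk-load callback inspects the
+    // per-type results; for each type, a throwable and a data tag are mutually exclusive:
+    //
+    //     RegionFileIOThread.loadAllChunkData(world, chunkX, chunkZ, (final RegionFileData result) -> {
+    //         for (final RegionFileType type : RegionFileType.values()) {
+    //             if (result.getThrowable(type) != null) {
+    //                 LOGGER.error("Failed to load " + type, result.getThrowable(type));
+    //             } else {
+    //                 final CompoundTag tag = result.getData(type); // null means no data on disk
+    //             }
+    //         }
+    //     }, false);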
+
+    private static final Object INIT_LOCK = new Object();
+
+    static RegionFileIOThread[] threads;
+
+    /* needs to be consistent given a set of parameters */
+    static RegionFileIOThread selectThread(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
+        if (threads == null) {
+            throw new IllegalStateException("Threads not initialised");
+        }
+
+        final int regionX = chunkX >> 5;
+        final int regionZ = chunkZ >> 5;
+        final int typeOffset = type.ordinal();
+
+        return threads[(System.identityHashCode(world) + regionX + regionZ + typeOffset) % threads.length];
+    }
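+    // Illustrative note (not part of the original patch): the selection above is a pure function of
+    // (world, region, type), so every read and write for the same 32x32-chunk region and data type is
+    // funnelled to the same thread. That per-thread ordering is what lets a later queued write for a
+    // chunk safely supersede an earlier one without extra cross-thread synchronisation on the data.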
+
+    /**
+     * Shuts down the I/O executor(s). Waits for all tasks to complete if specified.
+     * Tasks queued during this call might not be accepted, and tasks queued after will not be accepted.
+     *
+     * @param wait Whether to wait until all tasks have completed.
+     */
+    public static void close(final boolean wait) {
+        for (int i = 0, len = threads.length; i < len; ++i) {
+            threads[i].close(false, true);
+        }
+        if (wait) {
+            RegionFileIOThread.flush();
+        }
+    }
+
+    public static long[] getExecutedTasks() {
+        final long[] ret = new long[threads.length];
+        for (int i = 0, len = threads.length; i < len; ++i) {
+            ret[i] = threads[i].getTotalTasksExecuted();
+        }
+
+        return ret;
+    }
+
+    public static long[] getTasksScheduled() {
+        final long[] ret = new long[threads.length];
+        for (int i = 0, len = threads.length; i < len; ++i) {
+            ret[i] = threads[i].getTotalTasksScheduled();
+        }
+        return ret;
+    }
+
+    public static void flush() {
+        for (int i = 0, len = threads.length; i < len; ++i) {
+            threads[i].waitUntilAllExecuted();
+        }
+    }
+
+    public static void partialFlush(final int totalTasksRemaining) {
+        long failures = 1L; // start out at 0.25ms
+
+        for (;;) {
+            final long[] executed = getExecutedTasks();
+            final long[] scheduled = getTasksScheduled();
+
+            long sum = 0;
+            for (int i = 0; i < executed.length; ++i) {
+                sum += scheduled[i] - executed[i];
+            }
+
+            if (sum <= totalTasksRemaining) {
+                break;
+            }
+
+            failures = ConcurrentUtil.linearLongBackoff(failures, 250_000L, 5_000_000L); // 500us, 5ms
+        }
+    }
+
+    /**
+     * Inits the executor with the specified number of threads.
+     *
+     * @param threads Specified number of threads.
+     */
+    public static void init(final int threads) {
+        synchronized (INIT_LOCK) {
+            if (RegionFileIOThread.threads != null) {
+                throw new IllegalStateException("Already initialised threads");
+            }
+
+            RegionFileIOThread.threads = new RegionFileIOThread[threads];
+
+            for (int i = 0; i < threads; ++i) {
+                RegionFileIOThread.threads[i] = new RegionFileIOThread(i);
+                RegionFileIOThread.threads[i].start();
+            }
+        }
+    }
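+    // Illustrative usage (not part of the original patch): the server bootstrap is expected to call
+    // init exactly once before any world I/O is scheduled, e.g.:
+    //
+    //     RegionFileIOThread.init(1); // a single I/O thread; the count here is an assumption
+    //
+    // A second call throws IllegalStateException, and selectThread throws until init has run.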
+
+    private RegionFileIOThread(final int threadNumber) {
+        super(new PrioritisedThreadedTaskQueue(), (int)(1.0e6)); // 1.0ms spinwait time
+        this.setName("RegionFile I/O Thread #" + threadNumber);
+        this.setPriority(Thread.NORM_PRIORITY - 2); // we keep priority close to normal because threads can wait on us
+        this.setUncaughtExceptionHandler((final Thread thread, final Throwable thr) -> {
+            LOGGER.error("Uncaught exception thrown from I/O thread, report this! Thread: " + thread.getName(), thr);
+        });
+    }
+
+    /**
+     * Returns whether the current thread is a regionfile I/O executor.
+     * @return Whether the current thread is a regionfile I/O executor.
+     */
+    public static boolean isRegionFileThread() {
+        return Thread.currentThread() instanceof RegionFileIOThread;
+    }
+
+    /**
+     * Returns the priority associated with blocking I/O based on the current thread. The goal is to prevent
+     * dumb plugins from taking priority away from threads we consider crucial.
+     * @return The priority to use with blocking I/O on the current thread.
+     */
+    public static PrioritisedExecutor.Priority getIOBlockingPriorityForCurrentThread() {
+        if (TickThread.isTickThread()) {
+            return PrioritisedExecutor.Priority.BLOCKING;
+        }
+        return PrioritisedExecutor.Priority.HIGHEST;
+    }
+
+    /**
+     * Returns the current {@code CompoundTag} pending for write for the specified chunk & regionfile type.
+     * Note that this does not copy the result, so do not modify the result returned.
+     *
+     * @param world Specified world.
+     * @param chunkX Specified chunk x.
+     * @param chunkZ Specified chunk z.
+     * @param type Specified regionfile type.
+     *
+     * @return The compound tag associated for the specified chunk. {@code null} if no write was pending, or if {@code null} is the write pending.
+     */
+    public static CompoundTag getPendingWrite(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
+        final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
+        return thread.getPendingWriteInternal(world, chunkX, chunkZ, type);
+    }
+
+    CompoundTag getPendingWriteInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
+        final ChunkDataController taskController = this.getControllerFor(world, type);
+        final ChunkDataTask task = taskController.tasks.get(new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
+
+        if (task == null) {
+            return null;
+        }
+
+        final CompoundTag ret = task.inProgressWrite;
+
+        return ret == ChunkDataTask.NOTHING_TO_WRITE ? null : ret;
+    }
+
+    /**
+     * Returns the priority for the specified regionfile type for the specified chunk.
+     * @param world Specified world.
+     * @param chunkX Specified chunk x.
+     * @param chunkZ Specified chunk z.
+     * @param type Specified regionfile type.
+     * @return The priority for the chunk.
+     */
+    public static PrioritisedExecutor.Priority getPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
+        final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
+        return thread.getPriorityInternal(world, chunkX, chunkZ, type);
+    }
+
+    PrioritisedExecutor.Priority getPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) {
+        final ChunkDataController taskController = this.getControllerFor(world, type);
+        final ChunkDataTask task = taskController.tasks.get(new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
+
+        if (task == null) {
+            return PrioritisedExecutor.Priority.COMPLETING;
+        }
+
+        return task.prioritisedTask.getPriority();
+    }
+
+    /**
+     * Sets the priority for all regionfile types for the specified chunk. Note that great care should
+     * be taken using this method, as there can be multiple tasks tied to the same chunk that want different
+     * priorities.
+     *
+     * @param world Specified world.
+     * @param chunkX Specified chunk x.
+     * @param chunkZ Specified chunk z.
+     * @param priority New priority.
+     *
+     * @see #raisePriority(ServerLevel, int, int, Priority)
+     * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
+     * @see #lowerPriority(ServerLevel, int, int, Priority)
+     * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
+     */
+    public static void setPriority(final ServerLevel world, final int chunkX, final int chunkZ,
+                                   final PrioritisedExecutor.Priority priority) {
+        for (final RegionFileType type : CACHED_REGIONFILE_TYPES) {
+            RegionFileIOThread.setPriority(world, chunkX, chunkZ, type, priority);
+        }
+    }
+
+    /**
+     * Sets the priority for the specified regionfile type for the specified chunk. Note that great care should
+     * be taken using this method, as there can be multiple tasks tied to the same chunk that want different
+     * priorities.
+     *
+     * @param world Specified world.
+     * @param chunkX Specified chunk x.
+     * @param chunkZ Specified chunk z.
+     * @param type Specified regionfile type.
+     * @param priority New priority.
+     *
+     * @see #raisePriority(ServerLevel, int, int, Priority)
+     * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
+     * @see #lowerPriority(ServerLevel, int, int, Priority)
+     * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
+     */
+    public static void setPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
+                                   final PrioritisedExecutor.Priority priority) {
+        final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
+        thread.setPriorityInternal(world, chunkX, chunkZ, type, priority);
+    }
+
+    void setPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
+                             final PrioritisedExecutor.Priority priority) {
+        final ChunkDataController taskController = this.getControllerFor(world, type);
+        final ChunkDataTask task = taskController.tasks.get(new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
+
+        if (task != null) {
+            task.prioritisedTask.setPriority(priority);
+        }
+    }
+
+    /**
+     * Raises the priority for all regionfile types for the specified chunk.
+     *
+     * @param world Specified world.
+     * @param chunkX Specified chunk x.
+     * @param chunkZ Specified chunk z.
+     * @param priority New priority.
+     *
+     * @see #setPriority(ServerLevel, int, int, Priority)
+     * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
+     * @see #lowerPriority(ServerLevel, int, int, Priority)
+     * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
+     */
+    public static void raisePriority(final ServerLevel world, final int chunkX, final int chunkZ,
+                                     final PrioritisedExecutor.Priority priority) {
+        for (final RegionFileType type : CACHED_REGIONFILE_TYPES) {
+            RegionFileIOThread.raisePriority(world, chunkX, chunkZ, type, priority);
+        }
+    }
+
+    /**
+     * Raises the priority for the specified regionfile type for the specified chunk.
+     *
+     * @param world Specified world.
+     * @param chunkX Specified chunk x.
+     * @param chunkZ Specified chunk z.
+     * @param type Specified regionfile type.
+     * @param priority New priority.
+     *
+     * @see #setPriority(ServerLevel, int, int, Priority)
+     * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
+     * @see #lowerPriority(ServerLevel, int, int, Priority)
+     * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority)
+     */
+    public static void raisePriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
+                                     final PrioritisedExecutor.Priority priority) {
+        final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
+        thread.raisePriorityInternal(world, chunkX, chunkZ, type, priority);
+    }
+
+    void raisePriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
+                               final PrioritisedExecutor.Priority priority) {
+        final ChunkDataController taskController = this.getControllerFor(world, type);
+        final ChunkDataTask task = taskController.tasks.get(new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
+
+        if (task != null) {
+            task.prioritisedTask.raisePriority(priority);
+        }
+    }
+
+    /**
+     * Lowers the priority for all regionfile types for the specified chunk.
+     *
+     * @param world Specified world.
+     * @param chunkX Specified chunk x.
+     * @param chunkZ Specified chunk z.
+     * @param priority New priority.
+     *
+     * @see #raisePriority(ServerLevel, int, int, Priority)
+     * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
+     * @see #setPriority(ServerLevel, int, int, Priority)
+     * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
+     */
+    public static void lowerPriority(final ServerLevel world, final int chunkX, final int chunkZ,
+                                     final PrioritisedExecutor.Priority priority) {
+        for (final RegionFileType type : CACHED_REGIONFILE_TYPES) {
+            RegionFileIOThread.lowerPriority(world, chunkX, chunkZ, type, priority);
+        }
+    }
+
+    /**
+     * Lowers the priority for the specified regionfile type for the specified chunk.
+     *
+     * @param world Specified world.
+     * @param chunkX Specified chunk x.
+     * @param chunkZ Specified chunk z.
+     * @param type Specified regionfile type.
+     * @param priority New priority.
+     *
+     * @see #raisePriority(ServerLevel, int, int, Priority)
+     * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority)
+     * @see #setPriority(ServerLevel, int, int, Priority)
+     * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority)
+     */
+    public static void lowerPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
+                                     final PrioritisedExecutor.Priority priority) {
+        final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
+        thread.lowerPriorityInternal(world, chunkX, chunkZ, type, priority);
+    }
+
+    void lowerPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
+                               final PrioritisedExecutor.Priority priority) {
+        final ChunkDataController taskController = this.getControllerFor(world, type);
+        final ChunkDataTask task = taskController.tasks.get(new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)));
+
+        if (task != null) {
+            task.prioritisedTask.lowerPriority(priority);
+        }
+    }
+
+    /**
+     * Schedules the chunk data to be written asynchronously.
+     * <p>
+     * Impl notes:
+     * </p>
+     * <li>
+     * This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means
+     * saves must be scheduled before a chunk is unloaded.
+     * </li>
+     * <li>
+     * Writes may be called concurrently, although only the "later" write will go through.
+     * </li>
+     *
+     * @param world Chunk's world
+     * @param chunkX Chunk's x coordinate
+     * @param chunkZ Chunk's z coordinate
+     * @param data Chunk's data
+     * @param type The regionfile type to write to.
+     *
+     * @throws IllegalStateException If the file io thread has shutdown.
+     */
+    public static void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data,
+                                    final RegionFileType type) {
+        RegionFileIOThread.scheduleSave(world, chunkX, chunkZ, data, type, PrioritisedExecutor.Priority.NORMAL);
+    }
+
+    /**
+     * Schedules the chunk data to be written asynchronously.
+     * <p>
+     * Impl notes:
+     * </p>
+     * <li>
+     * This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means
+     * saves must be scheduled before a chunk is unloaded.
+     * </li>
+     * <li>
+     * Writes may be called concurrently, although only the "later" write will go through.
+     * </li>
+     *
+     * @param world Chunk's world
+     * @param chunkX Chunk's x coordinate
+     * @param chunkZ Chunk's z coordinate
+     * @param data Chunk's data
+     * @param type The regionfile type to write to.
+     * @param priority The minimum priority to schedule at.
+     *
+     * @throws IllegalStateException If the file io thread has shutdown.
+     */
+    public static void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data,
+                                    final RegionFileType type, final PrioritisedExecutor.Priority priority) {
+        final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
+        thread.scheduleSaveInternal(world, chunkX, chunkZ, data, type, priority);
+    }
+
+    void scheduleSaveInternal(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data,
+                              final RegionFileType type, final PrioritisedExecutor.Priority priority) {
+        final ChunkDataController taskController = this.getControllerFor(world, type);
+
+        final boolean[] created = new boolean[1];
+        final ChunkCoordinate key = new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ));
+        final ChunkDataTask task = taskController.tasks.compute(key, (final ChunkCoordinate keyInMap, final ChunkDataTask taskRunning) -> {
+            if (taskRunning == null || taskRunning.failedWrite) {
+                // no task is scheduled or the previous write failed - meaning we need to overwrite it
+
+                // create task
+                final ChunkDataTask newTask = new ChunkDataTask(world, chunkX, chunkZ, taskController, RegionFileIOThread.this, priority);
+                newTask.inProgressWrite = data;
+                created[0] = true;
+
+                return newTask;
+            }
+
+            taskRunning.inProgressWrite = data;
+
+            return taskRunning;
+        });
+
+        if (created[0]) {
+            task.prioritisedTask.queue();
+        } else {
+            task.prioritisedTask.raisePriority(priority);
+        }
+    }
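+    // Illustrative usage (not part of the original patch): an unload path saves the serialised chunk
+    // before the in-memory copy is dropped, relying on the coalescing behaviour above so that only
+    // the newest pending tag for a chunk is actually written:
+    //
+    //     final CompoundTag tag = ...; // serialised chunk data, produced while the chunk is loaded
+    //     RegionFileIOThread.scheduleSave(world, chunkX, chunkZ, tag, RegionFileType.CHUNK_DATA,
+    //         PrioritisedExecutor.Priority.NORMAL);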
+
+    /**
+     * Schedules a load to be executed asynchronously. This task will load all regionfile types, and then call
+     * {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)}
+     * for single load.
+     * <p>
+     * Impl notes:
+     * </p>
+     * <li>
+     * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
+     * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
+     * data is undefined behaviour, and can cause deadlock.
+     * </li>
+     *
+     * @param world Chunk's world
+     * @param chunkX Chunk's x coordinate
+     * @param chunkZ Chunk's z coordinate
+     * @param onComplete Consumer to execute once this task has completed
+     * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
+     *                         of this call.
+     *
+     * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
+     *
+     * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
+     * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
+     * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
+     * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
+     */
+    public static Cancellable loadAllChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
+                                               final Consumer<RegionFileData> onComplete, final boolean intendingToBlock) {
+        return RegionFileIOThread.loadAllChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL);
+    }
+
+    /**
+     * Schedules a load to be executed asynchronously. This task will load all regionfile types, and then call
+     * {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)}
+     * for single load.
+     * <p>
+     * Impl notes:
+     * </p>
+     * <li>
+     * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
+     * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
+     * data is undefined behaviour, and can cause deadlock.
+     * </li>
+     *
+     * @param world Chunk's world
+     * @param chunkX Chunk's x coordinate
+     * @param chunkZ Chunk's z coordinate
+     * @param onComplete Consumer to execute once this task has completed
+     * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
+     *                         of this call.
+     * @param priority The minimum priority to load the data at.
+     *
+     * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
+     *
+     * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
+     * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
+     * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
+     * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
+     */
+    public static Cancellable loadAllChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
+                                               final Consumer<RegionFileData> onComplete, final boolean intendingToBlock,
+                                               final PrioritisedExecutor.Priority priority) {
+        return RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, priority, CACHED_REGIONFILE_TYPES);
+    }
+
+    /**
+     * Schedules a load to be executed asynchronously. This task will load data for the specified regionfile type(s), and
+     * then call {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)}
+     * for single load.
+     * <p>
+     * Impl notes:
+     * </p>
+     * <li>
+     * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
+     * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
+     * data is undefined behaviour, and can cause deadlock.
+     * </li>
+     *
+     * @param world Chunk's world
+     * @param chunkX Chunk's x coordinate
+     * @param chunkZ Chunk's z coordinate
+     * @param onComplete Consumer to execute once this task has completed
+     * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
+     *                         of this call.
+     * @param types The regionfile type(s) to load.
+     *
+     * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
+     *
+     * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
+     * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
+     * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
+     * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
+     */
+    public static Cancellable loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
+                                            final Consumer<RegionFileData> onComplete, final boolean intendingToBlock,
+                                            final RegionFileType... types) {
+        return RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL, types);
+    }
|
|
+
|
|
+    /**
+     * Schedules a load to be executed asynchronously. This task will load data for the specified regionfile type(s), and
+     * then call {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)}
+     * for a single load.
+     * <p>
+     * Impl notes:
+     * </p>
+     * <ul>
+     * <li>
+     * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
+     * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
+     * data is undefined behaviour, and can cause deadlock.
+     * </li>
+     * </ul>
+     *
+     * @param world Chunk's world
+     * @param chunkX Chunk's x coordinate
+     * @param chunkZ Chunk's z coordinate
+     * @param onComplete Consumer to execute once this task has completed
+     * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
+     *                         of this call.
+     * @param priority The minimum priority to load the data at.
+     * @param types The regionfile type(s) to load.
+     *
+     * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
+     *
+     * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)
+     * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)
+     * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
+     * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
+     */
+    public static Cancellable loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ,
+                                            final Consumer<RegionFileData> onComplete, final boolean intendingToBlock,
+                                            final PrioritisedExecutor.Priority priority, final RegionFileType... types) {
+        if (types == null) {
+            throw new NullPointerException("Types cannot be null");
+        }
+        if (types.length == 0) {
+            throw new IllegalArgumentException("Types cannot be empty");
+        }
+
+        final RegionFileData ret = new RegionFileData();
+
+        final Cancellable[] reads = new CancellableRead[types.length];
+        final AtomicInteger completions = new AtomicInteger();
+        final int expectedCompletions = types.length;
+
+        for (int i = 0; i < expectedCompletions; ++i) {
+            final RegionFileType type = types[i];
+            reads[i] = RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type,
+                (final CompoundTag data, final Throwable throwable) -> {
+                    if (throwable != null) {
+                        ret.setThrowable(type, throwable);
+                    } else {
+                        ret.setData(type, data);
+                    }
+
+                    if (completions.incrementAndGet() == expectedCompletions) {
+                        onComplete.accept(ret);
+                    }
+                }, intendingToBlock, priority);
+        }
+
+        return new CancellableReads(reads);
+    }
+
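+    // Note on the fan-out above: each per-type read bumps the shared AtomicInteger on
+    // completion, so exactly one callback - the one performing the final increment - observes
+    // completions == expectedCompletions and invokes onComplete. This holds whether the
+    // individual reads complete synchronously during scheduling or later on an io thread.
+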
+    /**
+     * Schedules a load to be executed asynchronously. This task will load the specified regionfile type, and then call
+     * {@code onComplete}.
+     * <p>
+     * Impl notes:
+     * </p>
+     * <ul>
+     * <li>
+     * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
+     * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
+     * data is undefined behaviour, and can cause deadlock.
+     * </li>
+     * </ul>
+     *
+     * @param world Chunk's world
+     * @param chunkX Chunk's x coordinate
+     * @param chunkZ Chunk's z coordinate
+     * @param type The regionfile type to load.
+     * @param onComplete Consumer to execute once this task has completed
+     * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
+     *                         of this call.
+     *
+     * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
+     *
+     * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
+     * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
+     * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
+     * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
+     */
+    public static Cancellable loadDataAsync(final ServerLevel world, final int chunkX, final int chunkZ,
+                                            final RegionFileType type, final BiConsumer<CompoundTag, Throwable> onComplete,
+                                            final boolean intendingToBlock) {
+        return RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL);
+    }
+
+    /**
+     * Schedules a load to be executed asynchronously. This task will load the specified regionfile type, and then call
+     * {@code onComplete}.
+     * <p>
+     * Impl notes:
+     * </p>
+     * <ul>
+     * <li>
+     * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may
+     * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of
+     * data is undefined behaviour, and can cause deadlock.
+     * </li>
+     * </ul>
+     *
+     * @param world Chunk's world
+     * @param chunkX Chunk's x coordinate
+     * @param chunkZ Chunk's z coordinate
+     * @param type The regionfile type to load.
+     * @param onComplete Consumer to execute once this task has completed
+     * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost
+     *                         of this call.
+     * @param priority Minimum priority to load the data at.
+     *
+     * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data.
+     *
+     * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...)
+     * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...)
+     * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean)
+     * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority)
+     */
+    public static Cancellable loadDataAsync(final ServerLevel world, final int chunkX, final int chunkZ,
+                                            final RegionFileType type, final BiConsumer<CompoundTag, Throwable> onComplete,
+                                            final boolean intendingToBlock, final PrioritisedExecutor.Priority priority) {
+        final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type);
+        return thread.loadDataAsyncInternal(world, chunkX, chunkZ, type, onComplete, intendingToBlock, priority);
+    }
+
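+    // Usage sketch for a single-type load; RegionFileType.CHUNK_DATA is assumed here to be one
+    // of the constants declared on RegionFileType:
+    //
+    //   loadDataAsync(world, chunkX, chunkZ, RegionFileType.CHUNK_DATA,
+    //       (final CompoundTag data, final Throwable thr) -> {
+    //           // thr != null indicates a failed read; otherwise data == null means the
+    //           // chunk (or its regionfile) does not exist on disk
+    //       }, false);
+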
+    private static Boolean doesRegionFileExist(final int chunkX, final int chunkZ, final boolean intendingToBlock,
+                                               final ChunkDataController taskController) {
+        final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ);
+        if (intendingToBlock) {
+            return taskController.computeForRegionFile(chunkX, chunkZ, true, (final RegionFile file) -> {
+                if (file == null) { // null if no regionfile exists
+                    return Boolean.FALSE;
+                }
+
+                return file.hasChunk(chunkPos) ? Boolean.TRUE : Boolean.FALSE;
+            });
+        } else {
+            return taskController.computeForRegionFileIfLoaded(chunkX, chunkZ, (final RegionFile file) -> {
+                if (file == null) { // null if not loaded
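+                    // the regionfile is not loaded and the caller does not intend to block,
+                    // so we cannot cheaply check the disk; assume the chunk exists so a read
+                    // is still scheduled and resolved on the io thread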
+                    return Boolean.TRUE;
+                }
+
+                return file.hasChunk(chunkPos) ? Boolean.TRUE : Boolean.FALSE;
+            });
+        }
+    }
+
+    Cancellable loadDataAsyncInternal(final ServerLevel world, final int chunkX, final int chunkZ,
+                                      final RegionFileType type, final BiConsumer<CompoundTag, Throwable> onComplete,
+                                      final boolean intendingToBlock, final PrioritisedExecutor.Priority priority) {
+        final ChunkDataController taskController = this.getControllerFor(world, type);
+
+        final ImmediateCallbackCompletion callbackInfo = new ImmediateCallbackCompletion();
+
+        final ChunkCoordinate key = new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ));
+        final BiFunction<ChunkCoordinate, ChunkDataTask, ChunkDataTask> compute = (final ChunkCoordinate keyInMap, final ChunkDataTask running) -> {
+            if (running == null) {
+                // not scheduled
+
+                if (callbackInfo.regionFileCalculation == null) {
+                    // caller will compute this outside of compute(), to avoid holding the bin lock
+                    callbackInfo.needsRegionFileTest = true;
+                    return null;
+                }
+
+                if (callbackInfo.regionFileCalculation == Boolean.FALSE) {
+                    // not on disk
+                    callbackInfo.data = null;
+                    callbackInfo.throwable = null;
+                    callbackInfo.completeNow = true;
+                    return null;
+                }
+
+                // set up task
+                final ChunkDataTask newTask = new ChunkDataTask(
+                    world, chunkX, chunkZ, taskController, RegionFileIOThread.this, priority
+                );
+                newTask.inProgressRead = new RegionFileIOThread.InProgressRead();
+                newTask.inProgressRead.waiters.add(onComplete);
+
+                callbackInfo.tasksNeedsScheduling = true;
+                return newTask;
+            }
+
+            final CompoundTag pendingWrite = running.inProgressWrite;
+
+            if (pendingWrite == ChunkDataTask.NOTHING_TO_WRITE) {
+                // need to add to waiters here, because the regionfile thread will use compute() to lock and check for cancellations
+                if (!running.inProgressRead.addToWaiters(onComplete)) {
+                    callbackInfo.data = running.inProgressRead.value;
+                    callbackInfo.throwable = running.inProgressRead.throwable;
+                    callbackInfo.completeNow = true;
+                }
+                return running;
+            }
+            // using the result sync here - don't bump priority
+
+            // at this stage we have to use the in progress write's data to avoid an order issue
+            callbackInfo.data = pendingWrite;
+            callbackInfo.throwable = null;
+            callbackInfo.completeNow = true;
+            return running;
+        };
+
+        ChunkDataTask curr = taskController.tasks.get(key);
+        if (curr == null) {
+            callbackInfo.regionFileCalculation = doesRegionFileExist(chunkX, chunkZ, intendingToBlock, taskController);
+        }
+        ChunkDataTask ret = taskController.tasks.compute(key, compute);
+        if (callbackInfo.needsRegionFileTest) {
+            // an entry existed at the get() above but was removed before compute() ran, so
+            // run the regionfile check now (still outside of compute()) and retry
+            callbackInfo.regionFileCalculation = doesRegionFileExist(chunkX, chunkZ, intendingToBlock, taskController);
+            ret = taskController.tasks.compute(key, compute);
+        }
+
+        // needs to be scheduled
+        if (callbackInfo.tasksNeedsScheduling) {
+            ret.prioritisedTask.queue();
+        } else if (callbackInfo.completeNow) {
+            try {
+                onComplete.accept(callbackInfo.data, callbackInfo.throwable);
+            } catch (final ThreadDeath thr) {
+                throw thr;
+            } catch (final Throwable thr) {
+                LOGGER.error("Callback " + ConcurrentUtil.genericToString(onComplete) + " synchronously failed to handle chunk data for task " + ret.toString(), thr);
+            }
+        } else {
+            // we're waiting on a task we didn't schedule, so raise its priority to what we want
+            ret.prioritisedTask.raisePriority(priority);
+        }
+
+        return new CancellableRead(onComplete, ret);
+    }
+
+    /**
+     * Schedules a load task to be executed asynchronously, and blocks on that task.
+     *
+     * @param world Chunk's world
+     * @param chunkX Chunk's x coordinate
+     * @param chunkZ Chunk's z coordinate
+     * @param type Regionfile type
+     * @param priority Minimum priority to load the data at.
+     *
+     * @return The chunk data for the chunk. Note that a {@code null} result means the chunk or regionfile does not exist on disk.
+     *
+     * @throws IOException If the load fails for any reason
+     */
+    public static CompoundTag loadData(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type,
+                                       final PrioritisedExecutor.Priority priority) throws IOException {
+        final CompletableFuture<CompoundTag> ret = new CompletableFuture<>();
+
+        RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type, (final CompoundTag compound, final Throwable thr) -> {
+            if (thr != null) {
+                ret.completeExceptionally(thr);
+            } else {
+                ret.complete(compound);
+            }
+        }, true, priority);
+
+        try {
+            return ret.join();
+        } catch (final CompletionException ex) {
+            throw new IOException(ex);
+        }
+    }
+
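+    // Usage sketch for the blocking variant, e.g. from a save/shutdown path. The constant
+    // names RegionFileType.CHUNK_DATA and Priority.BLOCKING are assumptions here:
+    //
+    //   final CompoundTag nbt = loadData(world, chunkX, chunkZ, RegionFileType.CHUNK_DATA,
+    //       PrioritisedExecutor.Priority.BLOCKING);
+    //   if (nbt == null) {
+    //       // the chunk (or its regionfile) does not exist on disk
+    //   }
+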
+    private static final class ImmediateCallbackCompletion {
+
+        public CompoundTag data;
+        public Throwable throwable;
+        public boolean completeNow;
+        public boolean tasksNeedsScheduling;
+        public boolean needsRegionFileTest;
+        public Boolean regionFileCalculation;
+
+    }
+
+    static final class CancellableRead implements Cancellable {
+
+        private BiConsumer<CompoundTag, Throwable> callback;
+        private RegionFileIOThread.ChunkDataTask task;
+
+        CancellableRead(final BiConsumer<CompoundTag, Throwable> callback, final RegionFileIOThread.ChunkDataTask task) {
+            this.callback = callback;
+            this.task = task;
+        }
+
+        @Override
+        public boolean cancel() {
+            final BiConsumer<CompoundTag, Throwable> callback = this.callback;
+            final RegionFileIOThread.ChunkDataTask task = this.task;
+
+            if (callback == null || task == null) {
+                return false;
+            }
+
+            this.callback = null;
+            this.task = null;
+
+            final RegionFileIOThread.InProgressRead read = task.inProgressRead;
+
+            // read can be null if no read was scheduled (i.e. no regionfile existed, or the chunk didn't exist in the regionfile)
+            return (read != null && read.waiters.remove(callback));
+        }
+    }
+
+    static final class CancellableReads implements Cancellable {
+
+        private Cancellable[] reads;
+
+        protected static final VarHandle READS_HANDLE = ConcurrentUtil.getVarHandle(CancellableReads.class, "reads", Cancellable[].class);
+
+        CancellableReads(final Cancellable[] reads) {
+            this.reads = reads;
+        }
+
+        @Override
+        public boolean cancel() {
+            final Cancellable[] reads = (Cancellable[])READS_HANDLE.getAndSet((CancellableReads)this, (Cancellable[])null);
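+            // the atomic swap above lets at most one caller observe the non-null array, which
+            // makes cancel() idempotent and safe to race from multiple threads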
+
+            if (reads == null) {
+                return false;
+            }
+
+            boolean ret = false;
+
+            for (final Cancellable read : reads) {
+                ret |= read.cancel();
+            }
+
+            return ret;
+        }
+    }
+
+    static final class InProgressRead {
+
+        private static final Logger LOGGER = LogUtils.getClassLogger();
+
+        CompoundTag value;
+        Throwable throwable;
+        final MultiThreadedQueue<BiConsumer<CompoundTag, Throwable>> waiters = new MultiThreadedQueue<>();
+
+        // returns false if already completed (callback not invoked), true if the callback was added
+        boolean addToWaiters(final BiConsumer<CompoundTag, Throwable> callback) {
+            return this.waiters.add(callback);
+        }
+
+        void complete(final RegionFileIOThread.ChunkDataTask task, final CompoundTag value, final Throwable throwable) {
+            this.value = value;
+            this.throwable = throwable;
+
+            BiConsumer<CompoundTag, Throwable> consumer;
+            while ((consumer = this.waiters.pollOrBlockAdds()) != null) {
+                try {
+                    consumer.accept(value, throwable);
+                } catch (final ThreadDeath thr) {
+                    throw thr;
+                } catch (final Throwable thr) {
+                    LOGGER.error("Callback " + ConcurrentUtil.genericToString(consumer) + " failed to handle chunk data for task " + task.toString(), thr);
+                }
+            }
+        }
+    }
+
+    /**
+     * Class exists to replace {@link Long} usages as keys inside non-fastutil hashtables. The hash for some Long {@code x}
+     * is defined as {@code (x >>> 32) ^ x}. Chunk keys as long values are defined as {@code ((chunkX & 0xFFFFFFFFL) | (chunkZ << 32))},
+     * which means the hashcode as a Long value will be {@code chunkX ^ chunkZ}. Given that most chunks are created within a radius around players,
+     * this will lead to many hash collisions (for example, every chunk on the diagonal {@code (n, n)} hashes to {@code 0} under that scheme).
+     * So, this class uses a better hashing algorithm so that usage of non-fastutil collections is not degraded.
+     */
+    public static final class ChunkCoordinate implements Comparable<ChunkCoordinate> {
+
+        public final long key;
+
+        public ChunkCoordinate(final long key) {
+            this.key = key;
+        }
+
+        @Override
+        public int hashCode() {
+            return (int)HashCommon.mix(this.key);
+        }
+
+        @Override
+        public boolean equals(final Object obj) {
+            if (this == obj) {
+                return true;
+            }
+
+            if (!(obj instanceof ChunkCoordinate)) {
+                return false;
+            }
+
+            final ChunkCoordinate other = (ChunkCoordinate)obj;
+
+            return this.key == other.key;
+        }
+
+        // This class is intended for HashMap/ConcurrentHashMap usage, which treeify bin nodes if the chain
+        // is too large. So we should implement compareTo to help.
+        @Override
+        public int compareTo(final RegionFileIOThread.ChunkCoordinate other) {
+            return Long.compare(this.key, other.key);
+        }
+
+        @Override
+        public String toString() {
+            return new ChunkPos(this.key).toString();
+        }
+    }
+
+    public static abstract class ChunkDataController {
+
+        // ConcurrentHashMap synchronizes per chain, so reduce the chance of the tasks' hashes colliding.
+        protected final ConcurrentHashMap<ChunkCoordinate, ChunkDataTask> tasks = new ConcurrentHashMap<>(8192, 0.10f);
+
+        public final RegionFileType type;
+
+        public ChunkDataController(final RegionFileType type) {
+            this.type = type;
+        }
+
+        public abstract RegionFileStorage getCache();
+
+        public abstract void writeData(final int chunkX, final int chunkZ, final CompoundTag compound) throws IOException;
+
+        public abstract CompoundTag readData(final int chunkX, final int chunkZ) throws IOException;
+
+        public boolean hasTasks() {
+            return !this.tasks.isEmpty();
+        }
+
+        public <T> T computeForRegionFile(final int chunkX, final int chunkZ, final boolean existingOnly, final Function<RegionFile, T> function) {
+            final RegionFileStorage cache = this.getCache();
+            final RegionFile regionFile;
+            synchronized (cache) {
+                try {
+                    regionFile = cache.getRegionFile(new ChunkPos(chunkX, chunkZ), existingOnly, true);
+                } catch (final IOException ex) {
+                    throw new RuntimeException(ex);
+                }
+            }
+
+            try {
+                return function.apply(regionFile);
+            } finally {
+                if (regionFile != null) {
+                    regionFile.fileLock.unlock();
+                }
+            }
+        }
+
+        public <T> T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function<RegionFile, T> function) {
+            final RegionFileStorage cache = this.getCache();
+            final RegionFile regionFile;
+
+            synchronized (cache) {
+                regionFile = cache.getRegionFileIfLoaded(new ChunkPos(chunkX, chunkZ));
+                if (regionFile != null) {
+                    regionFile.fileLock.lock();
+                }
+            }
+
+            try {
+                return function.apply(regionFile);
+            } finally {
+                if (regionFile != null) {
+                    regionFile.fileLock.unlock();
+                }
+            }
+        }
+    }
+
+    static final class ChunkDataTask implements Runnable {
+
+        protected static final CompoundTag NOTHING_TO_WRITE = new CompoundTag();
+
+        private static final Logger LOGGER = LogUtils.getClassLogger();
+
+        RegionFileIOThread.InProgressRead inProgressRead;
+        volatile CompoundTag inProgressWrite = NOTHING_TO_WRITE; // only needs to be acquire/release
+
+        boolean failedWrite;
+
+        final ServerLevel world;
+        final int chunkX;
+        final int chunkZ;
+        final RegionFileIOThread.ChunkDataController taskController;
+
+        final PrioritisedExecutor.PrioritisedTask prioritisedTask;
+
+        /*
+         * IO thread will perform reads before writes for a given chunk x and z
+         *
+         * How reads/writes are scheduled:
+         *
+         * If a read is scheduled while scheduling a write, take no special action and just schedule the write.
+         * If a read is scheduled while scheduling a read and no write is scheduled, chain the read task.
+         *
+         * If a write is scheduled while scheduling a read, use the pending write data and return immediately (so no read is scheduled).
+         * If a write is scheduled while scheduling a write (ignoring any read in progress), overwrite the in-progress write data.
+         *
+         * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them; however,
+         * it fails to properly propagate write failures, since later writes overwrite earlier ones.
+         */
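+
+        /*
+         * Worked example of the rules above, with threads A/B/C scheduling against the same chunk:
+         *   1. A schedules write W1 -> this task holds inProgressWrite = W1
+         *   2. B schedules a read   -> B is handed W1's data immediately; no disk read occurs
+         *   3. C schedules write W2 -> W2 replaces W1 as the pending data; the disk ends up with W2
+         * From each scheduler's point of view the operations behaved synchronously, but if
+         * writing W1 fails, that failure is lost once W2 replaces it.
+         */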
+
+        public ChunkDataTask(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileIOThread.ChunkDataController taskController,
+                             final PrioritisedExecutor executor, final PrioritisedExecutor.Priority priority) {
+            this.world = world;
+            this.chunkX = chunkX;
+            this.chunkZ = chunkZ;
+            this.taskController = taskController;
+            this.prioritisedTask = executor.createTask(this, priority);
+        }
+
+        @Override
+        public String toString() {
+            return "Task for world: '" + this.world.getWorld().getName() + "' at (" + this.chunkX + "," + this.chunkZ +
+                ") type: " + this.taskController.type.name() + ", hash: " + this.hashCode();
+        }
+
+        @Override
+        public void run() {
+            final RegionFileIOThread.InProgressRead read = this.inProgressRead;
+            final ChunkCoordinate chunkKey = new ChunkCoordinate(CoordinateUtils.getChunkKey(this.chunkX, this.chunkZ));
+
+            if (read != null) {
+                final boolean[] canRead = new boolean[] { true };
+
+                if (read.waiters.isEmpty()) {
+                    // cancelled read? go to task controller to confirm
+                    final ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> {
+                        if (valueInMap == null) {
+                            throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
+                        }
+                        if (valueInMap != ChunkDataTask.this) {
+                            throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
+                        }
+
+                        if (!read.waiters.isEmpty()) { // as per usual IntelliJ is unable to figure out that there are concurrent accesses.
+                            return valueInMap;
+                        } else {
+                            canRead[0] = false;
+                        }
+
+                        return valueInMap.inProgressWrite == NOTHING_TO_WRITE ? null : valueInMap;
+                    });
+
+                    if (inMap == null) {
+                        // read is cancelled - and no write pending, so we're done
+                        return;
+                    }
+                    // if there is a write in progress, we don't actually have to worry about waiters gaining new entries -
+                    // the readers will just use the in progress write, so the value in canRead is good to use without
+                    // further synchronisation.
+                }
+
+                if (canRead[0]) {
+                    CompoundTag compound = null;
+                    Throwable throwable = null;
+
+                    try {
+                        compound = this.taskController.readData(this.chunkX, this.chunkZ);
+                    } catch (final ThreadDeath thr) {
+                        throw thr;
+                    } catch (final Throwable thr) {
+                        throwable = thr;
+                        LOGGER.error("Failed to read chunk data for task: " + this.toString(), thr);
+                    }
+                    read.complete(this, compound, throwable);
+                }
+            }
+
+            CompoundTag write = this.inProgressWrite;
+
+            if (write == NOTHING_TO_WRITE) {
+                final ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> {
+                    if (valueInMap == null) {
+                        throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
+                    }
+                    if (valueInMap != ChunkDataTask.this) {
+                        throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
+                    }
+                    return valueInMap.inProgressWrite == NOTHING_TO_WRITE ? null : valueInMap;
+                });
+
+                if (inMap == null) {
+                    return; // compute() removed this task from the map - no write is pending, so we're done
+                } // else: inProgressWrite changed, so now we have something to write
+            }
+
+            for (;;) {
+                write = this.inProgressWrite;
+                final CompoundTag dataWritten = write;
+
+                boolean failedWrite = false;
+
+                try {
+                    this.taskController.writeData(this.chunkX, this.chunkZ, write);
+                } catch (final ThreadDeath thr) {
+                    throw thr;
+                } catch (final Throwable thr) {
+                    if (thr instanceof RegionFileStorage.RegionFileSizeException) {
+                        final int maxSize = RegionFile.MAX_CHUNK_SIZE / (1024 * 1024);
+                        LOGGER.error("Chunk at (" + this.chunkX + "," + this.chunkZ + ") in '" + this.world.getWorld().getName() + "' exceeds max size of " + maxSize + "MiB, it has been deleted from disk.");
+                    } else {
+                        failedWrite = thr instanceof IOException;
+                        LOGGER.error("Failed to write chunk data for task: " + this.toString(), thr);
+                    }
+                }
+
+                final boolean finalFailWrite = failedWrite;
+                final boolean[] done = new boolean[] { false };
+
+                this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> {
+                    if (valueInMap == null) {
+                        throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!");
+                    }
+                    if (valueInMap != ChunkDataTask.this) {
+                        throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!");
+                    }
+                    if (valueInMap.inProgressWrite == dataWritten) {
+                        valueInMap.failedWrite = finalFailWrite;
+                        done[0] = true;
+                        // keep the data in map if we failed the write so we can try to prevent data loss
+                        return finalFailWrite ? valueInMap : null;
+                    }
+                    // different data than expected, means we need to retry write
+                    return valueInMap;
+                });
+
+                if (done[0]) {
+                    return;
+                }
+
+                // fetch & write new data
+                continue;
+            }
+        }
+    }
+}
diff --git a/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
new file mode 100644
index 0000000000000000000000000000000000000000..0b7a2b0ead4f3bc07bfd9a38c2b7cf024bd140c6
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
@@ -0,0 +1,280 @@
+package io.papermc.paper.chunk.system.light;
+
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.starlight.common.light.BlockStarLightEngine;
+import ca.spottedleaf.starlight.common.light.SkyStarLightEngine;
+import ca.spottedleaf.starlight.common.light.StarLightInterface;
+import io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler;
+import io.papermc.paper.util.CoordinateUtils;
+import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
+import it.unimi.dsi.fastutil.shorts.ShortCollection;
+import it.unimi.dsi.fastutil.shorts.ShortOpenHashSet;
+import net.minecraft.core.BlockPos;
+import net.minecraft.core.SectionPos;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.world.level.ChunkPos;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.function.BooleanSupplier;
+
+public final class LightQueue {
+
+    protected final Long2ObjectOpenHashMap<ChunkTasks> chunkTasks = new Long2ObjectOpenHashMap<>();
+    protected final StarLightInterface manager;
+    protected final ServerLevel world;
+
+    public LightQueue(final StarLightInterface manager) {
+        this.manager = manager;
+        this.world = ((ServerLevel)manager.getWorld());
+    }
+
+    public void lowerPriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) {
+        final ChunkTasks task;
+        synchronized (this) {
+            task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
+        }
+        if (task != null) {
+            task.lowerPriority(priority);
+        }
+    }
+
+    public void setPriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) {
+        final ChunkTasks task;
+        synchronized (this) {
+            task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
+        }
+        if (task != null) {
+            task.setPriority(priority);
+        }
+    }
+
+    public void raisePriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) {
+        final ChunkTasks task;
+        synchronized (this) {
+            task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
+        }
+        if (task != null) {
+            task.raisePriority(priority);
+        }
+    }
+
+    public PrioritisedExecutor.Priority getPriority(final int chunkX, final int chunkZ) {
+        final ChunkTasks task;
+        synchronized (this) {
+            task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
+        }
+        if (task != null) {
+            return task.getPriority();
+        }
+
+        return PrioritisedExecutor.Priority.COMPLETING;
+    }
+
+    public boolean isEmpty() {
+        synchronized (this) {
+            return this.chunkTasks.isEmpty();
+        }
+    }
+
+    public CompletableFuture<Void> queueBlockChange(final BlockPos pos) {
+        final ChunkTasks tasks;
+        synchronized (this) {
+            tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
+                return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
+            });
+            tasks.changedPositions.add(pos.immutable());
+        }
+
+        tasks.schedule();
+
+        return tasks.onComplete;
+    }
+
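+    // Usage sketch: queue a relight for a changed block and react once the engine has
+    // processed it (the future completes on the light executor's thread):
+    //
+    //   lightQueue.queueBlockChange(pos).thenRun(() -> {
+    //       // light for the chunk containing pos has been recalculated
+    //   });
+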
+    public CompletableFuture<Void> queueSectionChange(final SectionPos pos, final boolean newEmptyValue) {
+        final ChunkTasks tasks;
+        synchronized (this) {
+            tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
+                return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
+            });
+
+            if (tasks.changedSectionSet == null) {
+                tasks.changedSectionSet = new Boolean[this.manager.maxSection - this.manager.minSection + 1];
+            }
+            tasks.changedSectionSet[pos.getY() - this.manager.minSection] = Boolean.valueOf(newEmptyValue);
+        }
+
+        tasks.schedule();
+
+        return tasks.onComplete;
+    }
+
+    public CompletableFuture<Void> queueChunkLightTask(final ChunkPos pos, final BooleanSupplier lightTask, final PrioritisedExecutor.Priority priority) {
+        final ChunkTasks tasks;
+        synchronized (this) {
+            tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
+                return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this, priority);
+            });
+            if (tasks.lightTasks == null) {
+                tasks.lightTasks = new ArrayList<>();
+            }
+            tasks.lightTasks.add(lightTask);
+        }
+
+        tasks.schedule();
+
+        return tasks.onComplete;
+    }
+
+    public CompletableFuture<Void> queueChunkSkylightEdgeCheck(final SectionPos pos, final ShortCollection sections) {
+        final ChunkTasks tasks;
+        synchronized (this) {
+            tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
+                return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
+            });
+
+            ShortOpenHashSet queuedEdges = tasks.queuedEdgeChecksSky;
+            if (queuedEdges == null) {
+                queuedEdges = tasks.queuedEdgeChecksSky = new ShortOpenHashSet();
+            }
+            queuedEdges.addAll(sections);
+        }
+
+        tasks.schedule();
+
+        return tasks.onComplete;
+    }
+
+    public CompletableFuture<Void> queueChunkBlocklightEdgeCheck(final SectionPos pos, final ShortCollection sections) {
+        final ChunkTasks tasks;
+
+        synchronized (this) {
+            tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> {
+                return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this);
+            });
+
+            ShortOpenHashSet queuedEdges = tasks.queuedEdgeChecksBlock;
+            if (queuedEdges == null) {
+                queuedEdges = tasks.queuedEdgeChecksBlock = new ShortOpenHashSet();
+            }
+            queuedEdges.addAll(sections);
+        }
+
+        tasks.schedule();
+
+        return tasks.onComplete;
+    }
+
+    public void removeChunk(final ChunkPos pos) {
+        final ChunkTasks tasks;
+        synchronized (this) {
+            tasks = this.chunkTasks.remove(CoordinateUtils.getChunkKey(pos));
+        }
+        if (tasks != null && tasks.cancel()) {
+            tasks.onComplete.complete(null);
+        }
+    }
+
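+    // removeChunk() completes the pending future only when the queued task could still be
+    // cancelled; if the task already ran (or is running), the task completes the future itself
+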
+    protected static final class ChunkTasks implements Runnable {
+
+        final Set<BlockPos> changedPositions = new HashSet<>();
+        Boolean[] changedSectionSet;
+        ShortOpenHashSet queuedEdgeChecksSky;
+        ShortOpenHashSet queuedEdgeChecksBlock;
+        List<BooleanSupplier> lightTasks;
+
+        final CompletableFuture<Void> onComplete = new CompletableFuture<>();
+
+        public final long chunkCoordinate;
+        private final StarLightInterface lightEngine;
+        private final LightQueue queue;
+        private final PrioritisedExecutor.PrioritisedTask task;
+
+        public ChunkTasks(final long chunkCoordinate, final StarLightInterface lightEngine, final LightQueue queue) {
+            this(chunkCoordinate, lightEngine, queue, PrioritisedExecutor.Priority.NORMAL);
+        }
+
+        public ChunkTasks(final long chunkCoordinate, final StarLightInterface lightEngine, final LightQueue queue,
+                          final PrioritisedExecutor.Priority priority) {
+            this.chunkCoordinate = chunkCoordinate;
+            this.lightEngine = lightEngine;
+            this.queue = queue;
+            this.task = queue.world.chunkTaskScheduler.lightExecutor.createTask(this, priority);
+        }
+
+        public void schedule() {
+            this.task.queue();
+        }
+
+        public boolean cancel() {
+            return this.task.cancel();
+        }
+
+        public PrioritisedExecutor.Priority getPriority() {
+            return this.task.getPriority();
+        }
+
+        public void lowerPriority(final PrioritisedExecutor.Priority priority) {
+            this.task.lowerPriority(priority);
+        }
+
+        public void setPriority(final PrioritisedExecutor.Priority priority) {
+            this.task.setPriority(priority);
+        }
+
+        public void raisePriority(final PrioritisedExecutor.Priority priority) {
+            this.task.raisePriority(priority);
+        }
+
+        @Override
+        public void run() {
+            final SkyStarLightEngine skyEngine = this.lightEngine.getSkyLightEngine();
+            final BlockStarLightEngine blockEngine = this.lightEngine.getBlockLightEngine();
+            try {
+                synchronized (this.queue) {
+                    this.queue.chunkTasks.remove(this.chunkCoordinate);
+                }
+
+                boolean litChunk = false;
+                if (this.lightTasks != null) {
+                    for (final BooleanSupplier run : this.lightTasks) {
+                        if (run.getAsBoolean()) {
+                            litChunk = true;
+                            break;
+                        }
+                    }
+                }
+
+                final long coordinate = this.chunkCoordinate;
+                final int chunkX = CoordinateUtils.getChunkX(coordinate);
+                final int chunkZ = CoordinateUtils.getChunkZ(coordinate);
+
+                final Set<BlockPos> positions = this.changedPositions;
+                final Boolean[] sectionChanges = this.changedSectionSet;
+
+                if (!litChunk) {
+                    if (skyEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
+                        skyEngine.blocksChangedInChunk(this.lightEngine.getLightAccess(), chunkX, chunkZ, positions, sectionChanges);
+                    }
+                    if (blockEngine != null && (!positions.isEmpty() || sectionChanges != null)) {
+                        blockEngine.blocksChangedInChunk(this.lightEngine.getLightAccess(), chunkX, chunkZ, positions, sectionChanges);
+                    }
+
+                    if (skyEngine != null && this.queuedEdgeChecksSky != null) {
+                        skyEngine.checkChunkEdges(this.lightEngine.getLightAccess(), chunkX, chunkZ, this.queuedEdgeChecksSky);
+                    }
+                    if (blockEngine != null && this.queuedEdgeChecksBlock != null) {
+                        blockEngine.checkChunkEdges(this.lightEngine.getLightAccess(), chunkX, chunkZ, this.queuedEdgeChecksBlock);
+                    }
+                }
+
+                this.onComplete.complete(null);
+            } finally {
+                this.lightEngine.releaseSkyLightEngine(skyEngine);
+                this.lightEngine.releaseBlockLightEngine(blockEngine);
+            }
+        }
+    }
+}
diff --git a/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java b/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java
new file mode 100644
index 0000000000000000000000000000000000000000..dc298e61e2d479801c8469b2067a8f8bcb076b1d
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java
@@ -0,0 +1,213 @@
+package io.papermc.paper.chunk.system.poi;
+
+import com.mojang.logging.LogUtils;
+import com.mojang.serialization.Codec;
+import com.mojang.serialization.DataResult;
+import io.papermc.paper.util.CoordinateUtils;
+import io.papermc.paper.util.TickThread;
+import io.papermc.paper.util.WorldUtil;
+import net.minecraft.SharedConstants;
+import net.minecraft.nbt.CompoundTag;
+import net.minecraft.nbt.NbtOps;
+import net.minecraft.nbt.Tag;
+import net.minecraft.resources.RegistryOps;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.world.entity.ai.village.poi.PoiManager;
+import net.minecraft.world.entity.ai.village.poi.PoiSection;
+import org.slf4j.Logger;
+
+import java.util.Optional;
+
+public final class PoiChunk {
+
+    private static final Logger LOGGER = LogUtils.getClassLogger();
+
+    public final ServerLevel world;
+    public final int chunkX;
+    public final int chunkZ;
+    public final int minSection;
+    public final int maxSection;
+
+    protected final PoiSection[] sections;
+
+    private boolean isDirty;
+    private boolean loaded;
+
+    public PoiChunk(final ServerLevel world, final int chunkX, final int chunkZ, final int minSection, final int maxSection) {
+        this(world, chunkX, chunkZ, minSection, maxSection, new PoiSection[maxSection - minSection + 1]);
+    }
+
+    public PoiChunk(final ServerLevel world, final int chunkX, final int chunkZ, final int minSection, final int maxSection, final PoiSection[] sections) {
+        this.world = world;
+        this.chunkX = chunkX;
+        this.chunkZ = chunkZ;
+        this.minSection = minSection;
+        this.maxSection = maxSection;
+        this.sections = sections;
+        if (this.sections.length != (maxSection - minSection + 1)) {
+            throw new IllegalStateException("Incorrect length used, expected " + (maxSection - minSection + 1) + ", got " + this.sections.length);
+        }
+    }
+
+    public void load() {
+        TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Loading in poi chunk off-main");
+        if (this.loaded) {
+            return;
+        }
+        this.loaded = true;
+        this.world.chunkSource.getPoiManager().loadInPoiChunk(this);
+    }
+
+    public boolean isLoaded() {
+        return this.loaded;
+    }
+
+    public boolean isEmpty() {
+        for (final PoiSection section : this.sections) {
+            if (section != null && !section.isEmpty()) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    public PoiSection getOrCreateSection(final int chunkY) {
+        if (chunkY >= this.minSection && chunkY <= this.maxSection) {
+            final int idx = chunkY - this.minSection;
+            final PoiSection ret = this.sections[idx];
+            if (ret != null) {
+                return ret;
+            }
+
+            final PoiManager poiManager = this.world.getPoiManager();
+            final long key = CoordinateUtils.getChunkSectionKey(this.chunkX, chunkY, this.chunkZ);
+
+            return this.sections[idx] = new PoiSection(() -> {
+                poiManager.setDirty(key);
+            });
+        }
+        throw new IllegalArgumentException("chunkY is out of bounds, chunkY: " + chunkY + " outside [" + this.minSection + "," + this.maxSection + "]");
+    }
+
+    public PoiSection getSection(final int chunkY) {
+        if (chunkY >= this.minSection && chunkY <= this.maxSection) {
+            return this.sections[chunkY - this.minSection];
+        }
+        return null;
+    }
+
+    public Optional<PoiSection> getSectionForVanilla(final int chunkY) {
+        if (chunkY >= this.minSection && chunkY <= this.maxSection) {
+            final PoiSection ret = this.sections[chunkY - this.minSection];
+            return ret == null ? Optional.empty() : ret.noAllocateOptional;
+        }
+        return Optional.empty();
+    }
+
+    public boolean isDirty() {
+        return this.isDirty;
+    }
+
+    public void setDirty(final boolean dirty) {
+        this.isDirty = dirty;
+    }
+
+    // returns null if empty
+    public CompoundTag save() {
+        final RegistryOps<Tag> registryOps = RegistryOps.create(NbtOps.INSTANCE, this.world.getPoiManager().registryAccess);
+
+        final CompoundTag ret = new CompoundTag();
+        final CompoundTag sections = new CompoundTag();
+        ret.put("Sections", sections);
+
+        ret.putInt("DataVersion", SharedConstants.getCurrentVersion().getWorldVersion());
+
+        final ServerLevel world = this.world;
+        final PoiManager poiManager = world.getPoiManager();
+        final int chunkX = this.chunkX;
+        final int chunkZ = this.chunkZ;
+
+        for (int sectionY = this.minSection; sectionY <= this.maxSection; ++sectionY) {
+            final PoiSection chunk = this.sections[sectionY - this.minSection];
+            if (chunk == null || chunk.isEmpty()) {
+                continue;
+            }
+
+            final long key = CoordinateUtils.getChunkSectionKey(chunkX, sectionY, chunkZ);
+            // codecs are honestly such a fucking disaster. What the fuck is this trash?
+            final Codec<PoiSection> codec = PoiSection.codec(() -> {
+                poiManager.setDirty(key);
+            });
+
+            final DataResult<Tag> serializedResult = codec.encodeStart(registryOps, chunk);
+            final int finalSectionY = sectionY;
+            final Tag serialized = serializedResult.resultOrPartial((final String description) -> {
+                LOGGER.error("Failed to serialize poi chunk for world: " + world.getWorld().getName() + ", chunk: (" + chunkX + "," + finalSectionY + "," + chunkZ + "); description: " + description);
+            }).orElse(null);
+            if (serialized == null) {
+                // failed, should be logged from the resultOrPartial
+                continue;
+            }
+
+            sections.put(Integer.toString(sectionY), serialized);
+        }
+
+        return sections.isEmpty() ? null : ret;
+    }
+
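+    // save() above deliberately returns null rather than an empty compound so that callers
+    // can skip the regionfile write entirely when the chunk holds no poi data
+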
+    public static PoiChunk empty(final ServerLevel world, final int chunkX, final int chunkZ) {
+        final PoiChunk ret = new PoiChunk(world, chunkX, chunkZ, WorldUtil.getMinSection(world), WorldUtil.getMaxSection(world));
+        ret.loaded = true;
+        return ret;
+    }
+
+    public static PoiChunk parse(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data) {
+        final PoiChunk ret = empty(world, chunkX, chunkZ);
+
+        final RegistryOps<Tag> registryOps = RegistryOps.create(NbtOps.INSTANCE, world.getPoiManager().registryAccess);
+
+        final CompoundTag sections = data.getCompound("Sections");
+
+        if (sections.isEmpty()) {
+            // nothing to parse
+            return ret;
+        }
+
+        final PoiManager poiManager = world.getPoiManager();
+
+        boolean readAnything = false;
+
+        for (int sectionY = ret.minSection; sectionY <= ret.maxSection; ++sectionY) {
+            final String key = Integer.toString(sectionY);
+            if (!sections.contains(key)) {
+                continue;
+            }
+
+            final long coordinateKey = CoordinateUtils.getChunkSectionKey(chunkX, sectionY, chunkZ);
+            // codecs are honestly such a fucking disaster. What the fuck is this trash?
+            final Codec<PoiSection> codec = PoiSection.codec(() -> {
+                poiManager.setDirty(coordinateKey);
+            });
+
+            final CompoundTag section = sections.getCompound(key);
+            final DataResult<PoiSection> deserializeResult = codec.parse(registryOps, section);
+            final int finalSectionY = sectionY;
+            final PoiSection deserialized = deserializeResult.resultOrPartial((final String description) -> {
+                LOGGER.error("Failed to deserialize poi chunk for world: " + world.getWorld().getName() + ", chunk: (" + chunkX + "," + finalSectionY + "," + chunkZ + "); description: " + description);
+            }).orElse(null);
+
+            if (deserialized == null || deserialized.isEmpty()) {
+                // completely empty, no point in storing this
+                continue;
+            }
+
+            readAnything = true;
+            ret.sections[sectionY - ret.minSection] = deserialized;
+        }
+
+        ret.loaded = !readAnything; // Set loaded to false if we read anything to ensure proper callbacks to PoiManager are made on #load
+
+        return ret;
+    }
+}
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..fb42d776f15f735fb59e972e00e2b512c23a8387
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
@@ -0,0 +1,121 @@
+package io.papermc.paper.chunk.system.scheduling;
+
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import net.minecraft.server.level.ChunkMap;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.world.level.chunk.ChunkAccess;
+import net.minecraft.world.level.chunk.ChunkStatus;
+import net.minecraft.world.level.chunk.ImposterProtoChunk;
+import net.minecraft.world.level.chunk.LevelChunk;
+import net.minecraft.world.level.chunk.ProtoChunk;
+import java.lang.invoke.VarHandle;
+
+public final class ChunkFullTask extends ChunkProgressionTask implements Runnable {
+
+    protected final NewChunkHolder chunkHolder;
+    protected final ChunkAccess fromChunk;
+    protected final PrioritisedExecutor.PrioritisedTask convertToFullTask;
+
+    public ChunkFullTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ,
+                         final NewChunkHolder chunkHolder, final ChunkAccess fromChunk, final PrioritisedExecutor.Priority priority) {
+        super(scheduler, world, chunkX, chunkZ);
+        this.chunkHolder = chunkHolder;
+        this.fromChunk = fromChunk;
+        this.convertToFullTask = scheduler.createChunkTask(chunkX, chunkZ, this, priority);
+    }
+
+    @Override
+    public ChunkStatus getTargetStatus() {
+        return ChunkStatus.FULL;
+    }
+
+    @Override
+    public void run() {
+        // See Vanilla protoChunkToFullChunk for what this function should be doing
+        final LevelChunk chunk;
+        try {
+            if (this.fromChunk instanceof ImposterProtoChunk wrappedFull) {
+                chunk = wrappedFull.getWrapped();
+            } else {
+                final ServerLevel world = this.world;
+                final ProtoChunk protoChunk = (ProtoChunk)this.fromChunk;
+                chunk = new LevelChunk(this.world, protoChunk, (final LevelChunk unused) -> {
+                    ChunkMap.postLoadProtoChunk(world, protoChunk.getEntities());
+                });
+            }
+
+            chunk.setChunkHolder(this.scheduler.chunkHolderManager.getChunkHolder(this.chunkX, this.chunkZ)); // replaces setFullStatus
+            chunk.runPostLoad();
+            // Unlike Vanilla, we load the entity chunk here, as we load the NBT in empty status (unlike Vanilla)
+            // This brings entity addition back in line with older versions of the game
+            // Since we load the NBT in the empty status, this will never block for I/O
+            this.world.chunkTaskScheduler.chunkHolderManager.getOrCreateEntityChunk(this.chunkX, this.chunkZ, false);
+
+            // we don't need the entitiesInLevel trash, this system doesn't double run callbacks
+            chunk.setLoaded(true);
+            chunk.registerAllBlockEntitiesAfterLevelLoad();
+            chunk.registerTickContainerInLevel(this.world);
+        } catch (final Throwable throwable) {
+            this.complete(null, throwable);
+
+            if (throwable instanceof ThreadDeath) {
+                throw (ThreadDeath)throwable;
+            }
+            return;
+        }
+        this.complete(chunk, null);
+    }
+
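+    // the getAndSet() in schedule() below guarantees the conversion task is queued at most
+    // once, even if schedule() is raced from multiple threads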
+    protected volatile boolean scheduled;
+    protected static final VarHandle SCHEDULED_HANDLE = ConcurrentUtil.getVarHandle(ChunkFullTask.class, "scheduled", boolean.class);
+
+    @Override
+    public boolean isScheduled() {
+        return this.scheduled;
+    }
+
+    @Override
+    public void schedule() {
+        if ((boolean)SCHEDULED_HANDLE.getAndSet((ChunkFullTask)this, true)) {
+            throw new IllegalStateException("Cannot double call schedule()");
+        }
+        this.convertToFullTask.queue();
+    }
+
+    @Override
+    public void cancel() {
+        if (this.convertToFullTask.cancel()) {
+            this.complete(null, null);
+        }
+    }
+
+    @Override
+    public PrioritisedExecutor.Priority getPriority() {
+        return this.convertToFullTask.getPriority();
+    }
+
+    @Override
+    public void lowerPriority(final PrioritisedExecutor.Priority priority) {
+        if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+            throw new IllegalArgumentException("Invalid priority " + priority);
+        }
+        this.convertToFullTask.lowerPriority(priority);
+    }
+
+    @Override
+    public void setPriority(final PrioritisedExecutor.Priority priority) {
+        if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+            throw new IllegalArgumentException("Invalid priority " + priority);
+        }
+        this.convertToFullTask.setPriority(priority);
+    }
+
+    @Override
+    public void raisePriority(final PrioritisedExecutor.Priority priority) {
+        if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+            throw new IllegalArgumentException("Invalid priority " + priority);
+        }
+        this.convertToFullTask.raisePriority(priority);
+    }
+}
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
new file mode 100644
index 0000000000000000000000000000000000000000..e4dcadc24b3d73178ee1a4b64b8c6343e5285e59
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
@@ -0,0 +1,1204 @@
+package io.papermc.paper.chunk.system.scheduling;
+
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.concurrentutil.map.SWMRLong2ObjectHashTable;
+import co.aikar.timings.Timing;
+import com.google.common.collect.ImmutableList;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonObject;
+import com.mojang.logging.LogUtils;
+import io.papermc.paper.chunk.system.io.RegionFileIOThread;
+import io.papermc.paper.chunk.system.poi.PoiChunk;
+import io.papermc.paper.util.CoordinateUtils;
+import io.papermc.paper.util.TickThread;
+import io.papermc.paper.util.misc.Delayed8WayDistancePropagator2D;
+import io.papermc.paper.world.ChunkEntitySlices;
+import it.unimi.dsi.fastutil.longs.Long2IntLinkedOpenHashMap;
+import it.unimi.dsi.fastutil.longs.Long2IntMap;
+import it.unimi.dsi.fastutil.longs.Long2IntOpenHashMap;
+import it.unimi.dsi.fastutil.longs.Long2ObjectMap;
+import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
+import it.unimi.dsi.fastutil.longs.LongArrayList;
+import it.unimi.dsi.fastutil.longs.LongIterator;
+import it.unimi.dsi.fastutil.objects.ObjectRBTreeSet;
+import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet;
+import net.minecraft.nbt.CompoundTag;
+import io.papermc.paper.chunk.system.ChunkSystem;
+import net.minecraft.server.MinecraftServer;
+import net.minecraft.server.level.ChunkHolder;
+import net.minecraft.server.level.ChunkMap;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.server.level.Ticket;
+import net.minecraft.server.level.TicketType;
+import net.minecraft.util.SortedArraySet;
+import net.minecraft.util.Unit;
+import net.minecraft.world.level.ChunkPos;
+import net.minecraft.world.level.chunk.ChunkAccess;
+import net.minecraft.world.level.chunk.ChunkStatus;
+import org.bukkit.plugin.Plugin;
+import org.slf4j.Logger;
+import java.io.IOException;
+import java.text.DecimalFormat;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.LockSupport;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Predicate;
+
+public final class ChunkHolderManager {
+
+    private static final Logger LOGGER = LogUtils.getClassLogger();
+
+    public static final int FULL_LOADED_TICKET_LEVEL = 33;
+    public static final int BLOCK_TICKING_TICKET_LEVEL = 32;
+    public static final int ENTITY_TICKING_TICKET_LEVEL = 31;
+    public static final int MAX_TICKET_LEVEL = ChunkMap.MAX_CHUNK_DISTANCE; // inclusive
+
+    private static final long NO_TIMEOUT_MARKER = -1L;
+
+    final ReentrantLock ticketLock = new ReentrantLock();
+
+    private final SWMRLong2ObjectHashTable<NewChunkHolder> chunkHolders = new SWMRLong2ObjectHashTable<>(16384, 0.25f);
+    private final Long2ObjectOpenHashMap<SortedArraySet<Ticket<?>>> tickets = new Long2ObjectOpenHashMap<>(8192, 0.25f);
+    // what a disaster of a name
+    // maps removal tick -> (chunk key -> number of tickets on that chunk that expire at that tick)
+    private final Long2ObjectOpenHashMap<Long2IntOpenHashMap> removeTickToChunkExpireTicketCount = new Long2ObjectOpenHashMap<>();
+    private final ServerLevel world;
+    private final ChunkTaskScheduler taskScheduler;
+    private long currentTick;
+
+    private final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = new ArrayDeque<>();
+    private final ObjectRBTreeSet<NewChunkHolder> autoSaveQueue = new ObjectRBTreeSet<>((final NewChunkHolder c1, final NewChunkHolder c2) -> {
+        if (c1 == c2) {
+            return 0;
+        }
+
+        final int saveTickCompare = Long.compare(c1.lastAutoSave, c2.lastAutoSave);
+
+        if (saveTickCompare != 0) {
+            return saveTickCompare;
+        }
+
+        final long coord1 = CoordinateUtils.getChunkKey(c1.chunkX, c1.chunkZ);
+        final long coord2 = CoordinateUtils.getChunkKey(c2.chunkX, c2.chunkZ);
+
+        if (coord1 == coord2) {
+            throw new IllegalStateException("Duplicate chunkholder in auto save queue");
+        }
+
+        return Long.compare(coord1, coord2);
+    });
+
+ public ChunkHolderManager(final ServerLevel world, final ChunkTaskScheduler taskScheduler) {
|
|
+ this.world = world;
|
|
+ this.taskScheduler = taskScheduler;
|
|
+ }
|
|
+
|
|
+ private long statusUpgradeId;
|
|
+
|
|
+ long getNextStatusUpgradeId() {
|
|
+ return ++this.statusUpgradeId;
|
|
+ }
|
|
+
|
|
+ public List<ChunkHolder> getOldChunkHolders() {
|
|
+ final List<NewChunkHolder> holders = this.getChunkHolders();
|
|
+ final List<ChunkHolder> ret = new ArrayList<>(holders.size());
|
|
+ for (final NewChunkHolder holder : holders) {
|
|
+ ret.add(holder.vanillaChunkHolder);
|
|
+ }
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public List<NewChunkHolder> getChunkHolders() {
|
|
+ final List<NewChunkHolder> ret = new ArrayList<>(this.chunkHolders.size());
|
|
+ this.chunkHolders.forEachValue(ret::add);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ public int size() {
|
|
+ return this.chunkHolders.size();
|
|
+ }
|
|
+
|
|
+ public void close(final boolean save, final boolean halt) {
|
|
+ TickThread.ensureTickThread("Closing world off-main");
|
|
+ if (halt) {
|
|
+ LOGGER.info("Waiting 60s for chunk system to halt for world '" + this.world.getWorld().getName() + "'");
|
|
+ if (!this.taskScheduler.halt(true, TimeUnit.SECONDS.toNanos(60L))) {
|
|
+ LOGGER.warn("Failed to halt world generation/loading tasks for world '" + this.world.getWorld().getName() + "'");
|
|
+ } else {
|
|
+ LOGGER.info("Halted chunk system for world '" + this.world.getWorld().getName() + "'");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (save) {
|
|
+ this.saveAllChunks(true, true, true);
|
|
+ }
|
|
+
|
|
+ if (this.world.chunkDataControllerNew.hasTasks() || this.world.entityDataControllerNew.hasTasks() || this.world.poiDataControllerNew.hasTasks()) {
|
|
+ RegionFileIOThread.flush();
|
|
+ }
|
|
+
|
|
+ // kill regionfile cache
|
|
+ try {
|
|
+ this.world.chunkDataControllerNew.getCache().close();
|
|
+ } catch (final IOException ex) {
|
|
+ LOGGER.error("Failed to close chunk regionfile cache for world '" + this.world.getWorld().getName() + "'", ex);
|
|
+ }
|
|
+ try {
|
|
+ this.world.entityDataControllerNew.getCache().close();
|
|
+ } catch (final IOException ex) {
|
|
+ LOGGER.error("Failed to close entity regionfile cache for world '" + this.world.getWorld().getName() + "'", ex);
|
|
+ }
|
|
+ try {
|
|
+ this.world.poiDataControllerNew.getCache().close();
|
|
+ } catch (final IOException ex) {
|
|
+ LOGGER.error("Failed to close poi regionfile cache for world '" + this.world.getWorld().getName() + "'", ex);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ void ensureInAutosave(final NewChunkHolder holder) {
|
|
+ if (!this.autoSaveQueue.contains(holder)) {
|
|
+ holder.lastAutoSave = MinecraftServer.currentTick;
|
|
+ this.autoSaveQueue.add(holder);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public void autoSave() {
|
|
+ final List<NewChunkHolder> reschedule = new ArrayList<>();
|
|
+ final long currentTick = MinecraftServer.currentTickLong;
|
|
+ final long maxSaveTime = currentTick - this.world.paperConfig().chunks.autoSaveInterval.value();
|
|
+ for (int autoSaved = 0; autoSaved < this.world.paperConfig().chunks.maxAutoSaveChunksPerTick && !this.autoSaveQueue.isEmpty();) {
|
|
+ final NewChunkHolder holder = this.autoSaveQueue.first();
|
|
+
|
|
+ if (holder.lastAutoSave > maxSaveTime) {
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ this.autoSaveQueue.remove(holder);
|
|
+
|
|
+ holder.lastAutoSave = currentTick;
|
|
+ if (holder.save(false, false) != null) {
|
|
+ ++autoSaved;
|
|
+ }
|
|
+
|
|
+ if (holder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
|
|
+ reschedule.add(holder);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ for (final NewChunkHolder holder : reschedule) {
|
|
+ if (holder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
|
|
+ this.autoSaveQueue.add(holder);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public void saveAllChunks(final boolean flush, final boolean shutdown, final boolean logProgress) {
|
|
+ final List<NewChunkHolder> holders = this.getChunkHolders();
|
|
+
|
|
+ if (logProgress) {
|
|
+ LOGGER.info("Saving all chunkholders for world '" + this.world.getWorld().getName() + "'");
|
|
+ }
|
|
+
|
|
+ final DecimalFormat format = new DecimalFormat("#0.00");
|
|
+
|
|
+ int saved = 0;
|
|
+
|
|
+ long start = System.nanoTime();
|
|
+ long lastLog = start;
|
|
+ boolean needsFlush = false;
|
|
+ final int flushInterval = 50;
|
|
+
|
|
+ int savedChunk = 0;
|
|
+ int savedEntity = 0;
|
|
+ int savedPoi = 0;
|
|
+
|
|
+ for (int i = 0, len = holders.size(); i < len; ++i) {
|
|
+ final NewChunkHolder holder = holders.get(i);
|
|
+ try {
|
|
+ final NewChunkHolder.SaveStat saveStat = holder.save(shutdown, false);
|
|
+ if (saveStat != null) {
|
|
+ ++saved;
|
|
+ needsFlush = flush;
|
|
+ if (saveStat.savedChunk()) {
|
|
+ ++savedChunk;
|
|
+ }
|
|
+ if (saveStat.savedEntityChunk()) {
|
|
+ ++savedEntity;
|
|
+ }
|
|
+ if (saveStat.savedPoiChunk()) {
|
|
+ ++savedPoi;
|
|
+ }
|
|
+ }
|
|
+ } catch (final ThreadDeath thr) {
|
|
+ throw thr;
|
|
+ } catch (final Throwable thr) {
|
|
+ LOGGER.error("Failed to save chunk (" + holder.chunkX + "," + holder.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr);
|
|
+ }
|
|
+ if (needsFlush && (saved % flushInterval) == 0) {
|
|
+ needsFlush = false;
|
|
+ RegionFileIOThread.partialFlush(flushInterval / 2);
|
|
+ }
|
|
+ if (logProgress) {
|
|
+ final long currTime = System.nanoTime();
|
|
+ if ((currTime - lastLog) > TimeUnit.SECONDS.toNanos(10L)) {
|
|
+ lastLog = currTime;
|
|
+ LOGGER.info("Saved " + saved + " chunks (" + format.format((double)(i+1)/(double)len * 100.0) + "%) in world '" + this.world.getWorld().getName() + "'");
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ if (flush) {
|
|
+ RegionFileIOThread.flush();
|
|
+ }
|
|
+ if (logProgress) {
|
|
+ LOGGER.info("Saved " + savedChunk + " block chunks, " + savedEntity + " entity chunks, " + savedPoi + " poi chunks in world '" + this.world.getWorld().getName() + "' in " + format.format(1.0E-9 * (System.nanoTime() - start)) + "s");
|
|
+ }
|
|
+ }
|
|
+
|
|
+ protected final Long2IntLinkedOpenHashMap ticketLevelUpdates = new Long2IntLinkedOpenHashMap() {
|
|
+ @Override
|
|
+ protected void rehash(final int newN) {
|
|
+ // no downsizing allowed
|
|
+ if (newN < this.n) {
|
|
+ return;
|
|
+ }
|
|
+ super.rehash(newN);
|
|
+ }
|
|
+ };
|
|
+
|
|
+ protected final Delayed8WayDistancePropagator2D ticketLevelPropagator = new Delayed8WayDistancePropagator2D(
|
|
+ (final long coordinate, final byte oldLevel, final byte newLevel) -> {
|
|
+ ChunkHolderManager.this.ticketLevelUpdates.putAndMoveToLast(coordinate, convertBetweenTicketLevels(newLevel));
|
|
+ }
|
|
+ );
|
|
+ // function for converting between ticket levels and propagator levels and vice versa
|
|
+ // the problem is the ticket level propagator will propagate from a set source down to zero, whereas mojang expects
|
|
+ // levels to propagate from a set value up to a maximum value. so we need to convert the levels we put into the propagator
|
|
+ // and the levels we get out of the propagator
|
|
+
|
|
+ public static int convertBetweenTicketLevels(final int level) {
|
|
+ return ChunkMap.MAX_CHUNK_DISTANCE - level + 1;
|
|
+ }
|
|
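+    // note: the conversion is its own inverse:
+    //   convertBetweenTicketLevels(convertBetweenTicketLevels(level))
+    //       == MAX_CHUNK_DISTANCE - (MAX_CHUNK_DISTANCE - level + 1) + 1 == level
+    // so the same function maps ticket levels into the propagator and back out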
+
+    public boolean hasTickets() {
+        this.ticketLock.lock();
+        try {
+            return !this.tickets.isEmpty();
+        } finally {
+            this.ticketLock.unlock();
+        }
+    }
+
+    public String getTicketDebugString(final long coordinate) {
+        this.ticketLock.lock();
+        try {
+            final SortedArraySet<Ticket<?>> tickets = this.tickets.get(coordinate);
+
+            return tickets != null ? tickets.first().toString() : "no_ticket";
+        } finally {
+            this.ticketLock.unlock();
+        }
+    }
+
+    public Long2ObjectOpenHashMap<SortedArraySet<Ticket<?>>> getTicketsCopy() {
+        this.ticketLock.lock();
+        try {
+            return this.tickets.clone();
+        } finally {
+            this.ticketLock.unlock();
+        }
+    }
+
+    public Collection<Plugin> getPluginChunkTickets(int x, int z) {
+        ImmutableList.Builder<Plugin> ret;
+        this.ticketLock.lock();
+        try {
+            SortedArraySet<Ticket<?>> tickets = this.tickets.get(ChunkPos.asLong(x, z));
+
+            if (tickets == null) {
+                return Collections.emptyList();
+            }
+
+            ret = ImmutableList.builder();
+            for (Ticket<?> ticket : tickets) {
+                if (ticket.getType() == TicketType.PLUGIN_TICKET) {
+                    ret.add((Plugin)ticket.key);
+                }
+            }
+        } finally {
+            this.ticketLock.unlock();
+        }
+
+        return ret.build();
+    }
+
+    protected final int getPropagatedTicketLevel(final long coordinate) {
+        return convertBetweenTicketLevels(this.ticketLevelPropagator.getLevel(coordinate));
+    }
+
+    protected final void updateTicketLevel(final long coordinate, final int ticketLevel) {
+        if (ticketLevel > ChunkMap.MAX_CHUNK_DISTANCE) {
+            this.ticketLevelPropagator.removeSource(coordinate);
+        } else {
+            this.ticketLevelPropagator.setSource(coordinate, convertBetweenTicketLevels(ticketLevel));
+        }
+    }
+
+    private static int getTicketLevelAt(SortedArraySet<Ticket<?>> tickets) {
+        return !tickets.isEmpty() ? tickets.first().getTicketLevel() : MAX_TICKET_LEVEL + 1;
+    }
+
+    public <T> boolean addTicketAtLevel(final TicketType<T> type, final ChunkPos chunkPos, final int level,
+                                        final T identifier) {
+        return this.addTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkPos), level, identifier);
+    }
+
+    public <T> boolean addTicketAtLevel(final TicketType<T> type, final int chunkX, final int chunkZ, final int level,
+                                        final T identifier) {
+        return this.addTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkX, chunkZ), level, identifier);
+    }
+
+    // returns true if the ticket was added and did not replace an existing ticket;
+    // always returns false if the ticket cannot be added (level > MAX_TICKET_LEVEL)
+    public <T> boolean addTicketAtLevel(final TicketType<T> type, final long chunk, final int level, final T identifier) {
+        final long removeDelay = Math.max(0, type.timeout);
+        if (level > MAX_TICKET_LEVEL) {
+            return false;
+        }
+
+        this.ticketLock.lock();
+        try {
+            final long removeTick = removeDelay == 0 ? NO_TIMEOUT_MARKER : this.currentTick + removeDelay;
+            final Ticket<T> ticket = new Ticket<>(type, level, identifier, removeTick);
+
+            final SortedArraySet<Ticket<?>> ticketsAtChunk = this.tickets.computeIfAbsent(chunk, (final long keyInMap) -> {
+                return SortedArraySet.create(4);
+            });
+
+            final int levelBefore = getTicketLevelAt(ticketsAtChunk);
+            final Ticket<T> current = (Ticket<T>)ticketsAtChunk.replace(ticket);
+            final int levelAfter = getTicketLevelAt(ticketsAtChunk);
+
+            if (current != ticket) {
+                final long oldRemovalTick = current.removalTick;
+                if (removeTick != oldRemovalTick) {
+                    if (oldRemovalTick != NO_TIMEOUT_MARKER) {
+                        final Long2IntOpenHashMap removeCounts = this.removeTickToChunkExpireTicketCount.get(oldRemovalTick);
+                        final int prevCount = removeCounts.addTo(chunk, -1);
+
+                        if (prevCount == 1) {
+                            removeCounts.remove(chunk);
+                            if (removeCounts.isEmpty()) {
+                                this.removeTickToChunkExpireTicketCount.remove(oldRemovalTick);
+                            }
+                        }
+                    }
+                    if (removeTick != NO_TIMEOUT_MARKER) {
+                        this.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> {
+                            return new Long2IntOpenHashMap();
+                        }).addTo(chunk, 1);
+                    }
+                }
+            } else {
+                if (removeTick != NO_TIMEOUT_MARKER) {
+                    this.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> {
+                        return new Long2IntOpenHashMap();
+                    }).addTo(chunk, 1);
+                }
+            }
+
+            if (levelBefore != levelAfter) {
+                this.updateTicketLevel(chunk, levelAfter);
+            }
+
+            return current == ticket;
+        } finally {
+            this.ticketLock.unlock();
+        }
+    }
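+    // usage sketch (hypothetical caller): load-and-hold semantics pair an add with a later
+    // remove using the same type, level and identifier, e.g.
+    //   manager.addTicketAtLevel(TicketType.UNKNOWN, pos, MAX_TICKET_LEVEL, pos);
+    //   // ... use the chunk ...
+    //   manager.removeTicketAtLevel(TicketType.UNKNOWN, pos, MAX_TICKET_LEVEL, pos);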
+
+    public <T> boolean removeTicketAtLevel(final TicketType<T> type, final ChunkPos chunkPos, final int level, final T identifier) {
+        return this.removeTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkPos), level, identifier);
+    }
+
+    public <T> boolean removeTicketAtLevel(final TicketType<T> type, final int chunkX, final int chunkZ, final int level, final T identifier) {
+        return this.removeTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkX, chunkZ), level, identifier);
+    }
+
+    public <T> boolean removeTicketAtLevel(final TicketType<T> type, final long chunk, final int level, final T identifier) {
+        if (level > MAX_TICKET_LEVEL) {
+            return false;
+        }
+
+        this.ticketLock.lock();
+        try {
+            final SortedArraySet<Ticket<?>> ticketsAtChunk = this.tickets.get(chunk);
+            if (ticketsAtChunk == null) {
+                return false;
+            }
+
+            final int oldLevel = getTicketLevelAt(ticketsAtChunk);
+            final Ticket<T> ticket = (Ticket<T>)ticketsAtChunk.removeAndGet(new Ticket<>(type, level, identifier, -2L));
+
+            if (ticket == null) {
+                return false;
+            }
+
+            if (ticketsAtChunk.isEmpty()) {
+                this.tickets.remove(chunk);
+            }
+
+            final int newLevel = getTicketLevelAt(ticketsAtChunk);
+
+            final long removeTick = ticket.removalTick;
+            if (removeTick != NO_TIMEOUT_MARKER) {
+                final Long2IntOpenHashMap removeCounts = this.removeTickToChunkExpireTicketCount.get(removeTick);
+                final int currCount = removeCounts.addTo(chunk, -1);
+
+                if (currCount == 1) {
+                    removeCounts.remove(chunk);
+                    if (removeCounts.isEmpty()) {
+                        this.removeTickToChunkExpireTicketCount.remove(removeTick);
+                    }
+                }
+            }
+
+            if (oldLevel != newLevel) {
+                this.updateTicketLevel(chunk, newLevel);
+            }
+
+            return true;
+        } finally {
+            this.ticketLock.unlock();
+        }
+    }
+
+    // atomic with respect to all add/remove/addAndRemoveTickets calls for the given chunk
+    public <T, V> void addAndRemoveTickets(final long chunk, final TicketType<T> addType, final int addLevel, final T addIdentifier,
+                                           final TicketType<V> removeType, final int removeLevel, final V removeIdentifier) {
+        this.ticketLock.lock();
+        try {
+            this.addTicketAtLevel(addType, chunk, addLevel, addIdentifier);
+            this.removeTicketAtLevel(removeType, chunk, removeLevel, removeIdentifier);
+        } finally {
+            this.ticketLock.unlock();
+        }
+    }
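+    // holding ticketLock across both calls means the chunk's ticket level cannot dip between
+    // the add and the remove; e.g. swapping an ENTITY_LOAD ticket for an UNKNOWN ticket at the
+    // same level (as getOrCreateEntityChunk does below) never lets the holder become eligible
+    // for unload in between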
+
+    public <T> void removeAllTicketsFor(final TicketType<T> ticketType, final int ticketLevel, final T ticketIdentifier) {
+        if (ticketLevel > MAX_TICKET_LEVEL) {
+            return;
+        }
+
+        this.ticketLock.lock();
+        try {
+            for (final LongIterator iterator = new LongArrayList(this.tickets.keySet()).longIterator(); iterator.hasNext();) {
+                final long chunk = iterator.nextLong();
+
+                this.removeTicketAtLevel(ticketType, chunk, ticketLevel, ticketIdentifier);
+            }
+        } finally {
+            this.ticketLock.unlock();
+        }
+    }
+
+    public void tick() {
+        TickThread.ensureTickThread("Cannot tick ticket manager off-main");
+
+        this.ticketLock.lock();
+        try {
+            final long tick = ++this.currentTick;
+
+            final Long2IntOpenHashMap toRemove = this.removeTickToChunkExpireTicketCount.remove(tick);
+
+            if (toRemove == null) {
+                return;
+            }
+
+            final Predicate<Ticket<?>> expireNow = (final Ticket<?> ticket) -> {
+                return ticket.removalTick == tick;
+            };
+
+            for (final LongIterator iterator = toRemove.keySet().longIterator(); iterator.hasNext();) {
+                final long chunk = iterator.nextLong();
+
+                final SortedArraySet<Ticket<?>> tickets = this.tickets.get(chunk);
+                tickets.removeIf(expireNow);
+                if (tickets.isEmpty()) {
+                    this.tickets.remove(chunk);
+                    this.ticketLevelPropagator.removeSource(chunk);
+                } else {
+                    this.ticketLevelPropagator.setSource(chunk, convertBetweenTicketLevels(tickets.first().getTicketLevel()));
+                }
+            }
+        } finally {
+            this.ticketLock.unlock();
+        }
+
+        this.processTicketUpdates();
+    }
+
+    public NewChunkHolder getChunkHolder(final int chunkX, final int chunkZ) {
+        return this.chunkHolders.get(CoordinateUtils.getChunkKey(chunkX, chunkZ));
+    }
+
+    public NewChunkHolder getChunkHolder(final long position) {
+        return this.chunkHolders.get(position);
+    }
+
+    public void raisePriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
+        final NewChunkHolder chunkHolder = this.getChunkHolder(x, z);
+        if (chunkHolder != null) {
+            chunkHolder.raisePriority(priority);
+        }
+    }
+
+    public void setPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
+        final NewChunkHolder chunkHolder = this.getChunkHolder(x, z);
+        if (chunkHolder != null) {
+            chunkHolder.setPriority(priority);
+        }
+    }
+
+    public void lowerPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
+        final NewChunkHolder chunkHolder = this.getChunkHolder(x, z);
+        if (chunkHolder != null) {
+            chunkHolder.lowerPriority(priority);
+        }
+    }
+
+    private NewChunkHolder createChunkHolder(final long position) {
+        final NewChunkHolder ret = new NewChunkHolder(this.world, CoordinateUtils.getChunkX(position), CoordinateUtils.getChunkZ(position), this.taskScheduler);
+
+        ChunkSystem.onChunkHolderCreate(this.world, ret.vanillaChunkHolder);
+        ret.vanillaChunkHolder.onChunkAdd();
+
+        return ret;
+    }
+
+    // because this function creates the chunk holder without a ticket, it is the caller's responsibility to ensure
+    // the chunk holder eventually unloads. this should only be used to avoid using processTicketUpdates to create chunkholders,
+    // as processTicketUpdates may call plugin logic; in every other case a ticket is appropriate
+    private NewChunkHolder getOrCreateChunkHolder(final int chunkX, final int chunkZ) {
+        return this.getOrCreateChunkHolder(CoordinateUtils.getChunkKey(chunkX, chunkZ));
+    }
+
+    private NewChunkHolder getOrCreateChunkHolder(final long position) {
+        if (!this.ticketLock.isHeldByCurrentThread()) {
+            throw new IllegalStateException("Must hold ticket level update lock!");
+        }
+        if (!this.taskScheduler.schedulingLock.isHeldByCurrentThread()) {
+            throw new IllegalStateException("Must hold scheduler lock!");
+        }
+
+        // we could just acquire these locks here, but the caller must already own them:
+        // the caller needs to ensure that no unload can occur AFTER this function returns
+
+        NewChunkHolder current = this.chunkHolders.get(position);
+        if (current != null) {
+            return current;
+        }
+
+        current = this.createChunkHolder(position);
+        this.chunkHolders.put(position, current);
+
+        return current;
+    }
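+    // caller sketch (both locks held, as required above):
+    //   this.ticketLock.lock();
+    //   try {
+    //       this.taskScheduler.schedulingLock.lock();
+    //       try {
+    //           final NewChunkHolder holder = this.getOrCreateChunkHolder(chunkX, chunkZ);
+    //           // ... holder cannot be unloaded while both locks are held ...
+    //       } finally {
+    //           this.taskScheduler.schedulingLock.unlock();
+    //       }
+    //   } finally {
+    //       this.ticketLock.unlock();
+    //   }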
+
+    private long entityLoadCounter;
+
+    public ChunkEntitySlices getOrCreateEntityChunk(final int chunkX, final int chunkZ, final boolean transientChunk) {
+        TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot create entity chunk off-main");
+        ChunkEntitySlices ret;
+
+        NewChunkHolder current = this.getChunkHolder(chunkX, chunkZ);
+        if (current != null && (ret = current.getEntityChunk()) != null && (transientChunk || !ret.isTransient())) {
+            return ret;
+        }
+
+        final AtomicBoolean isCompleted = new AtomicBoolean();
+        final Thread waiter = Thread.currentThread();
+        final Long entityLoadId;
+        NewChunkHolder.GenericDataLoadTaskCallback loadTask = null;
+        this.ticketLock.lock();
+        try {
+            entityLoadId = Long.valueOf(this.entityLoadCounter++);
+            this.addTicketAtLevel(TicketType.ENTITY_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, entityLoadId);
+            this.taskScheduler.schedulingLock.lock();
+            try {
+                current = this.getOrCreateChunkHolder(chunkX, chunkZ);
+                if ((ret = current.getEntityChunk()) != null && (transientChunk || !ret.isTransient())) {
+                    this.removeTicketAtLevel(TicketType.ENTITY_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, entityLoadId);
+                    return ret;
+                }
+
+                if (current.isEntityChunkNBTLoaded()) {
+                    isCompleted.setPlain(true);
+                } else {
+                    loadTask = current.getOrLoadEntityData((final GenericDataLoadTask.TaskResult<CompoundTag, Throwable> result) -> {
+                        if (!transientChunk) {
+                            isCompleted.set(true);
+                            LockSupport.unpark(waiter);
+                        }
+                    });
+                    final ChunkLoadTask.EntityDataLoadTask entityLoad = current.getEntityDataLoadTask();
+
+                    if (entityLoad != null && !transientChunk) {
+                        entityLoad.raisePriority(PrioritisedExecutor.Priority.BLOCKING);
+                    }
+                }
+            } finally {
+                this.taskScheduler.schedulingLock.unlock();
+            }
+        } finally {
+            this.ticketLock.unlock();
+        }
+
+        if (loadTask != null) {
+            loadTask.schedule();
+        }
+
+        if (!transientChunk) {
+            // Note: no need to busy wait on the chunk queue, entity load will complete off-main
+            boolean interrupted = false;
+            while (!isCompleted.get()) {
+                interrupted |= Thread.interrupted();
+                LockSupport.park();
+            }
+
+            if (interrupted) {
+                Thread.currentThread().interrupt();
+            }
+        }
+
+        // now that the entity data is loaded, we can load it into the world
+
+        ret = current.loadInEntityChunk(transientChunk);
+
+        final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
+        this.addAndRemoveTickets(chunkKey,
+                TicketType.UNKNOWN, MAX_TICKET_LEVEL, new ChunkPos(chunkX, chunkZ),
+                TicketType.ENTITY_LOAD, MAX_TICKET_LEVEL, entityLoadId
+        );
+
+        return ret;
+    }
+
+    public PoiChunk getPoiChunkIfLoaded(final int chunkX, final int chunkZ, final boolean checkLoadInCallback) {
+        final NewChunkHolder holder = this.getChunkHolder(chunkX, chunkZ);
+        if (holder != null) {
+            final PoiChunk ret = holder.getPoiChunk();
+            return ret == null || (checkLoadInCallback && !ret.isLoaded()) ? null : ret;
+        }
+        return null;
+    }
+
+    private long poiLoadCounter;
+
+    public PoiChunk loadPoiChunk(final int chunkX, final int chunkZ) {
+        TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot create poi chunk off-main");
+        PoiChunk ret;
+
+        NewChunkHolder current = this.getChunkHolder(chunkX, chunkZ);
+        if (current != null && (ret = current.getPoiChunk()) != null) {
+            if (!ret.isLoaded()) {
+                ret.load();
+            }
+            return ret;
+        }
+
+        final AtomicReference<PoiChunk> completed = new AtomicReference<>();
+        final AtomicBoolean isCompleted = new AtomicBoolean();
+        final Thread waiter = Thread.currentThread();
+        final Long poiLoadId;
+        NewChunkHolder.GenericDataLoadTaskCallback loadTask = null;
+        this.ticketLock.lock();
+        try {
+            poiLoadId = Long.valueOf(this.poiLoadCounter++);
+            this.addTicketAtLevel(TicketType.POI_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, poiLoadId);
+            this.taskScheduler.schedulingLock.lock();
+            try {
+                current = this.getOrCreateChunkHolder(chunkX, chunkZ);
+                if (current.isPoiChunkLoaded()) {
+                    this.removeTicketAtLevel(TicketType.POI_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, poiLoadId);
+                    return current.getPoiChunk();
+                }
+
+                loadTask = current.getOrLoadPoiData((final GenericDataLoadTask.TaskResult<PoiChunk, Throwable> result) -> {
+                    completed.setPlain(result.left());
+                    isCompleted.set(true);
+                    LockSupport.unpark(waiter);
+                });
+                final ChunkLoadTask.PoiDataLoadTask poiLoad = current.getPoiDataLoadTask();
+
+                if (poiLoad != null) {
+                    poiLoad.raisePriority(PrioritisedExecutor.Priority.BLOCKING);
+                }
+            } finally {
+                this.taskScheduler.schedulingLock.unlock();
+            }
+        } finally {
+            this.ticketLock.unlock();
+        }
+
+        if (loadTask != null) {
+            loadTask.schedule();
+        }
+
+        // Note: no need to busy wait on the chunk queue, poi load will complete off-main
+
+        boolean interrupted = false;
+        while (!isCompleted.get()) {
+            interrupted |= Thread.interrupted();
+            LockSupport.park();
+        }
+
+        if (interrupted) {
+            Thread.currentThread().interrupt();
+        }
+
+        ret = completed.getPlain();
+
+        ret.load();
+
+        final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
+        this.addAndRemoveTickets(chunkKey,
+                TicketType.UNKNOWN, MAX_TICKET_LEVEL, new ChunkPos(chunkX, chunkZ),
+                TicketType.POI_LOAD, MAX_TICKET_LEVEL, poiLoadId
+        );
+
+        return ret;
+    }
+
+    void addChangedStatuses(final List<NewChunkHolder> changedFullStatus) {
+        if (changedFullStatus.isEmpty()) {
+            return;
+        }
+        if (!TickThread.isTickThread()) {
+            this.taskScheduler.scheduleChunkTask(() -> {
+                final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = ChunkHolderManager.this.pendingFullLoadUpdate;
+                for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
+                    pendingFullLoadUpdate.add(changedFullStatus.get(i));
+                }
+
+                ChunkHolderManager.this.processPendingFullUpdate();
+            }, PrioritisedExecutor.Priority.HIGHEST);
+        } else {
+            final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = this.pendingFullLoadUpdate;
+            for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
+                pendingFullLoadUpdate.add(changedFullStatus.get(i));
+            }
+        }
+    }
+
+    final ReferenceLinkedOpenHashSet<NewChunkHolder> unloadQueue = new ReferenceLinkedOpenHashSet<>();
+
+    private void removeChunkHolder(final NewChunkHolder holder) {
+        holder.killed = true;
+        holder.vanillaChunkHolder.onChunkRemove();
+        this.autoSaveQueue.remove(holder);
+        ChunkSystem.onChunkHolderDelete(this.world, holder.vanillaChunkHolder);
+        this.chunkHolders.remove(CoordinateUtils.getChunkKey(holder.chunkX, holder.chunkZ));
+    }
+
+    // note: never call while inside the chunk system, this will absolutely break everything
+    public void processUnloads() {
+        TickThread.ensureTickThread("Cannot unload chunks off-main");
+
+        if (BLOCK_TICKET_UPDATES.get() == Boolean.TRUE) {
+            throw new IllegalStateException("Cannot unload chunks recursively");
+        }
+        if (this.ticketLock.isHeldByCurrentThread()) {
+            throw new IllegalStateException("Cannot hold ticket update lock while calling processUnloads");
+        }
+        if (this.taskScheduler.schedulingLock.isHeldByCurrentThread()) {
+            throw new IllegalStateException("Cannot hold scheduling lock while calling processUnloads");
+        }
+
+        final List<NewChunkHolder.UnloadState> unloadQueue;
+        final List<ChunkProgressionTask> scheduleList = new ArrayList<>();
+        this.ticketLock.lock();
+        try {
+            this.taskScheduler.schedulingLock.lock();
+            try {
+                if (this.unloadQueue.isEmpty()) {
+                    return;
+                }
+                // in order to ensure all chunks in the unload queue do not have a pending ticket level update,
+                // process them now
+                this.processTicketUpdates(false, false, scheduleList);
+                unloadQueue = new ArrayList<>((int)(this.unloadQueue.size() * 0.05) + 1);
+
+                final int unloadCount = Math.max(50, (int)(this.unloadQueue.size() * 0.05));
+                for (int i = 0; i < unloadCount && !this.unloadQueue.isEmpty(); ++i) {
+                    final NewChunkHolder chunkHolder = this.unloadQueue.removeFirst();
+                    if (chunkHolder.isSafeToUnload() != null) {
+                        LOGGER.error("Chunkholder " + chunkHolder + " is not safe to unload but is inside the unload queue?");
+                        continue;
+                    }
+                    final NewChunkHolder.UnloadState state = chunkHolder.unloadStage1();
+                    if (state == null) {
+                        // can unload immediately
+                        this.removeChunkHolder(chunkHolder);
+                        continue;
+                    }
+                    unloadQueue.add(state);
+                }
+            } finally {
+                this.taskScheduler.schedulingLock.unlock();
+            }
+        } finally {
+            this.ticketLock.unlock();
+        }
+        // schedule tasks, we can't let processTicketUpdates do this because we call it holding the schedule lock
+        for (int i = 0, len = scheduleList.size(); i < len; ++i) {
+            scheduleList.get(i).schedule();
+        }
+
+        final List<NewChunkHolder> toRemove = new ArrayList<>(unloadQueue.size());
+
+        final Boolean before = this.blockTicketUpdates();
+        try {
+            for (int i = 0, len = unloadQueue.size(); i < len; ++i) {
+                final NewChunkHolder.UnloadState state = unloadQueue.get(i);
+                final NewChunkHolder holder = state.holder();
+
+                holder.unloadStage2(state);
+                toRemove.add(holder);
+            }
+        } finally {
+            this.unblockTicketUpdates(before);
+        }
+
+        this.ticketLock.lock();
+        try {
+            this.taskScheduler.schedulingLock.lock();
+            try {
+                for (int i = 0, len = toRemove.size(); i < len; ++i) {
+                    final NewChunkHolder holder = toRemove.get(i);
+
+                    if (holder.unloadStage3()) {
+                        this.removeChunkHolder(holder);
+                    } else {
+                        // add cooldown so the next unload check is not immediately next tick
+                        this.addTicketAtLevel(TicketType.UNLOAD_COOLDOWN, holder.chunkX, holder.chunkZ, MAX_TICKET_LEVEL, Unit.INSTANCE);
+                    }
+                }
+            } finally {
+                this.taskScheduler.schedulingLock.unlock();
+            }
+        } finally {
+            this.ticketLock.unlock();
+        }
+    }
+
+    private final ThreadLocal<Boolean> BLOCK_TICKET_UPDATES = ThreadLocal.withInitial(() -> {
+        return Boolean.FALSE;
+    });
+
+    public Boolean blockTicketUpdates() {
+        final Boolean ret = BLOCK_TICKET_UPDATES.get();
+        BLOCK_TICKET_UPDATES.set(Boolean.TRUE);
+        return ret;
+    }
+
+    public void unblockTicketUpdates(final Boolean before) {
+        BLOCK_TICKET_UPDATES.set(before);
+    }
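+    // usage sketch, mirroring processUnloads():
+    //   final Boolean before = this.blockTicketUpdates();
+    //   try {
+    //       // work that must not trigger recursive ticket level updates
+    //   } finally {
+    //       this.unblockTicketUpdates(before);
+    //   }
+    // saving and restoring the previous value keeps nested blocks safe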
+
+    public boolean processTicketUpdates() {
+        return this.processTicketUpdates(true, true, null);
+    }
+
+    private static final ThreadLocal<List<ChunkProgressionTask>> CURRENT_TICKET_UPDATE_SCHEDULING = new ThreadLocal<>();
+
+    static List<ChunkProgressionTask> getCurrentTicketUpdateScheduling() {
+        return CURRENT_TICKET_UPDATE_SCHEDULING.get();
+    }
+
+    private boolean processTicketUpdates(final boolean checkLocks, final boolean processFullUpdates, List<ChunkProgressionTask> scheduledTasks) {
+        TickThread.ensureTickThread("Cannot process ticket levels off-main");
+        if (BLOCK_TICKET_UPDATES.get() == Boolean.TRUE) {
+            throw new IllegalStateException("Cannot update ticket level while unloading chunks or updating entity manager");
+        }
+        if (checkLocks && this.ticketLock.isHeldByCurrentThread()) {
+            throw new IllegalStateException("Illegal recursive processTicketUpdates!");
+        }
+        if (checkLocks && this.taskScheduler.schedulingLock.isHeldByCurrentThread()) {
+            throw new IllegalStateException("Cannot update ticket levels from a scheduler context!");
+        }
+
+        List<NewChunkHolder> changedFullStatus = null;
+
+        final boolean isTickThread = TickThread.isTickThread();
+
+        boolean ret = false;
+        final boolean canProcessFullUpdates = processFullUpdates & isTickThread;
+        final boolean canProcessScheduling = scheduledTasks == null;
+
+        this.ticketLock.lock();
+        try {
+            final boolean levelsUpdated = this.ticketLevelPropagator.propagateUpdates();
+            if (levelsUpdated) {
+                // Unlike CB, ticket level updates cannot happen recursively. Thank god.
+                if (!this.ticketLevelUpdates.isEmpty()) {
+                    ret = true;
+
+                    // first the necessary chunkholders must be created, so just update the ticket levels
+                    for (final Iterator<Long2IntMap.Entry> iterator = this.ticketLevelUpdates.long2IntEntrySet().fastIterator(); iterator.hasNext();) {
+                        final Long2IntMap.Entry entry = iterator.next();
+                        final long key = entry.getLongKey();
+                        final int newLevel = entry.getIntValue();
+
+                        NewChunkHolder current = this.chunkHolders.get(key);
+                        if (current == null && newLevel > MAX_TICKET_LEVEL) {
+                            // not loaded and it shouldn't be loaded!
+                            iterator.remove();
+                            continue;
+                        }
+
+                        final int currentLevel = current == null ? MAX_TICKET_LEVEL + 1 : current.getCurrentTicketLevel();
+                        if (currentLevel == newLevel) {
+                            // nothing to do
+                            iterator.remove();
+                            continue;
+                        }
+
+                        if (current == null) {
+                            // must create
+                            current = this.createChunkHolder(key);
+                            this.chunkHolders.put(key, current);
+                            current.updateTicketLevel(newLevel);
+                        } else {
+                            current.updateTicketLevel(newLevel);
+                        }
+                    }
+
+                    if (scheduledTasks == null) {
+                        scheduledTasks = new ArrayList<>();
+                    }
+                    changedFullStatus = new ArrayList<>();
+
+                    // allow the chunkholders to process ticket level updates without needing to acquire the schedule lock every time
+                    final List<ChunkProgressionTask> prev = CURRENT_TICKET_UPDATE_SCHEDULING.get();
+                    CURRENT_TICKET_UPDATE_SCHEDULING.set(scheduledTasks);
+                    try {
+                        this.taskScheduler.schedulingLock.lock();
+                        try {
+                            for (final Iterator<Long2IntMap.Entry> iterator = this.ticketLevelUpdates.long2IntEntrySet().fastIterator(); iterator.hasNext();) {
+                                final Long2IntMap.Entry entry = iterator.next();
+                                final long key = entry.getLongKey();
+                                final NewChunkHolder current = this.chunkHolders.get(key);
+
+                                if (current == null) {
+                                    throw new IllegalStateException("Expected chunk holder to be created");
+                                }
+
+                                current.processTicketLevelUpdate(scheduledTasks, changedFullStatus);
+                            }
+                        } finally {
+                            this.taskScheduler.schedulingLock.unlock();
+                        }
+                    } finally {
+                        CURRENT_TICKET_UPDATE_SCHEDULING.set(prev);
+                    }
+
+                    this.ticketLevelUpdates.clear();
+                }
+            }
+        } finally {
+            this.ticketLock.unlock();
+        }
+
+        if (changedFullStatus != null) {
+            this.addChangedStatuses(changedFullStatus);
+        }
+
+        if (canProcessScheduling && scheduledTasks != null) {
+            for (int i = 0, len = scheduledTasks.size(); i < len; ++i) {
+                scheduledTasks.get(i).schedule();
+            }
+        }
+
+        if (canProcessFullUpdates) {
+            ret |= this.processPendingFullUpdate();
+        }
+
+        return ret;
+    }
+
+    // only call on tick thread
+    protected final boolean processPendingFullUpdate() {
+        final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = this.pendingFullLoadUpdate;
+
+        boolean ret = false;
+
+        List<NewChunkHolder> changedFullStatus = new ArrayList<>();
+
+        NewChunkHolder holder;
+        while ((holder = pendingFullLoadUpdate.poll()) != null) {
+            ret |= holder.handleFullStatusChange(changedFullStatus);
+
+            if (!changedFullStatus.isEmpty()) {
+                for (int i = 0, len = changedFullStatus.size(); i < len; ++i) {
+                    pendingFullLoadUpdate.add(changedFullStatus.get(i));
+                }
+                changedFullStatus.clear();
+            }
+        }
+
+        return ret;
+    }
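+    // worklist pattern: a status change on one holder may unblock others, so any holders
+    // reported via changedFullStatus are pushed back onto the deque and drained until the
+    // queue is empty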
+
+    public JsonObject getDebugJsonForWatchdog() {
+        // try to detect any potential deadlock that would require us to read unlocked
+        try {
+            if (this.ticketLock.tryLock(10, TimeUnit.SECONDS)) {
+                try {
+                    if (this.taskScheduler.schedulingLock.tryLock(10, TimeUnit.SECONDS)) {
+                        try {
+                            return this.getDebugJsonNoLock();
+                        } finally {
+                            this.taskScheduler.schedulingLock.unlock();
+                        }
+                    }
+                } finally {
+                    this.ticketLock.unlock();
+                }
+            }
+        } catch (final InterruptedException ignore) {}
+
+        LOGGER.error("Failed to acquire ticket and scheduling lock before timeout for world " + this.world.getWorld().getName());
+
+        // because we read without locks, it may throw exceptions for fastutil maps
+        // so just try until it works...
+        Throwable lastException = null;
+        for (int count = 0; count < 1000; ++count) {
+            try {
+                return this.getDebugJsonNoLock();
+            } catch (final ThreadDeath death) {
+                throw death;
+            } catch (final Throwable thr) {
+                lastException = thr;
+                Thread.yield();
+                LockSupport.parkNanos(10_000L);
+            }
+        }
+
+        // failed, return
+        LOGGER.error("Failed to retrieve debug json for watchdog thread without locking", lastException);
+        return null;
+    }
+
+    private JsonObject getDebugJsonNoLock() {
+        final JsonObject ret = new JsonObject();
+        ret.addProperty("current_tick", Long.valueOf(this.currentTick));
+
+        final JsonArray unloadQueue = new JsonArray();
+        ret.add("unload_queue", unloadQueue);
+        for (final NewChunkHolder holder : this.unloadQueue) {
+            final JsonObject coordinate = new JsonObject();
+            unloadQueue.add(coordinate);
+
+            coordinate.addProperty("chunkX", Integer.valueOf(holder.chunkX));
+            coordinate.addProperty("chunkZ", Integer.valueOf(holder.chunkZ));
+        }
+
+        final JsonArray holders = new JsonArray();
+        ret.add("chunkholders", holders);
+
+        for (final NewChunkHolder holder : this.getChunkHolders()) {
+            holders.add(holder.getDebugJson());
+        }
+
+        final JsonArray removeTickToChunkExpireTicketCount = new JsonArray();
+        ret.add("remove_tick_to_chunk_expire_ticket_count", removeTickToChunkExpireTicketCount);
+
+        for (final Long2ObjectMap.Entry<Long2IntOpenHashMap> tickEntry : this.removeTickToChunkExpireTicketCount.long2ObjectEntrySet()) {
+            final long tick = tickEntry.getLongKey();
+            final Long2IntOpenHashMap coordinateToCount = tickEntry.getValue();
+
+            final JsonObject tickJson = new JsonObject();
+            removeTickToChunkExpireTicketCount.add(tickJson);
+
+            tickJson.addProperty("tick", Long.valueOf(tick));
+
+            final JsonArray tickEntries = new JsonArray();
+            tickJson.add("entries", tickEntries);
+
+            for (final Long2IntMap.Entry entry : coordinateToCount.long2IntEntrySet()) {
+                final long coordinate = entry.getLongKey();
+                final int count = entry.getIntValue();
+
+                final JsonObject entryJson = new JsonObject();
+                tickEntries.add(entryJson);
+
+                entryJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate)));
+                entryJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate)));
+                entryJson.addProperty("count", Integer.valueOf(count));
+            }
+        }
+
+        final JsonArray allTicketsJson = new JsonArray();
+        ret.add("tickets", allTicketsJson);
+
+        for (final Long2ObjectMap.Entry<SortedArraySet<Ticket<?>>> coordinateTickets : this.tickets.long2ObjectEntrySet()) {
+            final long coordinate = coordinateTickets.getLongKey();
+            final SortedArraySet<Ticket<?>> tickets = coordinateTickets.getValue();
+
+            final JsonObject coordinateJson = new JsonObject();
+            allTicketsJson.add(coordinateJson);
+
+            coordinateJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate)));
+            coordinateJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate)));
+
+            final JsonArray ticketsSerialized = new JsonArray();
+            coordinateJson.add("tickets", ticketsSerialized);
+
+            for (final Ticket<?> ticket : tickets) {
+                final JsonObject ticketSerialized = new JsonObject();
+                ticketsSerialized.add(ticketSerialized);
+
+                ticketSerialized.addProperty("type", ticket.getType().toString());
+                ticketSerialized.addProperty("level", Integer.valueOf(ticket.getTicketLevel()));
+                ticketSerialized.addProperty("identifier", Objects.toString(ticket.key));
+                ticketSerialized.addProperty("remove_tick", Long.valueOf(ticket.removalTick));
+            }
+        }
+
+        return ret;
+    }
+
+    public JsonObject getDebugJson() {
+        final List<ChunkProgressionTask> scheduleList = new ArrayList<>();
+        try {
+            final JsonObject ret;
+            this.ticketLock.lock();
+            try {
+                this.taskScheduler.schedulingLock.lock();
+                try {
+                    this.processTicketUpdates(false, false, scheduleList);
+                    ret = this.getDebugJsonNoLock();
+                } finally {
+                    this.taskScheduler.schedulingLock.unlock();
+                }
+            } finally {
+                this.ticketLock.unlock();
+            }
+            return ret;
+        } finally {
+            // schedule tasks, we can't let processTicketUpdates do this because we call it holding the schedule lock
+            for (int i = 0, len = scheduleList.size(); i < len; ++i) {
+                scheduleList.get(i).schedule();
+            }
+        }
+    }
+}
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..53ddd7e9ac05e6a9eb809f329796e6d4f6bb2ab1
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java
@@ -0,0 +1,181 @@
+package io.papermc.paper.chunk.system.scheduling;
+
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.starlight.common.light.StarLightEngine;
+import ca.spottedleaf.starlight.common.light.StarLightInterface;
+import io.papermc.paper.chunk.system.light.LightQueue;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.world.level.ChunkPos;
+import net.minecraft.world.level.chunk.ChunkAccess;
+import net.minecraft.world.level.chunk.ChunkStatus;
+import net.minecraft.world.level.chunk.ProtoChunk;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import java.util.function.BooleanSupplier;
+
+public final class ChunkLightTask extends ChunkProgressionTask {
+
+    private static final Logger LOGGER = LogManager.getLogger();
+
+    protected final ChunkAccess fromChunk;
+
+    private final LightTaskPriorityHolder priorityHolder;
+
+    public ChunkLightTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ,
+                          final ChunkAccess chunk, final PrioritisedExecutor.Priority priority) {
+        super(scheduler, world, chunkX, chunkZ);
+        if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+            throw new IllegalArgumentException("Invalid priority " + priority);
+        }
+        this.priorityHolder = new LightTaskPriorityHolder(priority, this);
+        this.fromChunk = chunk;
+    }
+
+    @Override
+    public boolean isScheduled() {
+        return this.priorityHolder.isScheduled();
+    }
+
+    @Override
+    public ChunkStatus getTargetStatus() {
+        return ChunkStatus.LIGHT;
+    }
+
+    @Override
+    public void schedule() {
+        this.priorityHolder.schedule();
+    }
+
+    @Override
+    public void cancel() {
+        this.priorityHolder.cancel();
+    }
+
+    @Override
+    public PrioritisedExecutor.Priority getPriority() {
+        return this.priorityHolder.getPriority();
+    }
+
+    @Override
+    public void lowerPriority(final PrioritisedExecutor.Priority priority) {
+        this.priorityHolder.lowerPriority(priority);
+    }
+
+    @Override
+    public void setPriority(final PrioritisedExecutor.Priority priority) {
+        this.priorityHolder.setPriority(priority);
+    }
+
+    @Override
+    public void raisePriority(final PrioritisedExecutor.Priority priority) {
+        this.priorityHolder.raisePriority(priority);
+    }
+
+    private static final class LightTaskPriorityHolder extends PriorityHolder {
+
+        protected final ChunkLightTask task;
+
+        protected LightTaskPriorityHolder(final PrioritisedExecutor.Priority priority, final ChunkLightTask task) {
+            super(priority);
+            this.task = task;
+        }
+
+        @Override
+        protected void cancelScheduled() {
+            final ChunkLightTask task = this.task;
+            task.complete(null, null);
+        }
+
+        @Override
+        protected PrioritisedExecutor.Priority getScheduledPriority() {
+            final ChunkLightTask task = this.task;
+            return task.world.getChunkSource().getLightEngine().theLightEngine.lightQueue.getPriority(task.chunkX, task.chunkZ);
+        }
+
+        @Override
+        protected void scheduleTask(final PrioritisedExecutor.Priority priority) {
+            final ChunkLightTask task = this.task;
+            final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
+            final LightQueue lightQueue = starLightInterface.lightQueue;
+            lightQueue.queueChunkLightTask(new ChunkPos(task.chunkX, task.chunkZ), new LightTask(starLightInterface, task), priority);
+            lightQueue.setPriority(task.chunkX, task.chunkZ, priority);
+        }
+
+        @Override
+        protected void lowerPriorityScheduled(final PrioritisedExecutor.Priority priority) {
+            final ChunkLightTask task = this.task;
+            final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
+            final LightQueue lightQueue = starLightInterface.lightQueue;
+            lightQueue.lowerPriority(task.chunkX, task.chunkZ, priority);
+        }
+
+        @Override
+        protected void setPriorityScheduled(final PrioritisedExecutor.Priority priority) {
+            final ChunkLightTask task = this.task;
+            final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
+            final LightQueue lightQueue = starLightInterface.lightQueue;
+            lightQueue.setPriority(task.chunkX, task.chunkZ, priority);
+        }
+
+        @Override
+        protected void raisePriorityScheduled(final PrioritisedExecutor.Priority priority) {
+            final ChunkLightTask task = this.task;
+            final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine;
+            final LightQueue lightQueue = starLightInterface.lightQueue;
+            lightQueue.raisePriority(task.chunkX, task.chunkZ, priority);
+        }
+    }
+
+    private static final class LightTask implements BooleanSupplier {
+
+        protected final StarLightInterface lightEngine;
+        protected final ChunkLightTask task;
+
+        public LightTask(final StarLightInterface lightEngine, final ChunkLightTask task) {
+            this.lightEngine = lightEngine;
+            this.task = task;
+        }
+
+        @Override
+        public boolean getAsBoolean() {
+            final ChunkLightTask task = this.task;
+            // executed on light thread
+            if (!task.priorityHolder.markExecuting()) {
+                // cancelled
+                return false;
+            }
+
+            try {
+                final Boolean[] emptySections = StarLightEngine.getEmptySectionsForChunk(task.fromChunk);
+
+                if (task.fromChunk.isLightCorrect() && task.fromChunk.getStatus().isOrAfter(ChunkStatus.LIGHT)) {
+                    this.lightEngine.forceLoadInChunk(task.fromChunk, emptySections);
+                    this.lightEngine.checkChunkEdges(task.chunkX, task.chunkZ);
+                } else {
+                    task.fromChunk.setLightCorrect(false);
+                    this.lightEngine.lightChunk(task.fromChunk, emptySections);
+                    task.fromChunk.setLightCorrect(true);
+                }
+                // we need to advance status
+                if (task.fromChunk instanceof ProtoChunk chunk && chunk.getStatus() == ChunkStatus.LIGHT.getParent()) {
+                    chunk.setStatus(ChunkStatus.LIGHT);
+                }
+            } catch (final Throwable thr) {
+                if (!(thr instanceof ThreadDeath)) {
+                    LOGGER.fatal("Failed to light chunk " + task.fromChunk.getPos().toString() + " in world '" + this.lightEngine.getWorld().getWorld().getName() + "'", thr);
+                }
+
+                task.complete(null, thr);
+
+                if (thr instanceof ThreadDeath) {
+                    throw (ThreadDeath)thr;
+                }
+
+                return true;
+            }
+
+            task.complete(task.fromChunk, null);
+            return true;
+        }
+    }
+}
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..3df793f7e6bb67f40e7387a72fdafb912a7b1373
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
@@ -0,0 +1,499 @@
+package io.papermc.paper.chunk.system.scheduling;
+
+import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import ca.spottedleaf.dataconverter.minecraft.MCDataConverter;
+import ca.spottedleaf.dataconverter.minecraft.datatypes.MCTypeRegistry;
+import com.mojang.logging.LogUtils;
+import io.papermc.paper.chunk.system.io.RegionFileIOThread;
+import io.papermc.paper.chunk.system.poi.PoiChunk;
+import net.minecraft.SharedConstants;
+import net.minecraft.core.registries.Registries;
+import net.minecraft.nbt.CompoundTag;
+import net.minecraft.server.level.ChunkMap;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.world.level.ChunkPos;
+import net.minecraft.world.level.chunk.ChunkAccess;
+import net.minecraft.world.level.chunk.ChunkStatus;
+import net.minecraft.world.level.chunk.ProtoChunk;
+import net.minecraft.world.level.chunk.UpgradeData;
+import net.minecraft.world.level.chunk.storage.ChunkSerializer;
+import net.minecraft.world.level.chunk.storage.EntityStorage;
+import net.minecraft.world.level.levelgen.blending.BlendingData;
+import org.slf4j.Logger;
+import java.lang.invoke.VarHandle;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Consumer;
+
+public final class ChunkLoadTask extends ChunkProgressionTask {
+
+    private static final Logger LOGGER = LogUtils.getClassLogger();
+
+    private final NewChunkHolder chunkHolder;
+    private final ChunkDataLoadTask loadTask;
+
+    private boolean cancelled;
+    private NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask;
+    private NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask;
+
+    protected ChunkLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ,
+                            final NewChunkHolder chunkHolder, final PrioritisedExecutor.Priority priority) {
+        super(scheduler, world, chunkX, chunkZ);
+        this.chunkHolder = chunkHolder;
+        this.loadTask = new ChunkDataLoadTask(scheduler, world, chunkX, chunkZ, priority);
+        this.loadTask.addCallback((final GenericDataLoadTask.TaskResult<ChunkAccess, Throwable> result) -> {
+            ChunkLoadTask.this.complete(result == null ? null : result.left(), result == null ? null : result.right());
+        });
+    }
+
+    @Override
+    public ChunkStatus getTargetStatus() {
+        return ChunkStatus.EMPTY;
+    }
+
+    private boolean scheduled;
+
+    @Override
+    public boolean isScheduled() {
+        return this.scheduled;
+    }
+
+    @Override
+    public void schedule() {
+        final NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask;
+        final NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask;
+
+        final AtomicInteger count = new AtomicInteger();
+        final Consumer<GenericDataLoadTask.TaskResult<?, ?>> scheduleLoadTask = (final GenericDataLoadTask.TaskResult<?, ?> result) -> {
+            if (count.decrementAndGet() == 0) {
+                ChunkLoadTask.this.loadTask.schedule(false);
+            }
+        };
+
+        // NOTE: it is IMPOSSIBLE for getOrLoadEntityData/getOrLoadPoiData to complete synchronously, because
+        // they must schedule a task to off main or to on main to complete
+        this.scheduler.schedulingLock.lock();
+        try {
+            if (this.scheduled) {
+                throw new IllegalStateException("schedule() called twice");
+            }
+            this.scheduled = true;
+            if (this.cancelled) {
+                return;
+            }
+            if (!this.chunkHolder.isEntityChunkNBTLoaded()) {
+                entityLoadTask = this.chunkHolder.getOrLoadEntityData((Consumer)scheduleLoadTask);
+                count.setPlain(count.getPlain() + 1);
+            } else {
+                entityLoadTask = null;
+            }
+
+            if (!this.chunkHolder.isPoiChunkLoaded()) {
+                poiLoadTask = this.chunkHolder.getOrLoadPoiData((Consumer)scheduleLoadTask);
+                count.setPlain(count.getPlain() + 1);
+            } else {
+                poiLoadTask = null;
+            }
+
+            this.entityLoadTask = entityLoadTask;
+            this.poiLoadTask = poiLoadTask;
+        } finally {
+            this.scheduler.schedulingLock.unlock();
+        }
+
+        if (entityLoadTask != null) {
+            entityLoadTask.schedule();
+        }
+
+        if (poiLoadTask != null) {
+            poiLoadTask.schedule();
+        }
+
+        if (entityLoadTask == null && poiLoadTask == null) {
+            // no need to wait on those, we can schedule now
+            this.loadTask.schedule(false);
+        }
+    }
+
+    @Override
+    public void cancel() {
+        // must be before load task access, so we can synchronise with the writes to the fields
+        this.scheduler.schedulingLock.lock();
+        try {
+            this.cancelled = true;
+        } finally {
+            this.scheduler.schedulingLock.unlock();
+        }
+
+        /*
+        Note: The entityLoadTask/poiLoadTask do not complete when cancelled,
+        but this is fine because if they are successfully cancelled then
+        we will successfully cancel the load task, which will complete when cancelled
+        */
+
+        if (this.entityLoadTask != null) {
+            this.entityLoadTask.cancel();
+        }
+        if (this.poiLoadTask != null) {
+            this.poiLoadTask.cancel();
+        }
+        this.loadTask.cancel();
+    }
+
+    @Override
+    public PrioritisedExecutor.Priority getPriority() {
+        return this.loadTask.getPriority();
+    }
+
+    @Override
+    public void lowerPriority(final PrioritisedExecutor.Priority priority) {
+        final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask();
+        if (entityLoad != null) {
+            entityLoad.lowerPriority(priority);
+        }
+
+        final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask();
+
+        if (poiLoad != null) {
+            poiLoad.lowerPriority(priority);
+        }
+
+        this.loadTask.lowerPriority(priority);
+    }
+
+    @Override
+    public void setPriority(final PrioritisedExecutor.Priority priority) {
+        final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask();
+        if (entityLoad != null) {
+            entityLoad.setPriority(priority);
+        }
+
+        final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask();
+
+        if (poiLoad != null) {
+            poiLoad.setPriority(priority);
+        }
+
+        this.loadTask.setPriority(priority);
+    }
+
+    @Override
+    public void raisePriority(final PrioritisedExecutor.Priority priority) {
+        final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask();
+        if (entityLoad != null) {
+            entityLoad.raisePriority(priority);
+        }
+
+        final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask();
+
+        if (poiLoad != null) {
+            poiLoad.raisePriority(priority);
+        }
+
+        this.loadTask.raisePriority(priority);
+    }
+
+    protected static abstract class CallbackDataLoadTask<OnMain,FinalCompletion> extends GenericDataLoadTask<OnMain,FinalCompletion> {
+
+        private TaskResult<FinalCompletion, Throwable> result;
+        private final MultiThreadedQueue<Consumer<TaskResult<FinalCompletion, Throwable>>> waiters = new MultiThreadedQueue<>();
+
+        protected volatile boolean completed;
+        protected static final VarHandle COMPLETED_HANDLE = ConcurrentUtil.getVarHandle(CallbackDataLoadTask.class, "completed", boolean.class);
+
+        protected CallbackDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
+                                       final int chunkZ, final RegionFileIOThread.RegionFileType type,
+                                       final PrioritisedExecutor.Priority priority) {
+            super(scheduler, world, chunkX, chunkZ, type, priority);
+        }
+
+        public void addCallback(final Consumer<TaskResult<FinalCompletion, Throwable>> consumer) {
+            if (!this.waiters.add(consumer)) {
+                try {
+                    consumer.accept(this.result);
+                } catch (final Throwable throwable) {
+                    this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
+                        "Consumer", ChunkTaskScheduler.stringIfNull(consumer),
+                        "Completed throwable", ChunkTaskScheduler.stringIfNull(this.result.right())
+                    ), throwable);
+                    if (throwable instanceof ThreadDeath) {
+                        throw (ThreadDeath)throwable;
+                    }
+                }
+            }
+        }
+
+        @Override
+        protected void onComplete(final TaskResult<FinalCompletion, Throwable> result) {
+            if ((boolean)COMPLETED_HANDLE.getAndSet((CallbackDataLoadTask)this, (boolean)true)) {
+                throw new IllegalStateException("Already completed");
+            }
+            this.result = result;
+            Consumer<TaskResult<FinalCompletion, Throwable>> consumer;
+            while ((consumer = this.waiters.pollOrBlockAdds()) != null) {
+                try {
+                    consumer.accept(result);
+                } catch (final Throwable throwable) {
+                    this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
+                        "Consumer", ChunkTaskScheduler.stringIfNull(consumer),
+                        "Completed throwable", ChunkTaskScheduler.stringIfNull(result.right())
+                    ), throwable);
+                    if (throwable instanceof ThreadDeath) {
+                        throw (ThreadDeath)throwable;
+                    }
+                    return;
+                }
+            }
+        }
+    }
+
+    public final class ChunkDataLoadTask extends CallbackDataLoadTask<ChunkSerializer.InProgressChunkHolder, ChunkAccess> {
+        protected ChunkDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
+                                    final int chunkZ, final PrioritisedExecutor.Priority priority) {
+            super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.CHUNK_DATA, priority);
+        }
+
+        @Override
+        protected boolean hasOffMain() {
+            return true;
+        }
+
+        @Override
+        protected boolean hasOnMain() {
+            return true;
+        }
+
+        @Override
+        protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
+            return this.scheduler.loadExecutor.createTask(run, priority);
+        }
+
+        @Override
+        protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
+            return this.scheduler.createChunkTask(this.chunkX, this.chunkZ, run, priority);
+        }
+
+        @Override
+        protected TaskResult<ChunkAccess, Throwable> completeOnMainOffMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) {
+            if (data != null) {
+                return null;
+            }
+
+            final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk();
+            if (poiChunk == null) {
+                LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
+            } else if (!poiChunk.isLoaded()) {
+                // need to call poiChunk.load() on main
+                return null;
+            }
+
+            return new TaskResult<>(this.getEmptyChunk(), null);
+        }
+
+        @Override
+        protected TaskResult<ChunkSerializer.InProgressChunkHolder, Throwable> runOffMain(final CompoundTag data, final Throwable throwable) {
|
|
+ if (throwable != null) {
|
|
+ LOGGER.error("Failed to load chunk data for task: " + this.toString() + ", chunk data will be lost", throwable);
|
|
+ return new TaskResult<>(null, null);
|
|
+ }
|
|
+
|
|
+ if (data == null) {
|
|
+ return new TaskResult<>(null, null);
|
|
+ }
|
|
+
|
|
+ // need to convert data, and then deserialize it
|
|
+
|
|
+ try {
|
|
+ final ChunkPos chunkPos = new ChunkPos(this.chunkX, this.chunkZ);
|
|
+ final ChunkMap chunkMap = this.world.getChunkSource().chunkMap;
|
|
+ // run converters
|
|
+ // note: upgradeChunkTag copies the data already
|
|
+ final CompoundTag converted = chunkMap.upgradeChunkTag(
|
|
+ this.world.getTypeKey(), chunkMap.overworldDataStorage, data, chunkMap.generator.getTypeNameForDataFixer(),
|
|
+ chunkPos, this.world
|
|
+ );
|
|
+ // deserialize
|
|
+ final ChunkSerializer.InProgressChunkHolder chunkHolder = ChunkSerializer.loadChunk(
|
|
+ this.world, chunkMap.getPoiManager(), chunkPos, converted, true
|
|
+ );
|
|
+
|
|
+ return new TaskResult<>(chunkHolder, null);
|
|
+ } catch (final ThreadDeath death) {
|
|
+ throw death;
|
|
+ } catch (final Throwable thr2) {
|
|
+ LOGGER.error("Failed to parse chunk data for task: " + this.toString() + ", chunk data will be lost", thr2);
|
|
+ return new TaskResult<>(null, thr2);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private ProtoChunk getEmptyChunk() {
|
|
+ return new ProtoChunk(
|
|
+ new ChunkPos(this.chunkX, this.chunkZ), UpgradeData.EMPTY, this.world,
|
|
+ this.world.registryAccess().registryOrThrow(Registries.BIOME), (BlendingData)null
|
|
+ );
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ protected TaskResult<ChunkAccess, Throwable> runOnMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) {
|
|
+ final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk();
|
|
+ if (poiChunk == null) {
|
|
+ LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
|
|
+ } else {
|
|
+ poiChunk.load();
|
|
+ }
|
|
+
|
|
+ if (data == null || data.protoChunk == null) {
|
|
+ // throwable could be non-null, but the off-main task will print its exceptions - so we don't need to care,
|
|
+ // it's handled already
|
|
+
|
|
+ return new TaskResult<>(this.getEmptyChunk(), null);
|
|
+ }
|
|
+
|
|
+ // have tasks to run (at this point, it's just the POI consistency checking)
|
|
+ try {
|
|
+ if (data.tasks != null) {
|
|
+ for (int i = 0, len = data.tasks.size(); i < len; ++i) {
|
|
+ data.tasks.poll().run();
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return new TaskResult<>(data.protoChunk, null);
|
|
+ } catch (final ThreadDeath death) {
|
|
+ throw death;
|
|
+ } catch (final Throwable thr2) {
|
|
+ LOGGER.error("Failed to parse main tasks for task " + this.toString() + ", chunk data will be lost", thr2);
|
|
+ return new TaskResult<>(this.getEmptyChunk(), null);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public static final class PoiDataLoadTask extends CallbackDataLoadTask<PoiChunk, PoiChunk> {
|
|
+ public PoiDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
|
|
+ final int chunkZ, final PrioritisedExecutor.Priority priority) {
|
|
+ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.POI_DATA, priority);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ protected boolean hasOffMain() {
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ protected boolean hasOnMain() {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
|
|
+ return this.scheduler.loadExecutor.createTask(run, priority);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
|
|
+ throw new UnsupportedOperationException();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ protected TaskResult<PoiChunk, Throwable> completeOnMainOffMain(final PoiChunk data, final Throwable throwable) {
|
|
+ throw new UnsupportedOperationException();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ protected TaskResult<PoiChunk, Throwable> runOffMain(CompoundTag data, final Throwable throwable) {
|
|
+ if (throwable != null) {
|
|
+ LOGGER.error("Failed to load poi data for task: " + this.toString() + ", poi data will be lost", throwable);
|
|
+ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null);
|
|
+ }
|
|
+
|
|
+ if (data == null || data.isEmpty()) {
|
|
+ // nothing to do
|
|
+ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null);
|
|
+ }
|
|
+
|
|
+ try {
|
|
+ data = data.copy(); // coming from the I/O thread, so we need to copy
|
|
+ // run converters
|
|
+ final int dataVersion = !data.contains(SharedConstants.DATA_VERSION_TAG, 99) ? 1945 : data.getInt(SharedConstants.DATA_VERSION_TAG);
|
|
+ final CompoundTag converted = MCDataConverter.convertTag(
|
|
+ MCTypeRegistry.POI_CHUNK, data, dataVersion, SharedConstants.getCurrentVersion().getWorldVersion()
|
|
+ );
|
|
+
|
|
+ // now we need to parse it
|
|
+ return new TaskResult<>(PoiChunk.parse(this.world, this.chunkX, this.chunkZ, converted), null);
|
|
+ } catch (final ThreadDeath death) {
|
|
+ throw death;
|
|
+ } catch (final Throwable thr2) {
|
|
+ LOGGER.error("Failed to run parse poi data for task: " + this.toString() + ", poi data will be lost", thr2);
|
|
+ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null);
+ }
+ }
+
+ @Override
+ protected TaskResult<PoiChunk, Throwable> runOnMain(final PoiChunk data, final Throwable throwable) {
+ throw new UnsupportedOperationException();
+ }
+ }
+
+ public static final class EntityDataLoadTask extends CallbackDataLoadTask<CompoundTag, CompoundTag> {
+
+ public EntityDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
+ final int chunkZ, final PrioritisedExecutor.Priority priority) {
+ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.ENTITY_DATA, priority);
+ }
+
+ @Override
+ protected boolean hasOffMain() {
+ return true;
+ }
+
+ @Override
+ protected boolean hasOnMain() {
+ return false;
+ }
+
+ @Override
+ protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
+ return this.scheduler.loadExecutor.createTask(run, priority);
+ }
+
+ @Override
+ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected TaskResult<CompoundTag, Throwable> completeOnMainOffMain(final CompoundTag data, final Throwable throwable) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ protected TaskResult<CompoundTag, Throwable> runOffMain(final CompoundTag data, final Throwable throwable) {
+ if (throwable != null) {
+ LOGGER.error("Failed to load entity data for task: " + this.toString() + ", entity data will be lost", throwable);
+ return new TaskResult<>(null, null);
+ }
+
+ if (data == null || data.isEmpty()) {
+ // nothing to do
+ return new TaskResult<>(null, null);
+ }
+
+ try {
+ // note: data comes from the I/O thread, so we need to copy it
+ return new TaskResult<>(EntityStorage.upgradeChunkTag(data.copy()), null);
+ } catch (final ThreadDeath death) {
+ throw death;
+ } catch (final Throwable thr2) {
+ LOGGER.error("Failed to run converters for entity data for task: " + this.toString() + ", entity data will be lost", thr2);
+ return new TaskResult<>(null, thr2);
+ }
+ }
+
+ @Override
+ protected TaskResult<CompoundTag, Throwable> runOnMain(final CompoundTag data, final Throwable throwable) {
+ throw new UnsupportedOperationException();
+ }
+ }
+}
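
Editor's note: CallbackDataLoadTask above (and ChunkProgressionTask below) share one complete-once callback idiom: callbacks registered before completion are queued, callbacks registered after completion run inline with the stored result, and completion may happen exactly once. A minimal sketch of that idiom using only JDK types follows; Paper's internal MultiThreadedQueue makes the add/pollOrBlockAdds handoff atomic, so the re-check dance below is what a plain ConcurrentLinkedQueue version needs instead. Class and method names here are illustrative, not Paper's.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

final class CompleteOnce<T> {
    private final Queue<Consumer<T>> waiters = new ConcurrentLinkedQueue<>();
    private final AtomicBoolean completed = new AtomicBoolean();
    private volatile T result;

    // Callbacks added before completion are queued; afterwards they run inline.
    void addCallback(final Consumer<T> callback) {
        if (!this.completed.get()) {
            this.waiters.add(callback);
            // complete() may have set the flag concurrently; if we can still
            // remove our callback, the drain loop did not (and will not) run it.
            if (!this.completed.get() || !this.waiters.remove(callback)) {
                return;
            }
        }
        callback.accept(this.result);
    }

    void complete(final T value) {
        this.result = value; // publish the result before the flag (volatile ordering)
        if (this.completed.getAndSet(true)) {
            throw new IllegalStateException("Already completed");
        }
        Consumer<T> c;
        while ((c = this.waiters.poll()) != null) {
            c.accept(value);
        }
    }
}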
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..322675a470eacbf0e5452f4009c643f2d0b4ce24
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java
@@ -0,0 +1,105 @@
+package io.papermc.paper.chunk.system.scheduling;
+
+import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue;
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.world.level.chunk.ChunkAccess;
+import net.minecraft.world.level.chunk.ChunkStatus;
+import java.lang.invoke.VarHandle;
+import java.util.Map;
+import java.util.function.BiConsumer;
+
+public abstract class ChunkProgressionTask {
+
+ private final MultiThreadedQueue<BiConsumer<ChunkAccess, Throwable>> waiters = new MultiThreadedQueue<>();
+ private ChunkAccess completedChunk;
+ private Throwable completedThrowable;
+
+ protected final ChunkTaskScheduler scheduler;
+ protected final ServerLevel world;
+ protected final int chunkX;
+ protected final int chunkZ;
+
+ protected volatile boolean completed;
+ protected static final VarHandle COMPLETED_HANDLE = ConcurrentUtil.getVarHandle(ChunkProgressionTask.class, "completed", boolean.class);
+
+ protected ChunkProgressionTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ) {
+ this.scheduler = scheduler;
+ this.world = world;
+ this.chunkX = chunkX;
+ this.chunkZ = chunkZ;
+ }
+
+ // Used only for debug json
+ public abstract boolean isScheduled();
+
+ // Note: It is the responsibility of the task to set the chunk's status once it has completed
+ public abstract ChunkStatus getTargetStatus();
+
+ /* Only executed once */
+ /* Implementations must be prepared to handle cases where cancel() is called before schedule() */
+ public abstract void schedule();
+
+ /* May be called multiple times */
+ public abstract void cancel();
+
+ public abstract PrioritisedExecutor.Priority getPriority();
+
+ /* Schedule lock is always held for the priority update calls */
+
+ public abstract void lowerPriority(final PrioritisedExecutor.Priority priority);
+
+ public abstract void setPriority(final PrioritisedExecutor.Priority priority);
+
+ public abstract void raisePriority(final PrioritisedExecutor.Priority priority);
+
+ public final void onComplete(final BiConsumer<ChunkAccess, Throwable> onComplete) {
+ if (!this.waiters.add(onComplete)) {
+ try {
+ onComplete.accept(this.completedChunk, this.completedThrowable);
+ } catch (final Throwable throwable) {
+ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
+ "Consumer", ChunkTaskScheduler.stringIfNull(onComplete),
+ "Completed throwable", ChunkTaskScheduler.stringIfNull(this.completedThrowable)
+ ), throwable);
+ if (throwable instanceof ThreadDeath) {
+ throw (ThreadDeath)throwable;
+ }
+ }
+ }
+ }
+
+ protected final void complete(final ChunkAccess chunk, final Throwable throwable) {
+ try {
+ this.complete0(chunk, throwable);
+ } catch (final Throwable thr2) {
+ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
+ "Completed throwable", ChunkTaskScheduler.stringIfNull(throwable)
+ ), thr2);
+ if (thr2 instanceof ThreadDeath) {
+ throw (ThreadDeath)thr2;
+ }
+ }
+ }
+
+ private void complete0(final ChunkAccess chunk, final Throwable throwable) {
+ if ((boolean)COMPLETED_HANDLE.getAndSet((ChunkProgressionTask)this, (boolean)true)) {
+ throw new IllegalStateException("Already completed");
+ }
+ this.completedChunk = chunk;
+ this.completedThrowable = throwable;
+
+ BiConsumer<ChunkAccess, Throwable> consumer;
+ while ((consumer = this.waiters.pollOrBlockAdds()) != null) {
+ consumer.accept(chunk, throwable);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "ChunkProgressionTask{class: " + this.getClass().getName() + ", for world: " + this.world.getWorld().getName() +
+ ", chunk: (" + this.chunkX + "," + this.chunkZ + "), hashcode: " + System.identityHashCode(this) + ", priority: " + this.getPriority() +
+ ", status: " + this.getTargetStatus().toString() + ", scheduled: " + this.isScheduled() + "}";
+ }
+}
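
Editor's note: ConcurrentUtil.getVarHandle, used by complete0() above for the one-shot completion guard, is a thin wrapper over the JDK's VarHandle lookup. For reference, the plain-JDK equivalent of that guard looks like the sketch below; class and field names are illustrative, not Paper's.

import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;

final class OnceGuard {
    private volatile boolean completed;
    private static final VarHandle COMPLETED_HANDLE;
    static {
        try {
            COMPLETED_HANDLE = MethodHandles.lookup()
                .findVarHandle(OnceGuard.class, "completed", boolean.class);
        } catch (final ReflectiveOperationException ex) {
            throw new ExceptionInInitializerError(ex);
        }
    }

    void markCompleted() {
        // getAndSet returns the previous value: true means another thread won
        if ((boolean)COMPLETED_HANDLE.getAndSet(this, true)) {
            throw new IllegalStateException("Already completed");
        }
    }
}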
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
new file mode 100644
index 0000000000000000000000000000000000000000..84cc9397237fa0c17aa1012dfb5683c90eb6d3b8
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
@@ -0,0 +1,780 @@
+package io.papermc.paper.chunk.system.scheduling;
+
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool;
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadedTaskQueue;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import com.mojang.logging.LogUtils;
+import io.papermc.paper.configuration.GlobalConfiguration;
+import io.papermc.paper.util.CoordinateUtils;
+import io.papermc.paper.util.TickThread;
+import net.minecraft.CrashReport;
+import net.minecraft.CrashReportCategory;
+import net.minecraft.ReportedException;
+import io.papermc.paper.util.MCUtil;
+import net.minecraft.server.MinecraftServer;
+import net.minecraft.server.level.ChunkHolder;
+import net.minecraft.server.level.ChunkMap;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.server.level.TicketType;
+import net.minecraft.world.level.ChunkPos;
+import net.minecraft.world.level.chunk.ChunkAccess;
+import net.minecraft.world.level.chunk.ChunkStatus;
+import net.minecraft.world.level.chunk.LevelChunk;
+import org.bukkit.Bukkit;
+import org.slf4j.Logger;
+import java.io.File;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.BooleanSupplier;
+import java.util.function.Consumer;
+
+public final class ChunkTaskScheduler {
+
+ private static final Logger LOGGER = LogUtils.getClassLogger();
+
+ static int newChunkSystemIOThreads;
+ static int newChunkSystemWorkerThreads;
+ static int newChunkSystemGenParallelism;
+ static int newChunkSystemLoadParallelism;
+
+ public static ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool workerThreads;
+
+ private static boolean initialised = false;
+
+ public static void init(final GlobalConfiguration.ChunkSystem config) {
+ if (initialised) {
+ return;
+ }
+ initialised = true;
+ newChunkSystemIOThreads = config.ioThreads;
+ newChunkSystemWorkerThreads = config.workerThreads;
+ if (newChunkSystemIOThreads < 0) {
+ newChunkSystemIOThreads = 1;
+ } else {
+ newChunkSystemIOThreads = Math.max(1, newChunkSystemIOThreads);
+ }
+ int defaultWorkerThreads = Runtime.getRuntime().availableProcessors() / 2;
+ if (defaultWorkerThreads <= 4) {
+ defaultWorkerThreads = defaultWorkerThreads <= 3 ? 1 : 2;
+ } else {
+ defaultWorkerThreads = defaultWorkerThreads / 2;
+ }
+ defaultWorkerThreads = Integer.getInteger("Paper.WorkerThreadCount", Integer.valueOf(defaultWorkerThreads));
+
+ if (newChunkSystemWorkerThreads < 0) {
+ newChunkSystemWorkerThreads = defaultWorkerThreads;
+ } else {
+ newChunkSystemWorkerThreads = Math.max(1, newChunkSystemWorkerThreads);
+ }
+
+ String newChunkSystemGenParallelism = config.genParallelism;
+ if (newChunkSystemGenParallelism.equalsIgnoreCase("default")) {
+ newChunkSystemGenParallelism = "true";
+ }
+ boolean useParallelGen;
+ if (newChunkSystemGenParallelism.equalsIgnoreCase("on") || newChunkSystemGenParallelism.equalsIgnoreCase("enabled")
+ || newChunkSystemGenParallelism.equalsIgnoreCase("true")) {
+ useParallelGen = true;
+ } else if (newChunkSystemGenParallelism.equalsIgnoreCase("off") || newChunkSystemGenParallelism.equalsIgnoreCase("disabled")
+ || newChunkSystemGenParallelism.equalsIgnoreCase("false")) {
+ useParallelGen = false;
+ } else {
+ throw new IllegalStateException("Invalid option for gen-parallelism: must be one of [on, off, enabled, disabled, true, false, default]");
+ }
+
+ ChunkTaskScheduler.newChunkSystemGenParallelism = useParallelGen ? newChunkSystemWorkerThreads : 1;
+ ChunkTaskScheduler.newChunkSystemLoadParallelism = newChunkSystemWorkerThreads;
+
+ io.papermc.paper.chunk.system.io.RegionFileIOThread.init(newChunkSystemIOThreads);
+ workerThreads = new ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool(
+ "Paper Chunk System Worker Pool", newChunkSystemWorkerThreads,
+ (final Thread thread, final Integer id) -> {
+ thread.setPriority(Thread.NORM_PRIORITY - 2);
+ thread.setName("Tuinity Chunk System Worker #" + id.intValue());
+ thread.setUncaughtExceptionHandler(io.papermc.paper.chunk.system.scheduling.NewChunkHolder.CHUNKSYSTEM_UNCAUGHT_EXCEPTION_HANDLER);
+ }, (long)(20.0e6)); // 20ms
+
+ LOGGER.info("Chunk system is using " + newChunkSystemIOThreads + " I/O threads, " + newChunkSystemWorkerThreads + " worker threads, and gen parallelism of " + ChunkTaskScheduler.newChunkSystemGenParallelism + " threads");
+ }
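
Editor's note: the default worker count in init() halves the available cores, pins small machines to one or two workers, and halves again past that. A standalone restatement of the same arithmetic (illustrative only; the Paper.WorkerThreadCount system-property override is taken from the code above):

final class DefaultWorkerCount {
    public static void main(final String[] args) {
        final int cores = Runtime.getRuntime().availableProcessors();
        final int half = cores / 2;
        int workers = half <= 4
                ? (half <= 3 ? 1 : 2) // up to 7 cores -> 1 worker, 8-9 cores -> 2
                : half / 2;           // e.g. 16 cores -> 4 workers, 32 cores -> 8
        workers = Integer.getInteger("Paper.WorkerThreadCount", workers);
        System.out.println(cores + " cores -> " + workers + " chunk workers");
    }
}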
+
+ public final ServerLevel world;
+ public final PrioritisedThreadPool workers;
+ public final PrioritisedThreadPool.PrioritisedPoolExecutor lightExecutor;
+ public final PrioritisedThreadPool.PrioritisedPoolExecutor genExecutor;
+ public final PrioritisedThreadPool.PrioritisedPoolExecutor parallelGenExecutor;
+ public final PrioritisedThreadPool.PrioritisedPoolExecutor loadExecutor;
+
+ private final PrioritisedThreadedTaskQueue mainThreadExecutor = new PrioritisedThreadedTaskQueue();
+
+ final ReentrantLock schedulingLock = new ReentrantLock();
+ public final ChunkHolderManager chunkHolderManager;
+
+ static {
+ ChunkStatus.EMPTY.writeRadius = 0;
+ ChunkStatus.STRUCTURE_STARTS.writeRadius = 0;
+ ChunkStatus.STRUCTURE_REFERENCES.writeRadius = 0;
+ ChunkStatus.BIOMES.writeRadius = 0;
+ ChunkStatus.NOISE.writeRadius = 0;
+ ChunkStatus.SURFACE.writeRadius = 0;
+ ChunkStatus.CARVERS.writeRadius = 0;
+ ChunkStatus.LIQUID_CARVERS.writeRadius = 0;
+ ChunkStatus.FEATURES.writeRadius = 1;
+ ChunkStatus.LIGHT.writeRadius = 1;
+ ChunkStatus.SPAWN.writeRadius = 0;
+ ChunkStatus.HEIGHTMAPS.writeRadius = 0;
+ ChunkStatus.FULL.writeRadius = 0;
+
+ /*
+ It's important that the neighbour read radius is taken into account. If _any_ later status is using some chunk as
+ a neighbour, it must also be safe if that neighbour is being generated. i.e. for any status later than FEATURES,
+ for a status to be parallel safe it must not read the block data from its neighbours.
+ */
+ final List<ChunkStatus> parallelCapableStatus = Arrays.asList(
+ // No-op executor.
+ ChunkStatus.EMPTY,
+
+ // This is parallel capable, as CB has fixed the concurrency issue with stronghold generations.
+ // Does not touch neighbour chunks.
+ // TODO On another note, what the fuck is StructureFeatureManager.StructureCheck and why is it used? it's leaking
+ ChunkStatus.STRUCTURE_STARTS,
+
+ // Surprisingly this is parallel capable. It is simply reading the already-created structure starts
+ // into the structure references for the chunk. So while it reads from its neighbours, its neighbours
+ // will not change, even if executed in parallel.
+ ChunkStatus.STRUCTURE_REFERENCES,
+
+ // Safe. Mojang runs it in parallel as well.
+ ChunkStatus.BIOMES,
+
+ // Safe. Mojang runs it in parallel as well.
+ ChunkStatus.NOISE,
+
+ // Parallel safe. Only touches the target chunk. Biome retrieval is now noise based, which is
+ // completely thread-safe.
+ ChunkStatus.SURFACE,
+
+ // No global state is modified in the carvers. It only touches the specified chunk. So it is parallel safe.
+ ChunkStatus.CARVERS,
+
+ // No-op executor. Was replaced in 1.18 with carvers, I think.
+ ChunkStatus.LIQUID_CARVERS,
+
+ // FEATURES is not parallel safe. It writes to neighbours.
+
+ // LIGHT is not parallel safe. It also doesn't run on the generation executor, so no point.
+
+ // Only writes to the specified chunk. State is not read by later statuses. Parallel safe.
+ // Note: it may look unsafe because it writes to a worldgenregion, but the region size is always 0 -
+ // see the task margin.
+ // However, if the neighbouring FEATURES chunk is unloaded, but then fails to load in again (for whatever
+ // reason), then it would write to this chunk - and since this status reads blocks from itself, it's not
+ // safe to execute this in parallel.
+ // SPAWN
+
+ // No-op executor.
+ ChunkStatus.HEIGHTMAPS
+
+ // FULL is executed on main.
+ );
+
+ for (final ChunkStatus status : parallelCapableStatus) {
+ status.isParallelCapable = true;
+ }
+ }
+
+ public ChunkTaskScheduler(final ServerLevel world, final PrioritisedThreadPool workers) {
+ this.world = world;
+ this.workers = workers;
+
+ final String worldName = world.getWorld().getName();
+ this.genExecutor = workers.createExecutor("Chunk single-threaded generation executor for world '" + worldName + "'", 1);
+ // same as genExecutor, as there are race conditions between updating blocks in FEATURE status while lighting chunks
+ this.lightExecutor = this.genExecutor;
+ this.parallelGenExecutor = newChunkSystemGenParallelism <= 1 ? this.genExecutor
+ : workers.createExecutor("Chunk parallel generation executor for world '" + worldName + "'", newChunkSystemGenParallelism);
+ this.loadExecutor = workers.createExecutor("Chunk load executor for world '" + worldName + "'", newChunkSystemLoadParallelism);
+ this.chunkHolderManager = new ChunkHolderManager(world, this);
+ }
+
+ private final AtomicBoolean failedChunkSystem = new AtomicBoolean();
+
+ public static Object stringIfNull(final Object obj) {
+ return obj == null ? "null" : obj;
+ }
+
+ public void unrecoverableChunkSystemFailure(final int chunkX, final int chunkZ, final Map<String, Object> objectsOfInterest, final Throwable thr) {
+ final NewChunkHolder holder = this.chunkHolderManager.getChunkHolder(chunkX, chunkZ);
+ LOGGER.error("Chunk system error at chunk (" + chunkX + "," + chunkZ + "), holder: " + holder + ", exception:", new Throwable(thr));
+
+ if (this.failedChunkSystem.getAndSet(true)) {
+ return;
+ }
+
+ final ReportedException reportedException = thr instanceof ReportedException ? (ReportedException)thr : new ReportedException(new CrashReport("Chunk system error", thr));
+
+ CrashReportCategory crashReportCategory = reportedException.getReport().addCategory("Chunk system details");
+ crashReportCategory.setDetail("Chunk coordinate", new ChunkPos(chunkX, chunkZ).toString());
+ crashReportCategory.setDetail("ChunkHolder", Objects.toString(holder));
+ crashReportCategory.setDetail("unrecoverableChunkSystemFailure caller thread", Thread.currentThread().getName());
+
+ crashReportCategory = reportedException.getReport().addCategory("Chunk System Objects of Interest");
+ for (final Map.Entry<String, Object> entry : objectsOfInterest.entrySet()) {
+ if (entry.getValue() instanceof Throwable thrObject) {
+ crashReportCategory.setDetailError(Objects.toString(entry.getKey()), thrObject);
+ } else {
+ crashReportCategory.setDetail(Objects.toString(entry.getKey()), Objects.toString(entry.getValue()));
+ }
+ }
+
+ final Runnable crash = () -> {
+ throw new RuntimeException("Chunk system crash propagated from unrecoverableChunkSystemFailure", reportedException);
+ };
+
+ // this may not be good enough, specifically thanks to stupid ass plugins swallowing exceptions
+ this.scheduleChunkTask(chunkX, chunkZ, crash, PrioritisedExecutor.Priority.BLOCKING);
+ // so, make the main thread pick it up
+ MinecraftServer.chunkSystemCrash = new RuntimeException("Chunk system crash propagated from unrecoverableChunkSystemFailure", reportedException);
+ }
+
+ public boolean executeMainThreadTask() {
+ TickThread.ensureTickThread("Cannot execute main thread task off-main");
+ return this.mainThreadExecutor.executeTask();
+ }
+
+ public void raisePriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
+ this.chunkHolderManager.raisePriority(x, z, priority);
+ }
+
+ public void setPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
+ this.chunkHolderManager.setPriority(x, z, priority);
+ }
+
+ public void lowerPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) {
+ this.chunkHolderManager.lowerPriority(x, z, priority);
+ }
+
+ private final AtomicLong chunkLoadCounter = new AtomicLong();
+
+ public void scheduleTickingState(final int chunkX, final int chunkZ, final ChunkHolder.FullChunkStatus toStatus,
+ final boolean addTicket, final PrioritisedExecutor.Priority priority,
+ final Consumer<LevelChunk> onComplete) {
+ if (!TickThread.isTickThread()) {
+ this.scheduleChunkTask(chunkX, chunkZ, () -> {
+ ChunkTaskScheduler.this.scheduleTickingState(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
+ }, priority);
+ return;
+ }
+ if (this.chunkHolderManager.ticketLock.isHeldByCurrentThread()) {
+ throw new IllegalStateException("Cannot schedule chunk load during ticket level update");
+ }
+ if (this.schedulingLock.isHeldByCurrentThread()) {
+ throw new IllegalStateException("Cannot schedule chunk loading recursively");
+ }
+
+ if (toStatus == ChunkHolder.FullChunkStatus.INACCESSIBLE) {
+ throw new IllegalArgumentException("Cannot wait for INACCESSIBLE status");
+ }
+
+ final int minLevel = 33 - (toStatus.ordinal() - 1);
+ final Long chunkReference = addTicket ? Long.valueOf(this.chunkLoadCounter.getAndIncrement()) : null;
+ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
+
+ if (addTicket) {
+ this.chunkHolderManager.addTicketAtLevel(TicketType.CHUNK_LOAD, chunkKey, minLevel, chunkReference);
+ this.chunkHolderManager.processTicketUpdates();
+ }
+
+ final Consumer<LevelChunk> loadCallback = (final LevelChunk chunk) -> {
+ try {
+ if (onComplete != null) {
+ onComplete.accept(chunk);
+ }
+ } finally {
+ if (addTicket) {
+ ChunkTaskScheduler.this.chunkHolderManager.addAndRemoveTickets(chunkKey,
+ TicketType.UNKNOWN, minLevel, new ChunkPos(chunkKey),
+ TicketType.CHUNK_LOAD, minLevel, chunkReference
+ );
+ }
+ }
+ };
+
+ final boolean scheduled;
+ final LevelChunk chunk;
+ this.chunkHolderManager.ticketLock.lock();
+ try {
+ this.schedulingLock.lock();
+ try {
+ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkKey);
+ if (chunkHolder == null || chunkHolder.getTicketLevel() > minLevel) {
+ scheduled = false;
+ chunk = null;
+ } else {
+ final ChunkHolder.FullChunkStatus currStatus = chunkHolder.getChunkStatus();
+ if (currStatus.isOrAfter(toStatus)) {
+ scheduled = false;
+ chunk = (LevelChunk)chunkHolder.getCurrentChunk();
+ } else {
+ scheduled = true;
+ chunk = null;
+
+ final int radius = toStatus.ordinal() - 1; // 0 -> BORDER, 1 -> TICKING, 2 -> ENTITY_TICKING
+ for (int dz = -radius; dz <= radius; ++dz) {
+ for (int dx = -radius; dx <= radius; ++dx) {
+ final NewChunkHolder neighbour =
+ (dx | dz) == 0 ? chunkHolder : this.chunkHolderManager.getChunkHolder(dx + chunkX, dz + chunkZ);
+ if (neighbour != null) {
+ neighbour.raisePriority(priority);
+ }
+ }
+ }
+
+ // ticket level should schedule for us
+ chunkHolder.addFullStatusConsumer(toStatus, loadCallback);
+ }
+ }
+ } finally {
+ this.schedulingLock.unlock();
+ }
+ } finally {
+ this.chunkHolderManager.ticketLock.unlock();
+ }
+
+ if (!scheduled) {
+ // couldn't schedule
+ try {
+ loadCallback.accept(chunk);
+ } catch (final ThreadDeath thr) {
+ throw thr;
+ } catch (final Throwable thr) {
+ LOGGER.error("Failed to process chunk full status callback", thr);
+ }
+ }
+ }
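
Editor's note: the minLevel computation above, 33 - (ordinal - 1), maps each full-chunk status to the vanilla ticket level that guarantees it (33 is the "full chunk" level). A tiny illustration; the enum here mirrors ChunkHolder.FullChunkStatus's ordering but is a local stand-in:

final class FullStatusTicketLevels {
    enum FullStatus { INACCESSIBLE, BORDER, TICKING, ENTITY_TICKING }

    public static void main(final String[] args) {
        for (final FullStatus status : new FullStatus[] {
                FullStatus.BORDER, FullStatus.TICKING, FullStatus.ENTITY_TICKING }) {
            // prints: BORDER -> 33, TICKING -> 32, ENTITY_TICKING -> 31
            System.out.println(status + " -> ticket level " + (33 - (status.ordinal() - 1)));
        }
    }
}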
+
+ public void scheduleChunkLoad(final int chunkX, final int chunkZ, final boolean gen, final ChunkStatus toStatus, final boolean addTicket,
+ final PrioritisedExecutor.Priority priority, final Consumer<ChunkAccess> onComplete) {
+ if (gen) {
+ this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
+ return;
+ }
+ this.scheduleChunkLoad(chunkX, chunkZ, ChunkStatus.EMPTY, addTicket, priority, (final ChunkAccess chunk) -> {
+ if (chunk == null) {
+ onComplete.accept(null);
+ } else {
+ if (chunk.getStatus().isOrAfter(toStatus)) {
+ this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
+ } else {
+ onComplete.accept(null);
+ }
+ }
+ });
+ }
+
+ public void scheduleChunkLoad(final int chunkX, final int chunkZ, final ChunkStatus toStatus, final boolean addTicket,
+ final PrioritisedExecutor.Priority priority, final Consumer<ChunkAccess> onComplete) {
+ if (!TickThread.isTickThread()) {
+ this.scheduleChunkTask(chunkX, chunkZ, () -> {
+ ChunkTaskScheduler.this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete);
+ }, priority);
+ return;
+ }
+ if (this.chunkHolderManager.ticketLock.isHeldByCurrentThread()) {
+ throw new IllegalStateException("Cannot schedule chunk load during ticket level update");
+ }
+ if (this.schedulingLock.isHeldByCurrentThread()) {
+ throw new IllegalStateException("Cannot schedule chunk loading recursively");
+ }
+
+ if (toStatus == ChunkStatus.FULL) {
+ this.scheduleTickingState(chunkX, chunkZ, ChunkHolder.FullChunkStatus.BORDER, addTicket, priority, (Consumer)onComplete);
+ return;
+ }
+
+ final int minLevel = 33 + ChunkStatus.getDistance(toStatus);
+ final Long chunkReference = addTicket ? Long.valueOf(this.chunkLoadCounter.getAndIncrement()) : null;
+ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ);
+
+ if (addTicket) {
+ this.chunkHolderManager.addTicketAtLevel(TicketType.CHUNK_LOAD, chunkKey, minLevel, chunkReference);
+ this.chunkHolderManager.processTicketUpdates();
+ }
+
+ final Consumer<ChunkAccess> loadCallback = (final ChunkAccess chunk) -> {
+ try {
+ if (onComplete != null) {
+ onComplete.accept(chunk);
+ }
+ } finally {
+ if (addTicket) {
+ ChunkTaskScheduler.this.chunkHolderManager.addAndRemoveTickets(chunkKey,
+ TicketType.UNKNOWN, minLevel, new ChunkPos(chunkKey),
+ TicketType.CHUNK_LOAD, minLevel, chunkReference
+ );
+ }
+ }
+ };
+
+ final List<ChunkProgressionTask> tasks = new ArrayList<>();
+
+ final boolean scheduled;
+ final ChunkAccess chunk;
+ this.chunkHolderManager.ticketLock.lock();
+ try {
+ this.schedulingLock.lock();
+ try {
+ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkKey);
+ if (chunkHolder == null || chunkHolder.getTicketLevel() > minLevel) {
+ scheduled = false;
+ chunk = null;
+ } else {
+ final ChunkStatus genStatus = chunkHolder.getCurrentGenStatus();
+ if (genStatus != null && genStatus.isOrAfter(toStatus)) {
+ scheduled = false;
+ chunk = chunkHolder.getCurrentChunk();
+ } else {
+ scheduled = true;
+ chunk = null;
+ chunkHolder.raisePriority(priority);
+
+ if (!chunkHolder.upgradeGenTarget(toStatus)) {
+ this.schedule(chunkX, chunkZ, toStatus, chunkHolder, tasks);
+ }
+ chunkHolder.addStatusConsumer(toStatus, loadCallback);
+ }
+ }
+ } finally {
+ this.schedulingLock.unlock();
+ }
+ } finally {
+ this.chunkHolderManager.ticketLock.unlock();
+ }
+
+ for (int i = 0, len = tasks.size(); i < len; ++i) {
+ tasks.get(i).schedule();
+ }
+
+ if (!scheduled) {
+ // couldn't schedule
+ try {
+ loadCallback.accept(chunk);
+ } catch (final ThreadDeath thr) {
+ throw thr;
+ } catch (final Throwable thr) {
+ LOGGER.error("Failed to process chunk status callback", thr);
+ }
+ }
+ }
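
Editor's note: a hypothetical call site for the method above. The wrapper method is invented for illustration; scheduleChunkLoad, the ServerLevel.chunkTaskScheduler field, the ChunkStatus constant and the priority enum are all from the patch.

// Hypothetical helper, not part of the patch: request an async load to FULL
// with a CHUNK_LOAD ticket held for the duration, reacting on the tick thread.
static void loadFullChunk(final ServerLevel world, final int chunkX, final int chunkZ) {
    world.chunkTaskScheduler.scheduleChunkLoad(
        chunkX, chunkZ, ChunkStatus.FULL, true, // addTicket = hold a ticket while loading
        PrioritisedExecutor.Priority.NORMAL,
        (final ChunkAccess chunk) -> {
            // runs on the tick thread; chunk is null if the load could not be scheduled
        }
    );
}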
+
+ private ChunkProgressionTask createTask(final int chunkX, final int chunkZ, final ChunkAccess chunk,
+ final NewChunkHolder chunkHolder, final List<ChunkAccess> neighbours,
+ final ChunkStatus toStatus, final PrioritisedExecutor.Priority initialPriority) {
+ if (toStatus == ChunkStatus.EMPTY) {
+ return new ChunkLoadTask(this, this.world, chunkX, chunkZ, chunkHolder, initialPriority);
+ }
+ if (toStatus == ChunkStatus.LIGHT) {
+ return new ChunkLightTask(this, this.world, chunkX, chunkZ, chunk, initialPriority);
+ }
+ if (toStatus == ChunkStatus.FULL) {
+ return new ChunkFullTask(this, this.world, chunkX, chunkZ, chunkHolder, chunk, initialPriority);
+ }
+
+ return new ChunkUpgradeGenericStatusTask(this, this.world, chunkX, chunkZ, chunk, neighbours, toStatus, initialPriority);
+ }
+
+ ChunkProgressionTask schedule(final int chunkX, final int chunkZ, final ChunkStatus targetStatus, final NewChunkHolder chunkHolder,
+ final List<ChunkProgressionTask> allTasks) {
+ return this.schedule(chunkX, chunkZ, targetStatus, chunkHolder, allTasks, chunkHolder.getEffectivePriority());
+ }
+
+ // rets new task scheduled for the _specified_ chunk
+ // note: this must hold the scheduling lock
+ // minPriority is only used to pass the priority through to neighbours, as priority calculation has not yet been done
+ // schedule will ignore the generation target, so it should be checked by the caller to ensure the target is not regressed!
+ private ChunkProgressionTask schedule(final int chunkX, final int chunkZ, final ChunkStatus targetStatus,
+ final NewChunkHolder chunkHolder, final List<ChunkProgressionTask> allTasks,
+ final PrioritisedExecutor.Priority minPriority) {
+ if (!this.schedulingLock.isHeldByCurrentThread()) {
+ throw new IllegalStateException("Not holding scheduling lock");
+ }
+
+ if (chunkHolder.hasGenerationTask()) {
+ chunkHolder.upgradeGenTarget(targetStatus);
+ return null;
+ }
+
+ final PrioritisedExecutor.Priority requestedPriority = PrioritisedExecutor.Priority.max(minPriority, chunkHolder.getEffectivePriority());
+ final ChunkStatus currentGenStatus = chunkHolder.getCurrentGenStatus();
+ final ChunkAccess chunk = chunkHolder.getCurrentChunk();
+
+ if (currentGenStatus == null) {
+ // not yet loaded
+ final ChunkProgressionTask task = this.createTask(
+ chunkX, chunkZ, chunk, chunkHolder, Collections.emptyList(), ChunkStatus.EMPTY, requestedPriority
+ );
+
+ allTasks.add(task);
+
+ final List<NewChunkHolder> chunkHolderNeighbours = new ArrayList<>(1);
+ chunkHolderNeighbours.add(chunkHolder);
+
+ chunkHolder.setGenerationTarget(targetStatus);
+ chunkHolder.setGenerationTask(task, ChunkStatus.EMPTY, chunkHolderNeighbours);
+
+ return task;
+ }
+
+ if (currentGenStatus.isOrAfter(targetStatus)) {
+ // nothing to do
+ return null;
+ }
+
+ // we know for sure now that we want to schedule _something_, so set the target
+ chunkHolder.setGenerationTarget(targetStatus);
+
+ final ChunkStatus chunkRealStatus = chunk.getStatus();
+ final ChunkStatus toStatus = currentGenStatus.getNextStatus();
+
+ // if this chunk has already generated up to or past the specified status, then we don't
+ // need the neighbours AT ALL.
+ final int neighbourReadRadius = chunkRealStatus.isOrAfter(toStatus) ? toStatus.loadRange : toStatus.getRange();
+
+ boolean unGeneratedNeighbours = false;
+
+ // copied from MCUtil.getSpiralOutChunks
+ for (int r = 1; r <= neighbourReadRadius; r++) {
+ int x = -r;
+ int z = r;
+
+ // Iterates the edge of half of the box; then negates for other half.
+ while (x <= r && z > -r) {
+ final int radius = Math.max(Math.abs(x), Math.abs(z));
+ final ChunkStatus requiredNeighbourStatus = ChunkMap.getDependencyStatus(toStatus, radius);
+
+ unGeneratedNeighbours |= this.checkNeighbour(
+ chunkX + x, chunkZ + z, requiredNeighbourStatus, chunkHolder, allTasks, requestedPriority
+ );
+ unGeneratedNeighbours |= this.checkNeighbour(
+ chunkX - x, chunkZ - z, requiredNeighbourStatus, chunkHolder, allTasks, requestedPriority
+ );
+
+ if (x < r) {
+ x++;
+ } else {
+ z--;
+ }
+ }
+ }
+
+ if (unGeneratedNeighbours) {
+ // can't schedule, but neighbour completion will schedule for us when they're ALL done
+
+ // propagate our priority to neighbours
+ chunkHolder.recalculateNeighbourPriorities();
+ return null;
+ }
+
+ // need to gather neighbours
+
+ final List<ChunkAccess> neighbours;
+ final List<NewChunkHolder> chunkHolderNeighbours;
+ if (neighbourReadRadius <= 0) {
+ neighbours = new ArrayList<>(1);
+ chunkHolderNeighbours = new ArrayList<>(1);
+ neighbours.add(chunk);
+ chunkHolderNeighbours.add(chunkHolder);
+ } else {
+ // the iteration order is _very_ important, as all generation statuses expect a certain order such that:
+ // chunkAtRelative = neighbours.get(relX + relZ * (2 * radius + 1))
+ neighbours = new ArrayList<>((2 * neighbourReadRadius + 1) * (2 * neighbourReadRadius + 1));
+ chunkHolderNeighbours = new ArrayList<>((2 * neighbourReadRadius + 1) * (2 * neighbourReadRadius + 1));
+ for (int dz = -neighbourReadRadius; dz <= neighbourReadRadius; ++dz) {
+ for (int dx = -neighbourReadRadius; dx <= neighbourReadRadius; ++dx) {
+ final NewChunkHolder holder = (dx | dz) == 0 ? chunkHolder : this.chunkHolderManager.getChunkHolder(dx + chunkX, dz + chunkZ);
+ neighbours.add(holder.getChunkForNeighbourAccess());
+ chunkHolderNeighbours.add(holder);
+ }
+ }
+ }
+
+ final ChunkProgressionTask task = this.createTask(chunkX, chunkZ, chunk, chunkHolder, neighbours, toStatus, chunkHolder.getEffectivePriority());
+ allTasks.add(task);
+
+ chunkHolder.setGenerationTask(task, toStatus, chunkHolderNeighbours);
+
+ return task;
+ }
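
Editor's note: the neighbour loop above (copied in the patch from MCUtil.getSpiralOutChunks) walks each square ring by iterating half of its edge and mirroring each offset through the origin, so every cell on the ring is visited exactly once. A standalone restatement that just prints the visited offsets:

final class RingWalk {
    public static void main(final String[] args) {
        final int maxRadius = 2;
        for (int r = 1; r <= maxRadius; r++) {
            int x = -r;
            int z = r;
            // walk half the ring's edge; the negated pair covers the other half
            while (x <= r && z > -r) {
                System.out.println("(" + x + "," + z + ") and (" + (-x) + "," + (-z) + ")");
                if (x < r) {
                    x++;
                } else {
                    z--;
                }
            }
        }
    }
}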
+
+ // rets true if the neighbour is not at the required status, false otherwise
+ private boolean checkNeighbour(final int chunkX, final int chunkZ, final ChunkStatus requiredStatus, final NewChunkHolder center,
+ final List<ChunkProgressionTask> tasks, final PrioritisedExecutor.Priority minPriority) {
+ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkX, chunkZ);
+
+ if (chunkHolder == null) {
+ throw new IllegalStateException("Missing chunkholder when required");
+ }
+
+ final ChunkStatus holderStatus = chunkHolder.getCurrentGenStatus();
+ if (holderStatus != null && holderStatus.isOrAfter(requiredStatus)) {
+ return false;
+ }
+
+ if (chunkHolder.hasFailedGeneration()) {
+ return true;
+ }
+
+ center.addGenerationBlockingNeighbour(chunkHolder);
+ chunkHolder.addWaitingNeighbour(center, requiredStatus);
+
+ if (chunkHolder.upgradeGenTarget(requiredStatus)) {
+ return true;
+ }
+
+ // not at status required, so we need to schedule its generation
+ this.schedule(
+ chunkX, chunkZ, requiredStatus, chunkHolder, tasks, minPriority
+ );
+
+ return true;
+ }
+
+ /**
+ * @deprecated Chunk tasks must be tied to coordinates in the future
+ */
+ @Deprecated
+ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final Runnable run) {
+ return this.scheduleChunkTask(run, PrioritisedExecutor.Priority.NORMAL);
+ }
+
+ /**
+ * @deprecated Chunk tasks must be tied to coordinates in the future
+ */
+ @Deprecated
+ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final Runnable run, final PrioritisedExecutor.Priority priority) {
+ return this.mainThreadExecutor.queueRunnable(run, priority);
+ }
+
+ public PrioritisedExecutor.PrioritisedTask createChunkTask(final int chunkX, final int chunkZ, final Runnable run) {
+ return this.createChunkTask(chunkX, chunkZ, run, PrioritisedExecutor.Priority.NORMAL);
+ }
+
+ public PrioritisedExecutor.PrioritisedTask createChunkTask(final int chunkX, final int chunkZ, final Runnable run,
+ final PrioritisedExecutor.Priority priority) {
+ return this.mainThreadExecutor.createTask(run, priority);
+ }
+
+ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final int chunkX, final int chunkZ, final Runnable run) {
+ return this.mainThreadExecutor.queueRunnable(run);
+ }
+
+ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final int chunkX, final int chunkZ, final Runnable run,
+ final PrioritisedExecutor.Priority priority) {
+ return this.mainThreadExecutor.queueRunnable(run, priority);
+ }
+
+ public void executeTasksUntil(final BooleanSupplier exit) {
+ if (Bukkit.isPrimaryThread()) {
+ this.mainThreadExecutor.executeConditionally(exit);
+ } else {
+ long counter = 1L;
+ while (!exit.getAsBoolean()) {
+ counter = ConcurrentUtil.linearLongBackoff(counter, 100_000L, 5_000_000L); // 100us, 5ms
+ }
+ }
+ }
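
Editor's note: ConcurrentUtil.linearLongBackoff's exact contract is internal to the concurrentutil library. One plausible reading of the off-main branch above, restated with JDK primitives (the growth rule here is an assumption, not taken from the library):

import java.util.concurrent.locks.LockSupport;
import java.util.function.BooleanSupplier;

final class LinearBackoffWait {
    // Park-and-retry until exit flips, growing the park time linearly
    // from 100us up to a 5ms cap, mirroring the constants used above.
    static void waitUntil(final BooleanSupplier exit) {
        final long stepNanos = 100_000L;  // 100us
        final long maxNanos = 5_000_000L; // 5ms
        long delayNanos = stepNanos;
        while (!exit.getAsBoolean()) {
            LockSupport.parkNanos(delayNanos);
            delayNanos = Math.min(maxNanos, delayNanos + stepNanos);
        }
    }
}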
+
+ public boolean halt(final boolean sync, final long maxWaitNS) {
+ this.lightExecutor.halt();
+ this.genExecutor.halt();
+ this.parallelGenExecutor.halt();
+ this.loadExecutor.halt();
+ final long time = System.nanoTime();
+ if (sync) {
+ for (long failures = 9L;; failures = ConcurrentUtil.linearLongBackoff(failures, 500_000L, 50_000_000L)) {
+ if (
+ !this.lightExecutor.isActive() &&
+ !this.genExecutor.isActive() &&
+ !this.parallelGenExecutor.isActive() &&
+ !this.loadExecutor.isActive()
+ ) {
+ return true;
+ }
+ if ((System.nanoTime() - time) >= maxWaitNS) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+
+ public static final ArrayDeque<ChunkInfo> WAITING_CHUNKS = new ArrayDeque<>(); // stack
+
+ public static final class ChunkInfo {
+
+ public final int chunkX;
+ public final int chunkZ;
+ public final ServerLevel world;
+
+ public ChunkInfo(final int chunkX, final int chunkZ, final ServerLevel world) {
+ this.chunkX = chunkX;
+ this.chunkZ = chunkZ;
+ this.world = world;
+ }
+
+ @Override
+ public String toString() {
+ return "[( " + this.chunkX + "," + this.chunkZ + ") in '" + this.world.getWorld().getName() + "']";
+ }
+ }
+
+ public static void pushChunkWait(final ServerLevel world, final int chunkX, final int chunkZ) {
+ synchronized (WAITING_CHUNKS) {
+ WAITING_CHUNKS.push(new ChunkInfo(chunkX, chunkZ, world));
+ }
+ }
+
+ public static void popChunkWait() {
+ synchronized (WAITING_CHUNKS) {
+ WAITING_CHUNKS.pop();
+ }
+ }
+
+ public static ChunkInfo[] getChunkInfos() {
+ synchronized (WAITING_CHUNKS) {
+ return WAITING_CHUNKS.toArray(new ChunkInfo[0]);
+ }
+ }
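
Editor's note: callers that block on a chunk are expected to bracket the wait with pushChunkWait/popChunkWait so the watchdog dump below can report what the server is stuck on. A hypothetical caller (the wrapper method is invented; the static methods and executeTasksUntil are from the patch), using try/finally so the stack stays balanced even if the wait throws:

static void awaitChunk(final ServerLevel world, final int chunkX, final int chunkZ,
                       final java.util.function.BooleanSupplier chunkReady) {
    ChunkTaskScheduler.pushChunkWait(world, chunkX, chunkZ);
    try {
        world.chunkTaskScheduler.executeTasksUntil(chunkReady);
    } finally {
        ChunkTaskScheduler.popChunkWait(); // keep the wait stack balanced on error
    }
}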
+
+ public static void dumpAllChunkLoadInfo(final boolean longPrint) {
+ final ChunkInfo[] chunkInfos = getChunkInfos();
+ if (chunkInfos.length > 0) {
+ LOGGER.error("Chunk wait task info below: ");
+ for (final ChunkInfo chunkInfo : chunkInfos) {
+ final NewChunkHolder holder = chunkInfo.world.chunkTaskScheduler.chunkHolderManager.getChunkHolder(chunkInfo.chunkX, chunkInfo.chunkZ);
+ LOGGER.error("Chunk wait: " + chunkInfo);
+ LOGGER.error("Chunk holder: " + holder);
+ }
+
+ if (longPrint) {
+ final File file = new File(new File(new File("."), "debug"), "chunks-watchdog.txt");
+ LOGGER.error("Writing chunk information dump to " + file);
+ try {
+ MCUtil.dumpChunks(file, true);
+ LOGGER.error("Successfully written chunk information!");
|
|
+ } catch (final Throwable thr) {
+ MinecraftServer.LOGGER.warn("Failed to dump chunk information to file " + file.toString(), thr);
+ }
+ }
+ }
+ }
+}
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..73ce0909bd89244835a0d0f2030a25871461f1e0
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
@@ -0,0 +1,209 @@
+package io.papermc.paper.chunk.system.scheduling;
+
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import com.mojang.datafixers.util.Either;
+import com.mojang.logging.LogUtils;
+import net.minecraft.server.level.ChunkHolder;
+import net.minecraft.server.level.ChunkMap;
+import net.minecraft.server.level.ServerChunkCache;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.world.level.chunk.ChunkAccess;
+import net.minecraft.world.level.chunk.ChunkStatus;
+import net.minecraft.world.level.chunk.ProtoChunk;
+import org.slf4j.Logger;
+import java.lang.invoke.VarHandle;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+
+public final class ChunkUpgradeGenericStatusTask extends ChunkProgressionTask implements Runnable {
+
+ private static final Logger LOGGER = LogUtils.getClassLogger();
+
+ protected final ChunkAccess fromChunk;
+ protected final ChunkStatus fromStatus;
+ protected final ChunkStatus toStatus;
+ protected final List<ChunkAccess> neighbours;
+
+ protected final PrioritisedExecutor.PrioritisedTask generateTask;
+
+ public ChunkUpgradeGenericStatusTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
+ final int chunkZ, final ChunkAccess chunk, final List<ChunkAccess> neighbours,
+ final ChunkStatus toStatus, final PrioritisedExecutor.Priority priority) {
+ super(scheduler, world, chunkX, chunkZ);
+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+ this.fromChunk = chunk;
+ this.fromStatus = chunk.getStatus();
+ this.toStatus = toStatus;
+ this.neighbours = neighbours;
+ this.generateTask = (this.toStatus.isParallelCapable ? this.scheduler.parallelGenExecutor : this.scheduler.genExecutor)
+ .createTask(this, priority);
+ }
+
+ @Override
+ public ChunkStatus getTargetStatus() {
+ return this.toStatus;
+ }
+
+ private boolean isEmptyTask() {
+ // must use fromStatus here to avoid any race condition with run() overwriting the status
+ final boolean generation = !this.fromStatus.isOrAfter(this.toStatus);
+ return (generation && this.toStatus.isEmptyGenStatus()) || (!generation && this.toStatus.isEmptyLoadStatus());
+ }
+
+ @Override
+ public void run() {
+ final ChunkAccess chunk = this.fromChunk;
+
+ final ServerChunkCache serverChunkCache = this.world.chunkSource;
+ final ChunkMap chunkMap = serverChunkCache.chunkMap;
+
+ final CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completeFuture;
+
+ final boolean generation;
+ boolean completing = false;
+
+ // note: should optimise the case where the chunk does not need to execute the status, because
+ // schedule() calls this synchronously if it will run through that path
+
+ try {
+ generation = !chunk.getStatus().isOrAfter(this.toStatus);
+ if (generation) {
+ if (this.toStatus.isEmptyGenStatus()) {
+ if (chunk instanceof ProtoChunk) {
+ ((ProtoChunk)chunk).setStatus(this.toStatus);
+ }
+ completing = true;
+ this.complete(chunk, null);
+ return;
+ }
+ completeFuture = this.toStatus.generate(Runnable::run, this.world, chunkMap.generator, chunkMap.structureTemplateManager,
+ serverChunkCache.getLightEngine(), null, this.neighbours, false)
+ .whenComplete((final Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either, final Throwable throwable) -> {
+ final ChunkAccess newChunk = (either == null) ? null : either.left().orElse(null);
+ if (newChunk instanceof ProtoChunk) {
+ ((ProtoChunk)newChunk).setStatus(ChunkUpgradeGenericStatusTask.this.toStatus);
+ }
+ }
+ );
+ } else {
+ if (this.toStatus.isEmptyLoadStatus()) {
+ completing = true;
+ this.complete(chunk, null);
+ return;
+ }
+ completeFuture = this.toStatus.load(this.world, chunkMap.structureTemplateManager, serverChunkCache.getLightEngine(), null, chunk);
+ }
+ } catch (final Throwable throwable) {
+ if (!completing) {
+ this.complete(null, throwable);
+
+ if (throwable instanceof ThreadDeath) {
+ throw (ThreadDeath)throwable;
+ }
+ return;
+ }
+
+ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
+ "Target status", ChunkTaskScheduler.stringIfNull(this.toStatus),
+ "From status", ChunkTaskScheduler.stringIfNull(this.fromStatus),
+ "Generation task", this
+ ), throwable);
+
+ if (!(throwable instanceof ThreadDeath)) {
+ LOGGER.error("Failed to complete status for chunk: status:" + this.toStatus + ", chunk: (" + this.chunkX + "," + this.chunkZ + "), world: " + this.world.getWorld().getName(), throwable);
+ } else {
+ // ensure the chunk system can respond, then die
+ throw (ThreadDeath)throwable;
+ }
+ return;
+ }
+
+ if (!completeFuture.isDone() && !this.toStatus.warnedAboutNoImmediateComplete.getAndSet(true)) {
+ LOGGER.warn("Future status not complete after scheduling: " + this.toStatus.toString() + ", generate: " + generation);
+ }
+
+ final Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either;
+ final ChunkAccess newChunk;
+
+ try {
+ either = completeFuture.join();
+ newChunk = (either == null) ? null : either.left().orElse(null);
+ } catch (final Throwable throwable) {
+ this.complete(null, throwable);
+ // ensure the chunk system can respond, then die
+ if (throwable instanceof ThreadDeath) {
+ throw (ThreadDeath)throwable;
+ }
+ return;
+ }
+
+ if (newChunk == null) {
+ this.complete(null, new IllegalStateException("Chunk for status: " + ChunkUpgradeGenericStatusTask.this.toStatus.toString() + ", generation: " + generation + " should not be null! Either: " + either).fillInStackTrace());
+ return;
+ }
+
+ this.complete(newChunk, null);
+ }
+
+ protected volatile boolean scheduled;
+ protected static final VarHandle SCHEDULED_HANDLE = ConcurrentUtil.getVarHandle(ChunkUpgradeGenericStatusTask.class, "scheduled", boolean.class);
+
+ @Override
+ public boolean isScheduled() {
+ return this.scheduled;
+ }
+
+ @Override
+ public void schedule() {
+ if ((boolean)SCHEDULED_HANDLE.getAndSet((ChunkUpgradeGenericStatusTask)this, true)) {
+ throw new IllegalStateException("Cannot double call schedule()");
+ }
+ if (this.isEmptyTask()) {
+ if (this.generateTask.cancel()) {
+ this.run();
+ }
+ } else {
+ this.generateTask.queue();
+ }
+ }
+
+ @Override
+ public void cancel() {
+ if (this.generateTask.cancel()) {
+ this.complete(null, null);
+ }
+ }
+
+ @Override
+ public PrioritisedExecutor.Priority getPriority() {
+ return this.generateTask.getPriority();
+ }
+
+ @Override
+ public void lowerPriority(final PrioritisedExecutor.Priority priority) {
+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+ this.generateTask.lowerPriority(priority);
+ }
+
+ @Override
+ public void setPriority(final PrioritisedExecutor.Priority priority) {
+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+ this.generateTask.setPriority(priority);
+ }
+
+ @Override
+ public void raisePriority(final PrioritisedExecutor.Priority priority) {
+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+ this.generateTask.raisePriority(priority);
+ }
+}
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java
|
|
new file mode 100644
|
|
index 0000000000000000000000000000000000000000..396d72c00e47cf1669ae20dc839c1c961b1f262a
|
|
--- /dev/null
|
|
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java
|
|
@@ -0,0 +1,746 @@
|
|
+package io.papermc.paper.chunk.system.scheduling;
|
|
+
|
|
+import ca.spottedleaf.concurrentutil.completable.Completable;
|
|
+import ca.spottedleaf.concurrentutil.executor.Cancellable;
|
|
+import ca.spottedleaf.concurrentutil.executor.standard.DelayedPrioritisedTask;
|
|
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
|
|
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
|
|
+import com.mojang.logging.LogUtils;
|
|
+import io.papermc.paper.chunk.system.io.RegionFileIOThread;
|
|
+import net.minecraft.nbt.CompoundTag;
|
|
+import net.minecraft.server.level.ServerLevel;
|
|
+import org.slf4j.Logger;
|
|
+import java.lang.invoke.VarHandle;
|
|
+import java.util.Map;
|
|
+import java.util.concurrent.atomic.AtomicBoolean;
|
|
+import java.util.concurrent.atomic.AtomicLong;
|
|
+import java.util.function.BiConsumer;
|
|
+
|
|
+public abstract class GenericDataLoadTask<OnMain,FinalCompletion> {
|
|
+
|
|
+ private static final Logger LOGGER = LogUtils.getClassLogger();
|
|
+
|
|
+ protected static final CompoundTag CANCELLED_DATA = new CompoundTag();
|
|
+
|
|
+ // reference count is the upper 32 bits
|
|
+ protected final AtomicLong stageAndReferenceCount = new AtomicLong(STAGE_NOT_STARTED);
|
|
+
|
|
+ protected static final long STAGE_MASK = 0xFFFFFFFFL;
|
|
+ protected static final long STAGE_CANCELLED = 0xFFFFFFFFL;
|
|
+ protected static final long STAGE_NOT_STARTED = 0L;
|
|
+ protected static final long STAGE_LOADING = 1L;
|
|
+ protected static final long STAGE_PROCESSING = 2L;
|
|
+ protected static final long STAGE_COMPLETED = 3L;
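+    // The lower 32 bits of stageAndReferenceCount hold one of the stages above and the upper
+    // 32 bits hold the reference count, so both can be read and updated with a single CAS.
+    // Illustration: a task in the loading stage that is referenced twice is encoded as
+    // (2L << 32) | STAGE_LOADING. STAGE_CANCELLED fills the entire stage mask, so it cannot
+    // collide with any real stage value.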
+
+    // for loading data off disk
+    protected final LoadDataFromDiskTask loadDataFromDiskTask;
+    // processing off-main
+    protected final PrioritisedExecutor.PrioritisedTask processOffMain;
+    // processing on-main
+    protected final PrioritisedExecutor.PrioritisedTask processOnMain;
+
+    protected final ChunkTaskScheduler scheduler;
+    protected final ServerLevel world;
+    protected final int chunkX;
+    protected final int chunkZ;
+    protected final RegionFileIOThread.RegionFileType type;
+
+    public GenericDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
+                               final int chunkZ, final RegionFileIOThread.RegionFileType type,
+                               final PrioritisedExecutor.Priority priority) {
+        this.scheduler = scheduler;
+        this.world = world;
+        this.chunkX = chunkX;
+        this.chunkZ = chunkZ;
+        this.type = type;
+
+        final ProcessOnMainTask mainTask;
+        if (this.hasOnMain()) {
+            mainTask = new ProcessOnMainTask();
+            this.processOnMain = this.createOnMain(mainTask, priority);
+        } else {
+            mainTask = null;
+            this.processOnMain = null;
+        }
+
+        final ProcessOffMainTask offMainTask;
+        if (this.hasOffMain()) {
+            offMainTask = new ProcessOffMainTask(mainTask);
+            this.processOffMain = this.createOffMain(offMainTask, priority);
+        } else {
+            offMainTask = null;
+            this.processOffMain = null;
+        }
+
+        if (this.processOffMain == null && this.processOnMain == null) {
+            throw new IllegalStateException("Illegal class implementation: " + this.getClass().getName() + ", should be able to schedule at least one task!");
+        }
+
+        this.loadDataFromDiskTask = new LoadDataFromDiskTask(world, chunkX, chunkZ, type, new DataLoadCallback(offMainTask, mainTask), priority);
+    }
+
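+    // Pairs the output of a stage (left) with any throwable raised while producing it (right).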
+    public static final record TaskResult<L, R>(L left, R right) {}
+
+    protected abstract boolean hasOffMain();
+
+    protected abstract boolean hasOnMain();
+
+    protected abstract PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority);
+
+    protected abstract PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority);
+
+    protected abstract TaskResult<OnMain, Throwable> runOffMain(final CompoundTag data, final Throwable throwable);
+
+    protected abstract TaskResult<FinalCompletion, Throwable> runOnMain(final OnMain data, final Throwable throwable);
+
+    protected abstract void onComplete(final TaskResult<FinalCompletion,Throwable> result);
+
+    protected abstract TaskResult<FinalCompletion, Throwable> completeOnMainOffMain(final OnMain data, final Throwable throwable);
+
+    @Override
+    public String toString() {
+        return "GenericDataLoadTask{class: " + this.getClass().getName() + ", world: " + this.world.getWorld().getName() +
+            ", chunk: (" + this.chunkX + "," + this.chunkZ + "), hashcode: " + System.identityHashCode(this) + ", priority: " + this.getPriority() +
+            ", type: " + this.type.toString() + "}";
+    }
+
+    public PrioritisedExecutor.Priority getPriority() {
+        if (this.processOnMain != null) {
+            return this.processOnMain.getPriority();
+        } else {
+            return this.processOffMain.getPriority();
+        }
+    }
+
+    public void lowerPriority(final PrioritisedExecutor.Priority priority) {
+        // can't lower I/O tasks, we don't know what they affect
+        if (this.processOffMain != null) {
+            this.processOffMain.lowerPriority(priority);
+        }
+        if (this.processOnMain != null) {
+            this.processOnMain.lowerPriority(priority);
+        }
+    }
+
+    public void setPriority(final PrioritisedExecutor.Priority priority) {
+        // can't lower I/O tasks, we don't know what they affect
+        this.loadDataFromDiskTask.raisePriority(priority);
+        if (this.processOffMain != null) {
+            this.processOffMain.setPriority(priority);
+        }
+        if (this.processOnMain != null) {
+            this.processOnMain.setPriority(priority);
+        }
+    }
+
+    public void raisePriority(final PrioritisedExecutor.Priority priority) {
+        // can't lower I/O tasks, we don't know what they affect
+        this.loadDataFromDiskTask.raisePriority(priority);
+        if (this.processOffMain != null) {
+            this.processOffMain.raisePriority(priority);
+        }
+        if (this.processOnMain != null) {
+            this.processOnMain.raisePriority(priority);
+        }
+    }
+
+    // returns whether scheduleNow() needs to be called
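+    // The first call transitions NOT_STARTED -> LOADING and owns the initial reference; later
+    // calls only add a reference. When delay is true, the caller must invoke scheduleNow()
+    // itself once this returns true, which lets it drop any locks it holds before the I/O
+    // task is actually queued.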
+    public boolean schedule(final boolean delay) {
+        if (this.stageAndReferenceCount.get() != STAGE_NOT_STARTED ||
+                !this.stageAndReferenceCount.compareAndSet(STAGE_NOT_STARTED, (1L << 32) | STAGE_LOADING)) {
+            // try and increment reference count
+            int failures = 0;
+            for (long curr = this.stageAndReferenceCount.get();;) {
+                if ((curr & STAGE_MASK) == STAGE_CANCELLED || (curr & STAGE_MASK) == STAGE_COMPLETED) {
+                    // cancelled or completed, nothing to do here
+                    return false;
+                }
+
+                if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, curr + (1L << 32)))) {
+                    // successful
+                    return false;
+                }
+
+                ++failures;
+                for (int i = 0; i < failures; ++i) {
+                    ConcurrentUtil.backoff();
+                }
+            }
+        }
+
+        if (!delay) {
+            this.scheduleNow();
+            return false;
+        }
+        return true;
+    }
+
+    public void scheduleNow() {
+        this.loadDataFromDiskTask.schedule(); // will schedule the rest
+    }
+
+    // assumes the current stage cannot be completed
+    // returns false if cancelled, returns true if can proceed
+    private boolean advanceStage(final long expect, final long to) {
+        int failures = 0;
+        for (long curr = this.stageAndReferenceCount.get();;) {
+            if ((curr & STAGE_MASK) != expect) {
+                // must be cancelled
+                return false;
+            }
+
+            final long newVal = (curr & ~STAGE_MASK) | to;
+            if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) {
+                return true;
+            }
+
+            ++failures;
+            for (int i = 0; i < failures; ++i) {
+                ConcurrentUtil.backoff();
+            }
+        }
+    }
+
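+    // Drops one reference; the underlying tasks are only cancelled (and onComplete(null)
+    // invoked) when this call removes the final reference, or when the task never started.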
+    public boolean cancel() {
+        int failures = 0;
+        for (long curr = this.stageAndReferenceCount.get();;) {
+            if ((curr & STAGE_MASK) == STAGE_COMPLETED || (curr & STAGE_MASK) == STAGE_CANCELLED) {
+                return false;
+            }
+
+            if ((curr & STAGE_MASK) == STAGE_NOT_STARTED || (curr & ~STAGE_MASK) == (1L << 32)) {
+                // no other references, so we can cancel
+                final long newVal = STAGE_CANCELLED;
+                if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) {
+                    this.loadDataFromDiskTask.cancel();
+                    if (this.processOffMain != null) {
+                        this.processOffMain.cancel();
+                    }
+                    if (this.processOnMain != null) {
+                        this.processOnMain.cancel();
+                    }
+                    this.onComplete(null);
+                    return true;
+                }
+            } else {
+                if ((curr & ~STAGE_MASK) == (0L << 32)) {
+                    throw new IllegalStateException("Reference count cannot be zero here");
+                }
+                // just decrease the reference count
+                final long newVal = curr - (1L << 32);
+                if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) {
+                    return false;
+                }
+            }
+
+            ++failures;
+            for (int i = 0; i < failures; ++i) {
+                ConcurrentUtil.backoff();
+            }
+        }
+    }
+
+    protected final class DataLoadCallback implements BiConsumer<CompoundTag, Throwable> {
+
+        protected final ProcessOffMainTask offMainTask;
+        protected final ProcessOnMainTask onMainTask;
+
+        public DataLoadCallback(final ProcessOffMainTask offMainTask, final ProcessOnMainTask onMainTask) {
+            this.offMainTask = offMainTask;
+            this.onMainTask = onMainTask;
+        }
+
+        @Override
+        public void accept(final CompoundTag compoundTag, final Throwable throwable) {
+            if (GenericDataLoadTask.this.stageAndReferenceCount.get() == STAGE_CANCELLED) {
+                // don't try to schedule further
+                return;
+            }
+
+            try {
+                if (compoundTag == CANCELLED_DATA) {
+                    // cancelled, except this isn't possible
+                    LOGGER.error("Data callback says cancelled, but stage does not?");
+                    return;
+                }
+
+                // get off of the regionfile callback ASAP, no clue what locks are held right now...
+                if (GenericDataLoadTask.this.processOffMain != null) {
+                    this.offMainTask.data = compoundTag;
+                    this.offMainTask.throwable = throwable;
+                    GenericDataLoadTask.this.processOffMain.queue();
+                    return;
+                } else {
+                    // no off-main task, so go straight to main
+                    this.onMainTask.data = (OnMain)compoundTag;
+                    this.onMainTask.throwable = throwable;
+                    GenericDataLoadTask.this.processOnMain.queue();
+                }
+            } catch (final ThreadDeath death) {
+                throw death;
+            } catch (final Throwable thr2) {
+                LOGGER.error("Failed I/O callback for task: " + GenericDataLoadTask.this.toString(), thr2);
+                GenericDataLoadTask.this.scheduler.unrecoverableChunkSystemFailure(
+                    GenericDataLoadTask.this.chunkX, GenericDataLoadTask.this.chunkZ, Map.of(
+                        "Callback throwable", ChunkTaskScheduler.stringIfNull(throwable)
+                    ), thr2);
+            }
+        }
+    }
+
+    protected final class ProcessOffMainTask implements Runnable {
+
+        protected CompoundTag data;
+        protected Throwable throwable;
+        protected final ProcessOnMainTask schedule;
+
+        public ProcessOffMainTask(final ProcessOnMainTask schedule) {
+            this.schedule = schedule;
+        }
+
+        @Override
+        public void run() {
+            if (!GenericDataLoadTask.this.advanceStage(STAGE_LOADING, this.schedule == null ? STAGE_COMPLETED : STAGE_PROCESSING)) {
+                // cancelled
+                return;
+            }
+            final TaskResult<OnMain, Throwable> newData = GenericDataLoadTask.this.runOffMain(this.data, this.throwable);
+
+            if (GenericDataLoadTask.this.stageAndReferenceCount.get() == STAGE_CANCELLED) {
+                // don't try to schedule further
+                return;
+            }
+
+            if (this.schedule != null) {
+                final TaskResult<FinalCompletion, Throwable> syncComplete = GenericDataLoadTask.this.completeOnMainOffMain(newData.left, newData.right);
+
+                if (syncComplete != null) {
+                    if (GenericDataLoadTask.this.advanceStage(STAGE_PROCESSING, STAGE_COMPLETED)) {
+                        GenericDataLoadTask.this.onComplete(syncComplete);
+                    } // else: cancelled
+                    return;
+                }
+
+                this.schedule.data = newData.left;
+                this.schedule.throwable = newData.right;
+
+                GenericDataLoadTask.this.processOnMain.queue();
+            } else {
+                GenericDataLoadTask.this.onComplete((TaskResult<FinalCompletion, Throwable>)newData);
+            }
+        }
+    }
+
+    protected final class ProcessOnMainTask implements Runnable {
+
+        protected OnMain data;
+        protected Throwable throwable;
+
+        @Override
+        public void run() {
+            if (!GenericDataLoadTask.this.advanceStage(STAGE_PROCESSING, STAGE_COMPLETED)) {
+                // cancelled
+                return;
+            }
+            final TaskResult<FinalCompletion, Throwable> result = GenericDataLoadTask.this.runOnMain(this.data, this.throwable);
+
+            GenericDataLoadTask.this.onComplete(result);
+        }
+    }
+
+    public static final class LoadDataFromDiskTask {
+
+        protected volatile int priority;
+        protected static final VarHandle PRIORITY_HANDLE = ConcurrentUtil.getVarHandle(LoadDataFromDiskTask.class, "priority", int.class);
+
+        protected static final int PRIORITY_EXECUTED = Integer.MIN_VALUE >>> 0;
+        protected static final int PRIORITY_LOAD_SCHEDULED = Integer.MIN_VALUE >>> 1;
+        protected static final int PRIORITY_UNLOAD_SCHEDULED = Integer.MIN_VALUE >>> 2;
+
+        protected static final int PRIORITY_FLAGS = ~Character.MAX_VALUE;
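+        // The low 16 bits (the complement of PRIORITY_FLAGS) store the priority value itself,
+        // while the top bits mark state: bit 31 executed/cancelled, bit 30 an I/O read scheduled,
+        // bit 29 a pending-unload copy scheduled. Illustration: a load scheduled at a hypothetical
+        // priority value of 5 would be encoded as PRIORITY_LOAD_SCHEDULED | 5.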
+
+        protected final int getPriorityVolatile() {
+            return (int)PRIORITY_HANDLE.getVolatile((LoadDataFromDiskTask)this);
+        }
+
+        protected final int compareAndExchangePriorityVolatile(final int expect, final int update) {
+            return (int)PRIORITY_HANDLE.compareAndExchange((LoadDataFromDiskTask)this, (int)expect, (int)update);
+        }
+
+        protected final int getAndOrPriorityVolatile(final int val) {
+            return (int)PRIORITY_HANDLE.getAndBitwiseOr((LoadDataFromDiskTask)this, (int)val);
+        }
+
+        protected final void setPriorityPlain(final int val) {
+            PRIORITY_HANDLE.set((LoadDataFromDiskTask)this, (int)val);
+        }
+
+        private final ServerLevel world;
+        private final int chunkX;
+        private final int chunkZ;
+
+        private final RegionFileIOThread.RegionFileType type;
+        private Cancellable dataLoadTask;
+        private Cancellable dataUnloadCancellable;
+        private DelayedPrioritisedTask dataUnloadTask;
+
+        private final BiConsumer<CompoundTag, Throwable> onComplete;
+
+        // onComplete should be caller sensitive, it may complete synchronously with schedule() - which does
+        // hold a priority lock.
+        public LoadDataFromDiskTask(final ServerLevel world, final int chunkX, final int chunkZ,
+                                    final RegionFileIOThread.RegionFileType type,
+                                    final BiConsumer<CompoundTag, Throwable> onComplete,
+                                    final PrioritisedExecutor.Priority priority) {
+            if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+                throw new IllegalArgumentException("Invalid priority " + priority);
+            }
+            this.world = world;
+            this.chunkX = chunkX;
+            this.chunkZ = chunkZ;
+            this.type = type;
+            this.onComplete = onComplete;
+            this.setPriorityPlain(priority.priority);
+        }
+
+        private void complete(final CompoundTag data, final Throwable throwable) {
+            try {
+                this.onComplete.accept(data, throwable);
+            } catch (final Throwable thr2) {
+                this.world.chunkTaskScheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
+                    "Completed throwable", ChunkTaskScheduler.stringIfNull(throwable),
+                    "Regionfile type", ChunkTaskScheduler.stringIfNull(this.type)
+                ), thr2);
+                if (thr2 instanceof ThreadDeath) {
+                    throw (ThreadDeath)thr2;
+                }
+            }
+        }
+
+        protected boolean markExecuting() {
+            return (this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) == 0;
+        }
+
+        protected boolean isMarkedExecuted() {
+            return (this.getPriorityVolatile() & PRIORITY_EXECUTED) != 0;
+        }
+
+        public void lowerPriority(final PrioritisedExecutor.Priority priority) {
+            if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+                throw new IllegalArgumentException("Invalid priority " + priority);
+            }
+
+            int failures = 0;
+            for (int curr = this.getPriorityVolatile();;) {
+                if ((curr & PRIORITY_EXECUTED) != 0) {
+                    // cancelled or executed
+                    return;
+                }
+
+                if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) {
+                    RegionFileIOThread.lowerPriority(this.world, this.chunkX, this.chunkZ, this.type, priority);
+                    return;
+                }
+
+                if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) {
+                    if (this.dataUnloadTask != null) {
+                        this.dataUnloadTask.lowerPriority(priority);
+                    }
+                    // no return - we need to propagate priority
+                }
+
+                if (!priority.isHigherPriority(curr & ~PRIORITY_FLAGS)) {
+                    return;
+                }
+
+                if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) {
+                    return;
+                }
+
+                // failed, retry
+
+                ++failures;
+                for (int i = 0; i < failures; ++i) {
+                    ConcurrentUtil.backoff();
+                }
+            }
+        }
+
+        public void setPriority(final PrioritisedExecutor.Priority priority) {
+            if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+                throw new IllegalArgumentException("Invalid priority " + priority);
+            }
+
+            int failures = 0;
+            for (int curr = this.getPriorityVolatile();;) {
+                if ((curr & PRIORITY_EXECUTED) != 0) {
+                    // cancelled or executed
+                    return;
+                }
+
+                if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) {
+                    RegionFileIOThread.setPriority(this.world, this.chunkX, this.chunkZ, this.type, priority);
+                    return;
+                }
+
+                if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) {
+                    if (this.dataUnloadTask != null) {
+                        this.dataUnloadTask.setPriority(priority);
+                    }
+                    // no return - we need to propagate priority
+                }
+
+                if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) {
+                    return;
+                }
+
+                // failed, retry
+
+                ++failures;
+                for (int i = 0; i < failures; ++i) {
+                    ConcurrentUtil.backoff();
+                }
+            }
+        }
+
+        public void raisePriority(final PrioritisedExecutor.Priority priority) {
+            if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+                throw new IllegalArgumentException("Invalid priority " + priority);
+            }
+
+            int failures = 0;
+            for (int curr = this.getPriorityVolatile();;) {
+                if ((curr & PRIORITY_EXECUTED) != 0) {
+                    // cancelled or executed
+                    return;
+                }
+
+                if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) {
+                    RegionFileIOThread.raisePriority(this.world, this.chunkX, this.chunkZ, this.type, priority);
+                    return;
+                }
+
+                if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) {
+                    if (this.dataUnloadTask != null) {
+                        this.dataUnloadTask.raisePriority(priority);
+                    }
+                    // no return - we need to propagate priority
+                }
+
+                if (!priority.isLowerPriority(curr & ~PRIORITY_FLAGS)) {
+                    return;
+                }
+
+                if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) {
+                    return;
+                }
+
+                // failed, retry
+
+                ++failures;
+                for (int i = 0; i < failures; ++i) {
+                    ConcurrentUtil.backoff();
+                }
+            }
+        }
+
+        public void cancel() {
+            if ((this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) != 0) {
+                // cancelled or executed already
+                return;
+            }
+
+            // OK if we miss the field read, the task cannot complete if the cancelled bit is set and
+            // the write to dataLoadTask will check for the cancelled bit
+            if (this.dataUnloadCancellable != null) {
+                this.dataUnloadCancellable.cancel();
+            }
+
+            if (this.dataLoadTask != null) {
+                this.dataLoadTask.cancel();
+            }
+
+            this.complete(CANCELLED_DATA, null);
+        }
+
+        private final AtomicBoolean scheduled = new AtomicBoolean();
+
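+        // Scheduling first checks whether this chunk has a pending unload for this data type; if
+        // the unload task can hand over its serialized data, that is used directly rather than
+        // reading possibly stale data back from disk. Only when no unload data is available does
+        // this fall back to an asynchronous region file read.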
+        public void schedule() {
+            if (this.scheduled.getAndSet(true)) {
+                throw new IllegalStateException("schedule() called twice");
+            }
+            int priority = this.getPriorityVolatile();
+
+            if ((priority & PRIORITY_EXECUTED) != 0) {
+                // cancelled
+                return;
+            }
+
+            final BiConsumer<CompoundTag, Throwable> consumer = (final CompoundTag data, final Throwable thr) -> {
+                // because cancelScheduled() cannot actually stop this task from executing in every case, we need
+                // to mark complete here to ensure we do not double complete
+                if (LoadDataFromDiskTask.this.markExecuting()) {
+                    LoadDataFromDiskTask.this.complete(data, thr);
+                } // else: cancelled
+            };
+
+            final PrioritisedExecutor.Priority initialPriority = PrioritisedExecutor.Priority.getPriority(priority);
+            boolean scheduledUnload = false;
+
+            final NewChunkHolder holder = this.world.chunkTaskScheduler.chunkHolderManager.getChunkHolder(this.chunkX, this.chunkZ);
+            if (holder != null) {
+                final BiConsumer<CompoundTag, Throwable> unloadConsumer = (final CompoundTag data, final Throwable thr) -> {
+                    if (data != null) {
+                        consumer.accept(data, null);
+                    } else {
+                        // need to schedule task
+                        LoadDataFromDiskTask.this.schedule(false, consumer, PrioritisedExecutor.Priority.getPriority(LoadDataFromDiskTask.this.getPriorityVolatile() & ~PRIORITY_FLAGS));
+                    }
+                };
+                Cancellable unloadCancellable = null;
+                CompoundTag syncComplete = null;
+                final NewChunkHolder.UnloadTask unloadTask = holder.getUnloadTask(this.type); // can be null if no task exists
+                final Completable<CompoundTag> unloadCompletable = unloadTask == null ? null : unloadTask.completable();
+                if (unloadCompletable != null) {
+                    unloadCancellable = unloadCompletable.addAsynchronousWaiter(unloadConsumer);
+                    if (unloadCancellable == null) {
+                        syncComplete = unloadCompletable.getResult();
+                    }
+                }
+
+                if (syncComplete != null) {
+                    consumer.accept(syncComplete, null);
+                    return;
+                }
+
+                if (unloadCancellable != null) {
+                    scheduledUnload = true;
+                    this.dataUnloadCancellable = unloadCancellable;
+                    this.dataUnloadTask = unloadTask.task();
+                }
+            }
+
+            this.schedule(scheduledUnload, consumer, initialPriority);
+        }
+
+        private void schedule(final boolean scheduledUnload, final BiConsumer<CompoundTag, Throwable> consumer, final PrioritisedExecutor.Priority initialPriority) {
+            int priority = this.getPriorityVolatile();
+
+            if ((priority & PRIORITY_EXECUTED) != 0) {
+                // cancelled
+                return;
+            }
+
+            if (!scheduledUnload) {
+                this.dataLoadTask = RegionFileIOThread.loadDataAsync(
+                    this.world, this.chunkX, this.chunkZ, this.type, consumer,
+                    initialPriority.isHigherPriority(PrioritisedExecutor.Priority.NORMAL), initialPriority
+                );
+            }
+
+            int failures = 0;
+            for (;;) {
+                if (priority == (priority = this.compareAndExchangePriorityVolatile(priority, priority | (scheduledUnload ? PRIORITY_UNLOAD_SCHEDULED : PRIORITY_LOAD_SCHEDULED)))) {
+                    return;
+                }
+
+                if ((priority & PRIORITY_EXECUTED) != 0) {
+                    // cancelled or executed
+                    if (this.dataUnloadCancellable != null) {
+                        this.dataUnloadCancellable.cancel();
+                    }
+
+                    if (this.dataLoadTask != null) {
+                        this.dataLoadTask.cancel();
+                    }
+                    return;
+                }
+
+                if (scheduledUnload) {
+                    if (this.dataUnloadTask != null) {
+                        this.dataUnloadTask.setPriority(PrioritisedExecutor.Priority.getPriority(priority & ~PRIORITY_FLAGS));
+                    }
+                } else {
+                    RegionFileIOThread.setPriority(this.world, this.chunkX, this.chunkZ, this.type, PrioritisedExecutor.Priority.getPriority(priority & ~PRIORITY_FLAGS));
+                }
+
+                ++failures;
+                for (int i = 0; i < failures; ++i) {
+                    ConcurrentUtil.backoff();
+                }
+            }
+        }
+
+        /*
+        private static final class LoadDataPriorityHolder extends PriorityHolder {
+
+            protected final LoadDataFromDiskTask task;
+
+            protected LoadDataPriorityHolder(final PrioritisedExecutor.Priority priority, final LoadDataFromDiskTask task) {
+                super(priority);
+                this.task = task;
+            }
+
+            @Override
+            protected void cancelScheduled() {
+                final Cancellable dataLoadTask = this.task.dataLoadTask;
+                if (dataLoadTask != null) {
+                    // OK if we miss the field read, the task cannot complete if the cancelled bit is set and
+                    // the write to dataLoadTask will check for the cancelled bit
+                    this.task.dataLoadTask.cancel();
+                }
+                this.task.complete(CANCELLED_DATA, null);
+            }
+
+            @Override
+            protected PrioritisedExecutor.Priority getScheduledPriority() {
+                final LoadDataFromDiskTask task = this.task;
+                return RegionFileIOThread.getPriority(task.world, task.chunkX, task.chunkZ, task.type);
+            }
+
+            @Override
+            protected void scheduleTask(final PrioritisedExecutor.Priority priority) {
+                final LoadDataFromDiskTask task = this.task;
+                final BiConsumer<CompoundTag, Throwable> consumer = (final CompoundTag data, final Throwable thr) -> {
+                    // because cancelScheduled() cannot actually stop this task from executing in every case, we need
+                    // to mark complete here to ensure we do not double complete
+                    if (LoadDataPriorityHolder.this.markExecuting()) {
+                        LoadDataPriorityHolder.this.task.complete(data, thr);
+                    } // else: cancelled
+                };
+                task.dataLoadTask = RegionFileIOThread.loadDataAsync(
+                    task.world, task.chunkX, task.chunkZ, task.type, consumer,
+                    priority.isHigherPriority(PrioritisedExecutor.Priority.NORMAL), priority
+                );
+                if (this.isMarkedExecuted()) {
+                    // if we are marked as completed, it could be:
+                    // 1. we were cancelled
+                    // 2. the consumer was completed
+                    // in the 2nd case, cancel() does nothing
+                    // in the 1st case, we ensure cancel() is called as it is possible for the cancelling thread
+                    // to miss the field write here
+                    task.dataLoadTask.cancel();
+                }
+            }
+
+            @Override
+            protected void lowerPriorityScheduled(final PrioritisedExecutor.Priority priority) {
+                final LoadDataFromDiskTask task = this.task;
+                RegionFileIOThread.lowerPriority(task.world, task.chunkX, task.chunkZ, task.type, priority);
+            }
+
+            @Override
+            protected void setPriorityScheduled(final PrioritisedExecutor.Priority priority) {
+                final LoadDataFromDiskTask task = this.task;
+                RegionFileIOThread.setPriority(task.world, task.chunkX, task.chunkZ, task.type, priority);
+            }
+
+            @Override
+            protected void raisePriorityScheduled(final PrioritisedExecutor.Priority priority) {
+                final LoadDataFromDiskTask task = this.task;
+                RegionFileIOThread.raisePriority(task.world, task.chunkX, task.chunkZ, task.type, priority);
+            }
+        }
+        */
+    }
+}
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
new file mode 100644
index 0000000000000000000000000000000000000000..8013dd333e27aa5fd0beb431fa32491eec9f5246
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
@@ -0,0 +1,2077 @@
+package io.papermc.paper.chunk.system.scheduling;
+
+import ca.spottedleaf.concurrentutil.completable.Completable;
+import ca.spottedleaf.concurrentutil.executor.Cancellable;
+import ca.spottedleaf.concurrentutil.executor.standard.DelayedPrioritisedTask;
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonElement;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonPrimitive;
+import com.mojang.logging.LogUtils;
+import io.papermc.paper.chunk.system.io.RegionFileIOThread;
+import io.papermc.paper.chunk.system.poi.PoiChunk;
+import io.papermc.paper.util.CoordinateUtils;
+import io.papermc.paper.util.TickThread;
+import io.papermc.paper.util.WorldUtil;
+import io.papermc.paper.world.ChunkEntitySlices;
+import it.unimi.dsi.fastutil.objects.Reference2ObjectLinkedOpenHashMap;
+import it.unimi.dsi.fastutil.objects.Reference2ObjectMap;
+import it.unimi.dsi.fastutil.objects.Reference2ObjectOpenHashMap;
+import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet;
+import net.minecraft.nbt.CompoundTag;
+import net.minecraft.server.level.ChunkHolder;
+import net.minecraft.server.level.ChunkMap;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.server.level.TicketType;
+import net.minecraft.world.entity.Entity;
+import net.minecraft.world.level.ChunkPos;
+import net.minecraft.world.level.chunk.ChunkAccess;
+import net.minecraft.world.level.chunk.ChunkStatus;
+import net.minecraft.world.level.chunk.ImposterProtoChunk;
+import net.minecraft.world.level.chunk.LevelChunk;
+import net.minecraft.world.level.chunk.storage.ChunkSerializer;
+import net.minecraft.world.level.chunk.storage.EntityStorage;
+import org.slf4j.Logger;
+import java.lang.invoke.VarHandle;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Consumer;
+
+public final class NewChunkHolder {
+
+    private static final Logger LOGGER = LogUtils.getClassLogger();
+
+    public static final Thread.UncaughtExceptionHandler CHUNKSYSTEM_UNCAUGHT_EXCEPTION_HANDLER = new Thread.UncaughtExceptionHandler() {
+        @Override
+        public void uncaughtException(final Thread thread, final Throwable throwable) {
+            if (!(throwable instanceof ThreadDeath)) {
+                LOGGER.error("Uncaught exception in thread " + thread.getName(), throwable);
+            }
+        }
+    };
+
+    public final ServerLevel world;
+    public final int chunkX;
+    public final int chunkZ;
+
+    public final ChunkTaskScheduler scheduler;
+
+    // load/unload state
+
+    // chunk data state
+
+    private ChunkEntitySlices entityChunk;
+    // entity chunk that is loaded, but not yet deserialized
+    private CompoundTag pendingEntityChunk;
+
+    ChunkEntitySlices loadInEntityChunk(final boolean transientChunk) {
+        TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Cannot sync load entity data off-main");
+        final CompoundTag entityChunk;
+        final ChunkEntitySlices ret;
+        this.scheduler.schedulingLock.lock();
+        try {
+            if (this.entityChunk != null && (transientChunk || !this.entityChunk.isTransient())) {
+                return this.entityChunk;
+            }
+            final CompoundTag pendingEntityChunk = this.pendingEntityChunk;
+            if (!transientChunk && pendingEntityChunk == null) {
+                throw new IllegalStateException("Must load entity data from disk before loading in the entity chunk!");
+            }
+
+            if (this.entityChunk == null) {
+                ret = this.entityChunk = new ChunkEntitySlices(
+                    this.world, this.chunkX, this.chunkZ, this.getChunkStatus(),
+                    WorldUtil.getMinSection(this.world), WorldUtil.getMaxSection(this.world)
+                );
+
+                ret.setTransient(transientChunk);
+
+                this.world.getEntityLookup().entitySectionLoad(this.chunkX, this.chunkZ, ret);
+            } else {
+                // transientChunk = false here
+                ret = this.entityChunk;
+                this.entityChunk.setTransient(false);
+            }
+
+            if (!transientChunk) {
+                this.pendingEntityChunk = null;
+                entityChunk = pendingEntityChunk == EMPTY_ENTITY_CHUNK ? null : pendingEntityChunk;
+            } else {
+                entityChunk = null;
+            }
+        } finally {
+            this.scheduler.schedulingLock.unlock();
+        }
+
+        if (!transientChunk) {
+            if (entityChunk != null) {
+                final List<Entity> entities = EntityStorage.readEntities(this.world, entityChunk);
+
+                this.world.getEntityLookup().addEntityChunkEntities(entities);
+            }
+        }
+
+        return ret;
+    }
+
+    // needed to distinguish whether the entity chunk has been read from disk but is empty or whether it has _not_
+    // been read from disk
+    private static final CompoundTag EMPTY_ENTITY_CHUNK = new CompoundTag();
+
+    private ChunkLoadTask.EntityDataLoadTask entityDataLoadTask;
+    // note: if entityDataLoadTask is cancelled, but on its completion entityDataLoadTaskWaiters.size() != 0,
+    // then the task is rescheduled
+    private List<GenericDataLoadTaskCallback> entityDataLoadTaskWaiters;
+
+    public ChunkLoadTask.EntityDataLoadTask getEntityDataLoadTask() {
+        return this.entityDataLoadTask;
+    }
+
+    // must hold schedule lock for the two below functions
+
+    // returns only if the data has been loaded from disk, DOES NOT relate to whether it has been deserialized
+    // or added into the world (or even into entityChunk)
+    public boolean isEntityChunkNBTLoaded() {
+        return (this.entityChunk != null && !this.entityChunk.isTransient()) || this.pendingEntityChunk != null;
+    }
+
+    private void completeEntityLoad(final GenericDataLoadTask.TaskResult<CompoundTag, Throwable> result) {
+        final List<GenericDataLoadTaskCallback> completeWaiters;
+        ChunkLoadTask.EntityDataLoadTask entityDataLoadTask = null;
+        boolean scheduleEntityTask = false;
+        this.scheduler.schedulingLock.lock();
+        try {
+            final List<GenericDataLoadTaskCallback> waiters = this.entityDataLoadTaskWaiters;
+            this.entityDataLoadTask = null;
+            if (result != null) {
+                this.entityDataLoadTaskWaiters = null;
+                this.pendingEntityChunk = result.left() == null ? EMPTY_ENTITY_CHUNK : result.left();
+                if (result.right() != null) {
+                    LOGGER.error("Unhandled entity data load exception, entity data will be lost: ", result.right());
+                }
+
+                completeWaiters = waiters;
+            } else {
+                // cancelled
+                completeWaiters = null;
+
+                // need to re-schedule?
+                if (waiters.isEmpty()) {
+                    this.entityDataLoadTaskWaiters = null;
+                    // no tasks to schedule _for_
+                } else {
+                    entityDataLoadTask = this.entityDataLoadTask = new ChunkLoadTask.EntityDataLoadTask(
+                        this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority()
+                    );
+                    entityDataLoadTask.addCallback(this::completeEntityLoad);
+                    // need one schedule() per waiter
+                    for (final GenericDataLoadTaskCallback callback : waiters) {
+                        scheduleEntityTask |= entityDataLoadTask.schedule(true);
+                    }
+                }
+            }
+        } finally {
+            this.scheduler.schedulingLock.unlock();
+        }
+
+        if (scheduleEntityTask) {
+            entityDataLoadTask.scheduleNow();
+        }
+
+        // avoid holding the scheduling lock while completing
+        if (completeWaiters != null) {
+            for (final GenericDataLoadTaskCallback callback : completeWaiters) {
+                callback.accept(result);
+            }
+        }
+
+        this.scheduler.schedulingLock.lock();
+        try {
+            this.checkUnload();
+        } finally {
+            this.scheduler.schedulingLock.unlock();
+        }
+    }
+
+    // note: the consumer is guaranteed not to be invoked while the caller holds the schedule lock;
+    // however, when the consumer is invoked, it will hold the schedule lock
+    public GenericDataLoadTaskCallback getOrLoadEntityData(final Consumer<GenericDataLoadTask.TaskResult<CompoundTag, Throwable>> consumer) {
+        if (this.isEntityChunkNBTLoaded()) {
+            throw new IllegalStateException("Cannot load entity data, it is already loaded");
+        }
+        // why not just acquire the lock? because the caller NEEDS to call isEntityChunkNBTLoaded before this!
+        if (!this.scheduler.schedulingLock.isHeldByCurrentThread()) {
+            throw new IllegalStateException("Must hold scheduling lock");
+        }
+
+        final GenericDataLoadTaskCallback ret = new EntityDataLoadTaskCallback((Consumer)consumer, this);
+
+        if (this.entityDataLoadTask == null) {
+            this.entityDataLoadTask = new ChunkLoadTask.EntityDataLoadTask(
+                this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority()
+            );
+            this.entityDataLoadTask.addCallback(this::completeEntityLoad);
+            this.entityDataLoadTaskWaiters = new ArrayList<>();
+        }
+        this.entityDataLoadTaskWaiters.add(ret);
+        if (this.entityDataLoadTask.schedule(true)) {
+            ret.schedule = this.entityDataLoadTask;
+        }
+        this.checkUnload();
+
+        return ret;
+    }
+
+    private static final class EntityDataLoadTaskCallback extends GenericDataLoadTaskCallback {
+
+        public EntityDataLoadTaskCallback(final Consumer<GenericDataLoadTask.TaskResult<?, Throwable>> consumer, final NewChunkHolder chunkHolder) {
+            super(consumer, chunkHolder);
+        }
+
+        @Override
+        void internalCancel() {
+            this.chunkHolder.entityDataLoadTaskWaiters.remove(this);
+            this.chunkHolder.entityDataLoadTask.cancel();
+        }
+    }
+
+    private PoiChunk poiChunk;
+
+    private ChunkLoadTask.PoiDataLoadTask poiDataLoadTask;
+    // note: if poiDataLoadTask is cancelled, but on its completion poiDataLoadTaskWaiters.size() != 0,
+    // then the task is rescheduled
+    private List<GenericDataLoadTaskCallback> poiDataLoadTaskWaiters;
+
+    public ChunkLoadTask.PoiDataLoadTask getPoiDataLoadTask() {
+        return this.poiDataLoadTask;
+    }
+
+    // must hold schedule lock for the two below functions
+
+    public boolean isPoiChunkLoaded() {
+        return this.poiChunk != null;
+    }
+
+    private void completePoiLoad(final GenericDataLoadTask.TaskResult<PoiChunk, Throwable> result) {
+        final List<GenericDataLoadTaskCallback> completeWaiters;
+        ChunkLoadTask.PoiDataLoadTask poiDataLoadTask = null;
+        boolean schedulePoiTask = false;
+        this.scheduler.schedulingLock.lock();
+        try {
+            final List<GenericDataLoadTaskCallback> waiters = this.poiDataLoadTaskWaiters;
+            this.poiDataLoadTask = null;
+            if (result != null) {
+                this.poiDataLoadTaskWaiters = null;
+                this.poiChunk = result.left();
+                if (result.right() != null) {
+                    LOGGER.error("Unhandled poi load exception, poi data will be lost: ", result.right());
+                }
+
+                completeWaiters = waiters;
+            } else {
+                // cancelled
+                completeWaiters = null;
+
+                // need to re-schedule?
+                if (waiters.isEmpty()) {
+                    this.poiDataLoadTaskWaiters = null;
+                    // no tasks to schedule _for_
+                } else {
+                    poiDataLoadTask = this.poiDataLoadTask = new ChunkLoadTask.PoiDataLoadTask(
+                        this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority()
+                    );
+                    poiDataLoadTask.addCallback(this::completePoiLoad);
+                    // need one schedule() per waiter
+                    for (final GenericDataLoadTaskCallback callback : waiters) {
+                        schedulePoiTask |= poiDataLoadTask.schedule(true);
+                    }
+                }
+            }
+        } finally {
+            this.scheduler.schedulingLock.unlock();
+        }
+
+        if (schedulePoiTask) {
+            poiDataLoadTask.scheduleNow();
+        }
+
+        // avoid holding the scheduling lock while completing
+        if (completeWaiters != null) {
+            for (final GenericDataLoadTaskCallback callback : completeWaiters) {
+                callback.accept(result);
+            }
+        }
+        this.scheduler.schedulingLock.lock();
+        try {
+            this.checkUnload();
+        } finally {
+            this.scheduler.schedulingLock.unlock();
+        }
+    }
+
+    // note: the consumer is guaranteed not to be invoked while the caller holds the schedule lock;
+    // however, when the consumer is invoked, it will hold the schedule lock
+    public GenericDataLoadTaskCallback getOrLoadPoiData(final Consumer<GenericDataLoadTask.TaskResult<PoiChunk, Throwable>> consumer) {
+        if (this.isPoiChunkLoaded()) {
+            throw new IllegalStateException("Cannot load poi data, it is already loaded");
+        }
+        // why not just acquire the lock? because the caller NEEDS to call isPoiChunkLoaded before this!
+        if (!this.scheduler.schedulingLock.isHeldByCurrentThread()) {
+            throw new IllegalStateException("Must hold scheduling lock");
+        }
+
+        final GenericDataLoadTaskCallback ret = new PoiDataLoadTaskCallback((Consumer)consumer, this);
+
+        if (this.poiDataLoadTask == null) {
+            this.poiDataLoadTask = new ChunkLoadTask.PoiDataLoadTask(
+                this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority()
+            );
+            this.poiDataLoadTask.addCallback(this::completePoiLoad);
+            this.poiDataLoadTaskWaiters = new ArrayList<>();
+        }
+        this.poiDataLoadTaskWaiters.add(ret);
+        if (this.poiDataLoadTask.schedule(true)) {
+            ret.schedule = this.poiDataLoadTask;
+        }
+        this.checkUnload();
+
+        return ret;
+    }
+
+    private static final class PoiDataLoadTaskCallback extends GenericDataLoadTaskCallback {
+
+        public PoiDataLoadTaskCallback(final Consumer<GenericDataLoadTask.TaskResult<?, Throwable>> consumer, final NewChunkHolder chunkHolder) {
+            super(consumer, chunkHolder);
+        }
+
+        @Override
+        void internalCancel() {
+            this.chunkHolder.poiDataLoadTaskWaiters.remove(this);
+            this.chunkHolder.poiDataLoadTask.cancel();
+        }
+    }
+
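+    // Callback handed out by getOrLoadEntityData()/getOrLoadPoiData(): schedule() may be invoked
+    // at most once and fires the deferred load task, while cancel() and completion race under the
+    // scheduling lock; whichever sets the completed flag first wins.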
+    public static abstract class GenericDataLoadTaskCallback implements Cancellable, Consumer<GenericDataLoadTask.TaskResult<?, Throwable>> {
+
+        protected final Consumer<GenericDataLoadTask.TaskResult<?, Throwable>> consumer;
+        protected final NewChunkHolder chunkHolder;
+        protected boolean completed;
+        protected GenericDataLoadTask<?, ?> schedule;
+        protected final AtomicBoolean scheduled = new AtomicBoolean();
+
+        public GenericDataLoadTaskCallback(final Consumer<GenericDataLoadTask.TaskResult<?, Throwable>> consumer,
+                                           final NewChunkHolder chunkHolder) {
+            this.consumer = consumer;
+            this.chunkHolder = chunkHolder;
+        }
+
+        public void schedule() {
+            if (this.scheduled.getAndSet(true)) {
+                throw new IllegalStateException("Double calling schedule()");
+            }
+            if (this.schedule != null) {
+                this.schedule.scheduleNow();
+                this.schedule = null;
+            }
+        }
+
+        boolean isCompleted() {
+            return this.completed;
+        }
+
+        // must hold scheduling lock
+        private boolean setCompleted() {
+            if (this.completed) {
+                return false;
+            }
+            return this.completed = true;
+        }
+
+        @Override
+        public void accept(final GenericDataLoadTask.TaskResult<?, Throwable> result) {
+            if (result != null) {
+                if (this.setCompleted()) {
+                    this.consumer.accept(result);
+                } else {
+                    throw new IllegalStateException("Cannot be cancelled at this point");
+                }
+            } else {
+                throw new NullPointerException("Result cannot be null (cancelled)");
+            }
+        }
+
+        // holds scheduling lock
+        abstract void internalCancel();
+
+        @Override
+        public boolean cancel() {
+            this.chunkHolder.scheduler.schedulingLock.lock();
+            try {
+                if (!this.completed) {
+                    this.completed = true;
+                    this.internalCancel();
+                    return true;
+                }
+                return false;
+            } finally {
+                this.chunkHolder.scheduler.schedulingLock.unlock();
+            }
+        }
+    }
+
+    private ChunkAccess currentChunk;
+
+    // generation status state
+
+    /**
+     * Current status the chunk has been brought up to by the chunk system. null indicates no work at all
+     */
+    private ChunkStatus currentGenStatus;
+
+    // This allows unsynchronised access to the chunk and last gen status
+    private volatile ChunkCompletion lastChunkCompletion;
+
+    public ChunkCompletion getLastChunkCompletion() {
+        return this.lastChunkCompletion;
+    }
+
+    public static final record ChunkCompletion(ChunkAccess chunk, ChunkStatus genStatus) {};
+
+    /**
+     * The target final chunk status the chunk system will bring the chunk to.
+     */
+    private ChunkStatus requestedGenStatus;
+
+    private ChunkProgressionTask generationTask;
+    private ChunkStatus generationTaskStatus;
+
+    /**
+     * contains the neighbours that this chunk generation is blocking on
+     */
+    protected final ReferenceLinkedOpenHashSet<NewChunkHolder> neighboursBlockingGenTask = new ReferenceLinkedOpenHashSet<>(4);
+
+    /**
+     * map of ChunkHolder -> Required Status for this chunk
+     */
+    protected final Reference2ObjectLinkedOpenHashMap<NewChunkHolder, ChunkStatus> neighboursWaitingForUs = new Reference2ObjectLinkedOpenHashMap<>();
+
+    public void addGenerationBlockingNeighbour(final NewChunkHolder neighbour) {
+        this.neighboursBlockingGenTask.add(neighbour);
+    }
+
+    public void addWaitingNeighbour(final NewChunkHolder neighbour, final ChunkStatus requiredStatus) {
+        final boolean wasEmpty = this.neighboursWaitingForUs.isEmpty();
+        this.neighboursWaitingForUs.put(neighbour, requiredStatus);
+        if (wasEmpty) {
+            this.checkUnload();
+        }
+    }
+
+    // priority state
+
+    // the target priority for this chunk to generate at
+    // TODO this will screw over scheduling at lower priorities to neighbours, fix
+    private PrioritisedExecutor.Priority priority = PrioritisedExecutor.Priority.NORMAL;
+    private boolean priorityLocked;
+
+    // the priority neighbouring chunks have requested this chunk generate at
+    private PrioritisedExecutor.Priority neighbourRequestedPriority = PrioritisedExecutor.Priority.IDLE;
+
+    public PrioritisedExecutor.Priority getEffectivePriority() {
+        return PrioritisedExecutor.Priority.max(this.priority, this.neighbourRequestedPriority);
+    }
+
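+    // A chunk's effective priority is the higher of its own priority and the highest priority any
+    // neighbour waiting on it has requested; when it changes, the new value is pushed to the
+    // running generation task and re-evaluated recursively for the chunks this one is blocking on.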
+    protected void recalculateNeighbourRequestedPriority() {
+        if (this.neighboursWaitingForUs.isEmpty()) {
+            this.neighbourRequestedPriority = PrioritisedExecutor.Priority.IDLE;
+            return;
+        }
+
+        PrioritisedExecutor.Priority max = PrioritisedExecutor.Priority.IDLE;
+
+        for (final NewChunkHolder holder : this.neighboursWaitingForUs.keySet()) {
+            final PrioritisedExecutor.Priority neighbourPriority = holder.getEffectivePriority();
+            if (neighbourPriority.isHigherPriority(max)) {
+                max = neighbourPriority;
+            }
+        }
+
+        final PrioritisedExecutor.Priority current = this.getEffectivePriority();
+        this.neighbourRequestedPriority = max;
+        final PrioritisedExecutor.Priority next = this.getEffectivePriority();
+
+        if (current == next) {
+            return;
+        }
+
+        // our effective priority has changed, so change our task
+        if (this.generationTask != null) {
+            this.generationTask.setPriority(next);
+        }
+
+        // now propagate this to our neighbours
+        this.recalculateNeighbourPriorities();
+    }
+
+    public void recalculateNeighbourPriorities() {
+        for (final NewChunkHolder holder : this.neighboursBlockingGenTask) {
+            holder.recalculateNeighbourRequestedPriority();
+        }
+    }
+
+    // must hold scheduling lock
+    public void raisePriority(final PrioritisedExecutor.Priority priority) {
+        if (this.priority != null && this.priority.isHigherOrEqualPriority(priority)) {
+            return;
+        }
+        this.setPriority(priority);
+    }
+
+    private void lockPriority() {
+        this.priority = PrioritisedExecutor.Priority.NORMAL;
+        this.priorityLocked = true;
+    }
+
+    // must hold scheduling lock
+    public void setPriority(final PrioritisedExecutor.Priority priority) {
+        if (this.priorityLocked) {
+            return;
+        }
+        final PrioritisedExecutor.Priority old = this.getEffectivePriority();
+        this.priority = priority;
+        final PrioritisedExecutor.Priority newPriority = this.getEffectivePriority();
+
+        if (old != newPriority) {
+            if (this.generationTask != null) {
+                this.generationTask.setPriority(newPriority);
+            }
+        }
+
+        this.recalculateNeighbourPriorities();
+    }
+
+    // must hold scheduling lock
+    public void lowerPriority(final PrioritisedExecutor.Priority priority) {
+        if (this.priority != null && this.priority.isLowerOrEqualPriority(priority)) {
+            return;
+        }
+        this.setPriority(priority);
+    }
+
+    // error handling state
+    private ChunkStatus failedGenStatus;
+    private Throwable genTaskException;
+    private Thread genTaskFailedThread;
+
+    private boolean failedLightUpdate;
+
+    public void failedLightUpdate() {
+        this.failedLightUpdate = true;
+    }
+
+    public boolean hasFailedGeneration() {
+        return this.genTaskException != null;
+    }
+
+    // ticket level state
+    private int oldTicketLevel = ChunkMap.MAX_CHUNK_DISTANCE + 1;
+    private int currentTicketLevel = ChunkMap.MAX_CHUNK_DISTANCE + 1;
+
+    public int getTicketLevel() {
+        return this.currentTicketLevel;
+    }
+
+    public final ChunkHolder vanillaChunkHolder;
+
+    public NewChunkHolder(final ServerLevel world, final int chunkX, final int chunkZ, final ChunkTaskScheduler scheduler) {
+        this.world = world;
+        this.chunkX = chunkX;
+        this.chunkZ = chunkZ;
+        this.scheduler = scheduler;
+        this.vanillaChunkHolder = new ChunkHolder(new ChunkPos(chunkX, chunkZ), world, world.getLightEngine(), world.chunkSource.chunkMap, this);
+    }
+
+    protected ImposterProtoChunk wrappedChunkForNeighbour;
+
+    // holds scheduling lock
+    public ChunkAccess getChunkForNeighbourAccess() {
+        // Vanilla overrides the status futures with an imposter chunk to prevent writes to full chunks
+        // But we don't store per-status futures, so we need this hack
+        if (this.wrappedChunkForNeighbour != null) {
+            return this.wrappedChunkForNeighbour;
+        }
+        final ChunkAccess ret = this.currentChunk;
+        return ret instanceof LevelChunk fullChunk ? this.wrappedChunkForNeighbour = new ImposterProtoChunk(fullChunk, false) : ret;
+    }
+
+    public ChunkAccess getCurrentChunk() {
+        return this.currentChunk;
+    }
+
+    int getCurrentTicketLevel() {
+        return this.currentTicketLevel;
+    }
+
+    void updateTicketLevel(final int toLevel) {
+        this.currentTicketLevel = toLevel;
+    }
+
+    private int totalNeighboursUsingThisChunk = 0;
+
+    // holds schedule lock
+    public void addNeighbourUsingChunk() {
+        final int now = ++this.totalNeighboursUsingThisChunk;
+
+        if (now == 1) {
+            this.checkUnload();
+        }
+    }
+
+    // holds schedule lock
+    public void removeNeighbourUsingChunk() {
+        final int now = --this.totalNeighboursUsingThisChunk;
+
+        if (now == 0) {
+            this.checkUnload();
+        }
+
+        if (now < 0) {
+            throw new IllegalStateException("Neighbours using this chunk cannot be negative");
+        }
+    }
+
+    // must hold scheduling lock
+    // returns string reason for why chunk should remain loaded, null otherwise
+    public final String isSafeToUnload() {
+        // is ticket level below threshold?
+        if (this.oldTicketLevel <= ChunkHolderManager.MAX_TICKET_LEVEL) {
+            return "ticket_level";
+        }
+
+        // are we being used by another chunk for generation?
+        if (this.totalNeighboursUsingThisChunk != 0) {
+            return "neighbours_generating";
+        }
+
+        // are we going to be used by another chunk for generation?
+        if (!this.neighboursWaitingForUs.isEmpty()) {
+            return "neighbours_waiting";
+        }
+
+        // chunk must be marked inaccessible (i.e unloaded to plugins)
+        if (this.getChunkStatus() != ChunkHolder.FullChunkStatus.INACCESSIBLE) {
+            return "fullchunkstatus";
+        }
+
+        // are we currently generating anything, or have requested generation?
+        if (this.generationTask != null) {
+            return "generating";
+        }
+        if (this.requestedGenStatus != null) {
+            return "requested_generation";
+        }
+
+        // entity data requested?
+        if (this.entityDataLoadTask != null) {
+            return "entity_data_requested";
+        }
+
+        // poi data requested?
+        if (this.poiDataLoadTask != null) {
+            return "poi_data_requested";
+        }
+
+        // are we pending serialization?
+        if (this.entityDataUnload != null) {
+            return "entity_serialization";
+        }
+        if (this.poiDataUnload != null) {
+            return "poi_serialization";
+        }
+        if (this.chunkDataUnload != null) {
+            return "chunk_serialization";
+        }
+
+        // Note: light tasks do not need a check, as they add a ticket.
+
+        // nothing is using this chunk, so it should be unloaded
+        return null;
+    }
+
+    /** Unloaded from chunk map */
+    boolean killed;
+
+    // must hold scheduling lock
+    private void checkUnload() {
+        if (this.killed) {
+            return;
+        }
+        if (this.isSafeToUnload() == null) {
+            // ensure in unload queue
+            this.scheduler.chunkHolderManager.unloadQueue.add(this);
+        } else {
+            // ensure not in unload queue
+            this.scheduler.chunkHolderManager.unloadQueue.remove(this);
+        }
+    }
+
+    static final record UnloadState(NewChunkHolder holder, ChunkAccess chunk, ChunkEntitySlices entityChunk, PoiChunk poiChunk) {};
+
+    // note: these are completed with null both to indicate that no write occurred
+    // and to indicate that a null write occurred
+    private UnloadTask chunkDataUnload;
+    private UnloadTask entityDataUnload;
+    private UnloadTask poiDataUnload;
+
+    public static final record UnloadTask(Completable<CompoundTag> completable, DelayedPrioritisedTask task) {}
+
+    public UnloadTask getUnloadTask(final RegionFileIOThread.RegionFileType type) {
+        switch (type) {
+            case CHUNK_DATA:
+                return this.chunkDataUnload;
+            case ENTITY_DATA:
+                return this.entityDataUnload;
+            case POI_DATA:
+                return this.poiDataUnload;
+            default:
+                throw new IllegalStateException("Unknown regionfile type " + type);
+        }
+    }
+
+    private UnloadState unloadState;
+
+    // holds schedule lock
|
|
+ UnloadState unloadStage1() {
|
|
+ // because we hold the scheduling lock, we cannot actually unload anything
|
|
+ // so we need to null this chunk's state
|
|
+ ChunkAccess chunk = this.currentChunk;
|
|
+ ChunkEntitySlices entityChunk = this.entityChunk;
|
|
+ PoiChunk poiChunk = this.poiChunk;
|
|
+ // chunk state
|
|
+ this.currentChunk = null;
|
|
+ this.currentGenStatus = null;
|
|
+ this.wrappedChunkForNeighbour = null;
|
|
+ this.lastChunkCompletion = null;
|
|
+ // entity chunk state
|
|
+ this.entityChunk = null;
|
|
+ this.pendingEntityChunk = null;
|
|
+
|
|
+ // poi chunk state
|
|
+ this.poiChunk = null;
|
|
+
|
|
+ // priority state
|
|
+ this.priorityLocked = false;
|
|
+
|
|
+ if (chunk != null) {
|
|
+ this.chunkDataUnload = new UnloadTask(new Completable<>(), new DelayedPrioritisedTask(PrioritisedExecutor.Priority.NORMAL));
|
|
+ }
|
|
+ if (poiChunk != null) {
|
|
+ this.poiDataUnload = new UnloadTask(new Completable<>(), null);
|
|
+ }
|
|
+ if (entityChunk != null) {
|
|
+ this.entityDataUnload = new UnloadTask(new Completable<>(), null);
|
|
+ }
|
|
+
|
|
+ return this.unloadState = (chunk != null || entityChunk != null || poiChunk != null) ? new UnloadState(this, chunk, entityChunk, poiChunk) : null;
|
|
+ }
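+    // note: only the chunk data unload carries a DelayedPrioritisedTask - its serialization is queued as an
+    // executor task whose priority can still be raised later, while entity/poi data are serialized inline
+    // during stage 2 and handed straight to RegionFileIOThread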
+
+    // data is null if failed or does not need to be saved
+    void completeAsyncChunkDataSave(final CompoundTag data) {
+        if (data != null) {
+            RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, data, RegionFileIOThread.RegionFileType.CHUNK_DATA);
+        }
+        this.chunkDataUnload.completable().complete(data);
+        this.scheduler.schedulingLock.lock();
+        try {
+            // can only write to these fields while holding the schedule lock
+            this.chunkDataUnload = null;
+            this.checkUnload();
+        } finally {
+            this.scheduler.schedulingLock.unlock();
+        }
+    }
+
+    void unloadStage2(final UnloadState state) {
+        this.unloadState = null;
+        final ChunkAccess chunk = state.chunk();
+        final ChunkEntitySlices entityChunk = state.entityChunk();
+        final PoiChunk poiChunk = state.poiChunk();
+
+        final boolean shouldLevelChunkNotSave = (chunk instanceof LevelChunk levelChunk && levelChunk.mustNotSave);
+
+        // unload chunk data
+        if (chunk != null) {
+            if (chunk instanceof LevelChunk levelChunk) {
+                levelChunk.setLoaded(false);
+            }
+
+            if (!shouldLevelChunkNotSave) {
+                this.saveChunk(chunk, true);
+            } else {
+                this.completeAsyncChunkDataSave(null);
+            }
+
+            if (chunk instanceof LevelChunk levelChunk) {
+                this.world.unload(levelChunk);
+            }
+        }
+
+        // unload entity data
+        if (entityChunk != null) {
+            this.saveEntities(entityChunk, true);
+            // yes this is a hack to pass the compound tag through...
+            final CompoundTag lastEntityUnload = this.lastEntityUnload;
+            this.lastEntityUnload = null;
+
+            if (entityChunk.unload()) {
+                this.scheduler.schedulingLock.lock();
+                try {
+                    entityChunk.setTransient(true);
+                    this.entityChunk = entityChunk;
+                } finally {
+                    this.scheduler.schedulingLock.unlock();
+                }
+            } else {
+                this.world.getEntityLookup().entitySectionUnload(this.chunkX, this.chunkZ);
+            }
+            // we need to delay the callback until after determining transience, otherwise a potential loader could
+            // set entityChunk before we do
+            this.entityDataUnload.completable().complete(lastEntityUnload);
+        }
+
+        // unload poi data
+        if (poiChunk != null) {
+            if (poiChunk.isDirty() && !shouldLevelChunkNotSave) {
+                this.savePOI(poiChunk, true);
+            } else {
+                this.poiDataUnload.completable().complete(null);
+            }
+
+            if (poiChunk.isLoaded()) {
+                this.world.getPoiManager().onUnload(CoordinateUtils.getChunkKey(this.chunkX, this.chunkZ));
+            }
+        }
+    }
+
+    boolean unloadStage3() {
+        // can only write to these while holding the schedule lock, and we instantly complete them in stage2
+        this.poiDataUnload = null;
+        this.entityDataUnload = null;
+
+        // we need to check if anything has been loaded in the meantime (or if we have transient entities)
+        if (this.entityChunk != null || this.poiChunk != null || this.currentChunk != null) {
+            return false;
+        }
+
+        return this.isSafeToUnload() == null;
+    }
+
+    private void cancelGenTask() {
+        if (this.generationTask != null) {
+            this.generationTask.cancel();
+        } else {
+            // otherwise, we are blocking on neighbours, so remove them
+            if (!this.neighboursBlockingGenTask.isEmpty()) {
+                for (final NewChunkHolder neighbour : this.neighboursBlockingGenTask) {
+                    if (neighbour.neighboursWaitingForUs.remove(this) == null) {
+                        throw new IllegalStateException("Corrupt state");
+                    }
+                    if (neighbour.neighboursWaitingForUs.isEmpty()) {
+                        neighbour.checkUnload();
+                    }
+                }
+                this.neighboursBlockingGenTask.clear();
+                this.checkUnload();
+            }
+        }
+    }
+
+    // holds: ticket level update lock
+    // holds: schedule lock
+    public void processTicketLevelUpdate(final List<ChunkProgressionTask> scheduledTasks, final List<NewChunkHolder> changedLoadStatus) {
+        final int oldLevel = this.oldTicketLevel;
+        final int newLevel = this.currentTicketLevel;
+
+        if (oldLevel == newLevel) {
+            return;
+        }
+
+        this.oldTicketLevel = newLevel;
+
+        final ChunkHolder.FullChunkStatus oldState = ChunkHolder.getFullChunkStatus(oldLevel);
+        final ChunkHolder.FullChunkStatus newState = ChunkHolder.getFullChunkStatus(newLevel);
+        final boolean oldUnloaded = oldLevel > ChunkHolderManager.MAX_TICKET_LEVEL;
+        final boolean newUnloaded = newLevel > ChunkHolderManager.MAX_TICKET_LEVEL;
+
+        final ChunkStatus maxGenerationStatusOld = ChunkHolder.getStatus(oldLevel);
+        final ChunkStatus maxGenerationStatusNew = ChunkHolder.getStatus(newLevel);
+
+        // check for cancellations from downgrading ticket level
+        if (this.requestedGenStatus != null && !newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && newLevel > oldLevel) {
+            // note: cancel() may invoke onChunkGenComplete synchronously here
+            if (newUnloaded) {
+                // need to cancel all tasks
+                // note: requested status must be set to null here before cancellation, to indicate to the
+                // completion logic that we do not want rescheduling to occur
+                this.requestedGenStatus = null;
+                this.cancelGenTask();
+            } else {
+                final ChunkStatus toCancel = maxGenerationStatusNew.getNextStatus();
+                final ChunkStatus currentRequestedStatus = this.requestedGenStatus;
+
+                if (currentRequestedStatus.isOrAfter(toCancel)) {
+                    // we do have to cancel something here
+                    // clamp requested status to the maximum
+                    if (this.currentGenStatus != null && this.currentGenStatus.isOrAfter(maxGenerationStatusNew)) {
+                        // already generated to status, so we must cancel
+                        this.requestedGenStatus = null;
+                        this.cancelGenTask();
+                    } else {
+                        // not generated to status, so we may have to cancel
+                        // note: gen task is always 1 status above current gen status if not null
+                        this.requestedGenStatus = maxGenerationStatusNew;
+                        if (this.generationTaskStatus != null && this.generationTaskStatus.isOrAfter(toCancel)) {
+                            // TODO is this even possible? I don't think so
+                            throw new IllegalStateException("?????");
+                        }
+                    }
+                }
+            }
+        }
+
+        if (newState != oldState) {
+            if (newState.isOrAfter(oldState)) {
+                // status upgrade
+                if (!oldState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
+                    // may need to schedule full load
+                    if (this.currentGenStatus != ChunkStatus.FULL) {
+                        if (this.requestedGenStatus != null) {
+                            this.requestedGenStatus = ChunkStatus.FULL;
+                        } else {
+                            this.scheduler.schedule(
+                                this.chunkX, this.chunkZ, ChunkStatus.FULL, this, scheduledTasks
+                            );
+                        }
+                    } else {
+                        // now we are fully loaded
+                        this.queueBorderFullStatus(true, changedLoadStatus);
+                    }
+                }
+            } else {
+                // status downgrade
+                if (!newState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING) && oldState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING)) {
+                    this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.ENTITY_TICKING, null);
+                }
+
+                if (!newState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING) && oldState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING)) {
+                    this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.TICKING, null);
+                }
+
+                if (!newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && oldState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
+                    this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.BORDER, null);
+                }
+            }
+        }
+
+        if (oldState != newState) {
+            if (this.onTicketUpdate(oldState, newState)) {
+                changedLoadStatus.add(this);
+            }
+        }
+
+        if (oldUnloaded != newUnloaded) {
+            this.checkUnload();
+        }
+    }
+
+    /*
+        For full chunks, vanilla just loads chunks around it up to FEATURES, 1 radius
+
+        For ticking chunks, it updates the persistent entity manager (soon to be completely nuked by EntitySliceManager, which
+        will also need to be updated but with far fewer implications)
+        It also shoves the scheduled block ticks into the tick scheduler
+
+        For entity ticking chunks, updates the entity manager (see above)
+     */
+
+    static final int NEIGHBOUR_RADIUS = 2;
+    private long fullNeighbourChunksLoadedBitset;
+
+    private static int getFullNeighbourIndex(final int relativeX, final int relativeZ) {
+        // index = (relativeX + NEIGHBOUR_CACHE_RADIUS) + (relativeZ + NEIGHBOUR_CACHE_RADIUS) * (NEIGHBOUR_CACHE_RADIUS * 2 + 1)
+        // optimised variant of the above by moving some of the ops to compile time
+        return relativeX + (relativeZ * (NEIGHBOUR_RADIUS * 2 + 1)) + (NEIGHBOUR_RADIUS + NEIGHBOUR_RADIUS * ((NEIGHBOUR_RADIUS * 2 + 1)));
+    }
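+    // worked example: with NEIGHBOUR_RADIUS = 2 the bitset covers a 5x5 grid, so
+    // index = (relativeX + 2) + (relativeZ + 2) * 5; e.g. (0, 0) -> 12, (-2, -2) -> 0, (2, 2) -> 24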
+    public final boolean isNeighbourFullLoaded(final int relativeX, final int relativeZ) {
+        return (this.fullNeighbourChunksLoadedBitset & (1L << getFullNeighbourIndex(relativeX, relativeZ))) != 0;
+    }
+
+    // returns true if this chunk changed full status
+    public final boolean setNeighbourFullLoaded(final int relativeX, final int relativeZ) {
+        final long before = this.fullNeighbourChunksLoadedBitset;
+        final int index = getFullNeighbourIndex(relativeX, relativeZ);
+        this.fullNeighbourChunksLoadedBitset |= (1L << index);
+        return this.onNeighbourChange(before, this.fullNeighbourChunksLoadedBitset);
+    }
+
+    // returns true if this chunk changed full status
+    public final boolean setNeighbourFullUnloaded(final int relativeX, final int relativeZ) {
+        final long before = this.fullNeighbourChunksLoadedBitset;
+        final int index = getFullNeighbourIndex(relativeX, relativeZ);
+        this.fullNeighbourChunksLoadedBitset &= ~(1L << index);
+        return this.onNeighbourChange(before, this.fullNeighbourChunksLoadedBitset);
+    }
+
+    public static boolean areNeighboursFullLoaded(final long bitset, final int radius) {
+        // index = relativeX + (relativeZ * (NEIGHBOUR_CACHE_RADIUS * 2 + 1)) + (NEIGHBOUR_CACHE_RADIUS + NEIGHBOUR_CACHE_RADIUS * ((NEIGHBOUR_CACHE_RADIUS * 2 + 1)))
+        switch (radius) {
+            case 0: {
+                return (bitset & (1L << getFullNeighbourIndex(0, 0))) != 0L;
+            }
+            case 1: {
+                long mask = 0L;
+                for (int dx = -1; dx <= 1; ++dx) {
+                    for (int dz = -1; dz <= 1; ++dz) {
+                        mask |= (1L << getFullNeighbourIndex(dx, dz));
+                    }
+                }
+                return (bitset & mask) == mask;
+            }
+            case 2: {
+                long mask = 0L;
+                for (int dx = -2; dx <= 2; ++dx) {
+                    for (int dz = -2; dz <= 2; ++dz) {
+                        mask |= (1L << getFullNeighbourIndex(dx, dz));
+                    }
+                }
+                return (bitset & mask) == mask;
+            }
+
+            default: {
+                throw new IllegalArgumentException("Radius not recognized: " + radius);
+            }
+        }
+    }
+
+    // upper 32 bits are the pending status, lower 32 bits are the current status
+    private volatile long chunkStatus;
+    private static final long PENDING_STATUS_MASK = Long.MIN_VALUE >> 31;
+    private static final ChunkHolder.FullChunkStatus[] CHUNK_STATUS_BY_ID = ChunkHolder.FullChunkStatus.values();
+    private static final VarHandle CHUNK_STATUS_HANDLE = ConcurrentUtil.getVarHandle(NewChunkHolder.class, "chunkStatus", long.class);
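+    // encoding sketch: the current status lives in the low 32 bits and the pending status in the high 32 bits,
+    // i.e. encoded = ((long)pending.ordinal() << 32) | (long)current.ordinal();
+    // PENDING_STATUS_MASK (Long.MIN_VALUE >> 31) sign-extends to 0xFFFFFFFF00000000L, selecting the pending half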
+
+    public static ChunkHolder.FullChunkStatus getCurrentChunkStatus(final long encoded) {
+        return CHUNK_STATUS_BY_ID[(int)encoded];
+    }
+
+    public static ChunkHolder.FullChunkStatus getPendingChunkStatus(final long encoded) {
+        return CHUNK_STATUS_BY_ID[(int)(encoded >>> 32)];
+    }
+
+    public ChunkHolder.FullChunkStatus getChunkStatus() {
+        return getCurrentChunkStatus(((long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this)));
+    }
+
+    public boolean isEntityTickingReady() {
+        return this.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING);
+    }
+
+    public boolean isTickingReady() {
+        return this.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.TICKING);
+    }
+
+    public boolean isFullChunkReady() {
+        return this.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER);
+    }
+
+    private static ChunkHolder.FullChunkStatus getStatusForBitset(final long bitset) {
+        if (areNeighboursFullLoaded(bitset, 2)) {
+            return ChunkHolder.FullChunkStatus.ENTITY_TICKING;
+        } else if (areNeighboursFullLoaded(bitset, 1)) {
+            return ChunkHolder.FullChunkStatus.TICKING;
+        } else if (areNeighboursFullLoaded(bitset, 0)) {
+            return ChunkHolder.FullChunkStatus.BORDER;
+        } else {
+            return ChunkHolder.FullChunkStatus.INACCESSIBLE;
+        }
+    }
+
+    // note: only while updating ticket level, so holds ticket update lock + scheduling lock
+    protected final boolean onTicketUpdate(final ChunkHolder.FullChunkStatus oldState, final ChunkHolder.FullChunkStatus newState) {
+        if (oldState == newState) {
+            return false;
+        }
+
+        // preserve border request after full status complete, as it does not set anything in the bitset
+        ChunkHolder.FullChunkStatus byNeighbours = getStatusForBitset(this.fullNeighbourChunksLoadedBitset);
+        if (byNeighbours == ChunkHolder.FullChunkStatus.INACCESSIBLE && newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && this.currentGenStatus == ChunkStatus.FULL) {
+            byNeighbours = ChunkHolder.FullChunkStatus.BORDER;
+        }
+
+        final ChunkHolder.FullChunkStatus toSet;
+
+        if (newState.isOrAfter(byNeighbours)) {
+            // must clamp to neighbours level, even though we have the ticket level
+            toSet = byNeighbours;
+        } else {
+            // must clamp to ticket level, even though we have the neighbours
+            toSet = newState;
+        }
+
+        long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this);
+
+        if (curr == ((long)toSet.ordinal() | ((long)toSet.ordinal() << 32))) {
+            // nothing to do
+            return false;
+        }
+
+        int failures = 0;
+        for (;;) {
+            final long update = (curr & ~PENDING_STATUS_MASK) | ((long)toSet.ordinal() << 32);
+            if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) {
+                return true;
+            }
+
+            ++failures;
+            for (int i = 0; i < failures; ++i) {
+                ConcurrentUtil.backoff();
+            }
+        }
+    }
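+    // note on the loop above (and the similar loops below): compareAndExchange returns the witnessed value, so
+    // "curr == (curr = ...compareAndExchange(curr, update))" both detects success and refreshes curr for the
+    // retry; failed attempts spin with a linearly growing number of ConcurrentUtil.backoff() calls to ease contention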
+
+    protected final boolean onNeighbourChange(final long bitsetBefore, final long bitsetAfter) {
+        ChunkHolder.FullChunkStatus oldState = getStatusForBitset(bitsetBefore);
+        ChunkHolder.FullChunkStatus newState = getStatusForBitset(bitsetAfter);
+        final ChunkHolder.FullChunkStatus currStateTicketLevel = ChunkHolder.getFullChunkStatus(this.oldTicketLevel);
+        if (oldState.isOrAfter(currStateTicketLevel)) {
+            oldState = currStateTicketLevel;
+        }
+        if (newState.isOrAfter(currStateTicketLevel)) {
+            newState = currStateTicketLevel;
+        }
+        // preserve border request after full status complete, as it does not set anything in the bitset
+        if (newState == ChunkHolder.FullChunkStatus.INACCESSIBLE && currStateTicketLevel.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && this.currentGenStatus == ChunkStatus.FULL) {
+            newState = ChunkHolder.FullChunkStatus.BORDER;
+        }
+
+        if (oldState == newState) {
+            return false;
+        }
+
+        int failures = 0;
+        for (long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this);;) {
+            final long update = (curr & ~PENDING_STATUS_MASK) | ((long)newState.ordinal() << 32);
+            if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) {
+                return true;
+            }
+
+            ++failures;
+            for (int i = 0; i < failures; ++i) {
+                ConcurrentUtil.backoff();
+            }
+        }
+    }
+
+    private boolean queueBorderFullStatus(final boolean loaded, final List<NewChunkHolder> changedFullStatus) {
+        final ChunkHolder.FullChunkStatus toStatus = loaded ? ChunkHolder.FullChunkStatus.BORDER : ChunkHolder.FullChunkStatus.INACCESSIBLE;
+
+        int failures = 0;
+        for (long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this);;) {
+            final ChunkHolder.FullChunkStatus currPending = getPendingChunkStatus(curr);
+            if (loaded && currPending != ChunkHolder.FullChunkStatus.INACCESSIBLE) {
+                throw new IllegalStateException("Expected " + ChunkHolder.FullChunkStatus.INACCESSIBLE + " for pending, but got " + currPending);
+            }
+
+            final long update = (curr & ~PENDING_STATUS_MASK) | ((long)toStatus.ordinal() << 32);
+            if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) {
+                if ((int)(update) != (int)(update >>> 32)) {
+                    changedFullStatus.add(this);
+                    return true;
+                }
+                return false;
+            }
+
+            ++failures;
+            for (int i = 0; i < failures; ++i) {
+                ConcurrentUtil.backoff();
+            }
+        }
+    }
+
+    // only call on main thread, must hold ticket level and scheduling lock
+    private void onFullChunkLoadChange(final boolean loaded, final List<NewChunkHolder> changedFullStatus) {
+        for (int dz = -NEIGHBOUR_RADIUS; dz <= NEIGHBOUR_RADIUS; ++dz) {
+            for (int dx = -NEIGHBOUR_RADIUS; dx <= NEIGHBOUR_RADIUS; ++dx) {
+                final NewChunkHolder holder = (dx | dz) == 0 ? this : this.scheduler.chunkHolderManager.getChunkHolder(dx + this.chunkX, dz + this.chunkZ);
+                if (loaded) {
+                    if (holder.setNeighbourFullLoaded(-dx, -dz)) {
+                        changedFullStatus.add(holder);
+                    }
+                } else {
+                    if (holder != null && holder.setNeighbourFullUnloaded(-dx, -dz)) {
+                        changedFullStatus.add(holder);
+                    }
+                }
+            }
+        }
+    }
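+    // note: on the load path every holder within NEIGHBOUR_RADIUS is dereferenced without a null check - a chunk
+    // reaching full status is expected to hold its neighbours loaded, so those holders should exist; on the
+    // unload path they may already be gone, hence the null check there only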
+
+    private ChunkHolder.FullChunkStatus updateCurrentState(final ChunkHolder.FullChunkStatus to) {
+        int failures = 0;
+        for (long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this);;) {
+            final long update = (curr & PENDING_STATUS_MASK) | (long)to.ordinal();
+            if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) {
+                return getPendingChunkStatus(curr);
+            }
+
+            ++failures;
+            for (int i = 0; i < failures; ++i) {
+                ConcurrentUtil.backoff();
+            }
+        }
+    }
+
+    private void changeEntityChunkStatus(final ChunkHolder.FullChunkStatus toStatus) {
+        this.world.getEntityLookup().chunkStatusChange(this.chunkX, this.chunkZ, toStatus);
+    }
+
+    private boolean processingFullStatus = false;
+
+    // only to be called on the main thread, no locks need to be held
+    public boolean handleFullStatusChange(final List<NewChunkHolder> changedFullStatus) {
+        TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Cannot update full status thread off-main");
+
+        boolean ret = false;
+
+        if (this.processingFullStatus) {
+            // we cannot process updates recursively
+            return ret;
+        }
+
+        // note: use opaque reads for chunk status read since we need it to be atomic
+
+        // test if anything changed
+        final long statusCheck = (long)CHUNK_STATUS_HANDLE.getOpaque((NewChunkHolder)this);
+        if ((int)statusCheck == (int)(statusCheck >>> 32)) {
+            // nothing changed
+            return ret;
+        }
+
+        final ChunkTaskScheduler scheduler = this.scheduler;
+        final ChunkHolderManager holderManager = scheduler.chunkHolderManager;
+        final int ticketKeep;
+        final Long ticketId;
+        holderManager.ticketLock.lock();
+        try {
+            ticketKeep = this.currentTicketLevel;
+            ticketId = Long.valueOf(holderManager.getNextStatusUpgradeId());
+            holderManager.addTicketAtLevel(TicketType.STATUS_UPGRADE, this.chunkX, this.chunkZ, ticketKeep, ticketId);
+        } finally {
+            holderManager.ticketLock.unlock();
+        }
+
+        this.processingFullStatus = true;
+        try {
+            for (;;) {
+                final long currStateEncoded = (long)CHUNK_STATUS_HANDLE.getOpaque((NewChunkHolder)this);
+                final ChunkHolder.FullChunkStatus currState = getCurrentChunkStatus(currStateEncoded);
+                ChunkHolder.FullChunkStatus nextState = getPendingChunkStatus(currStateEncoded);
+                if (currState == nextState) {
+                    if (nextState == ChunkHolder.FullChunkStatus.INACCESSIBLE) {
+                        this.scheduler.schedulingLock.lock();
+                        try {
+                            this.checkUnload();
+                        } finally {
+                            this.scheduler.schedulingLock.unlock();
+                        }
+                    }
+                    break;
+                }
+
+                // chunks cannot downgrade state while status is pending a change
+                final LevelChunk chunk = (LevelChunk)this.currentChunk;
+
+                // Note: we assume that only load/unload contain plugin logic
+                // plugin logic is anything stupid enough to possibly change the chunk status while it is already
+                // being changed (i.e. during load it is possible it will try to set to full ticking)
+                // in order to allow this change, we also need this plugin logic to be contained strictly after all
+                // of the chunk system load callbacks are invoked
+                if (nextState.isOrAfter(currState)) {
+                    // state upgrade
+                    if (!currState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && nextState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
+                        nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.BORDER);
+                        holderManager.ensureInAutosave(this);
+                        chunk.pushChunkIntoLoadedMap();
+                        this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.BORDER);
+                        chunk.onChunkLoad(this);
+                        this.onFullChunkLoadChange(true, changedFullStatus);
+                        this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.BORDER, chunk);
+                    }
+
+                    if (!currState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING) && nextState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING)) {
+                        nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.TICKING);
+                        this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.TICKING);
+                        chunk.onChunkTicking(this);
+                        this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.TICKING, chunk);
+                    }
+
+                    if (!currState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING) && nextState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING)) {
+                        nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.ENTITY_TICKING);
+                        this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.ENTITY_TICKING);
+                        chunk.onChunkEntityTicking(this);
+                        this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.ENTITY_TICKING, chunk);
+                    }
+                } else {
+                    if (currState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING) && !nextState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING)) {
+                        this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.TICKING);
+                        chunk.onChunkNotEntityTicking(this);
+                        nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.TICKING);
+                    }
+
+                    if (currState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING) && !nextState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING)) {
+                        this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.BORDER);
+                        chunk.onChunkNotTicking(this);
+                        nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.BORDER);
+                    }
+
+                    if (currState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && !nextState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
+                        this.onFullChunkLoadChange(false, changedFullStatus);
+                        this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.INACCESSIBLE);
+                        chunk.onChunkUnload(this);
+                        nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.INACCESSIBLE);
+                    }
+                }
+
+                ret = true;
+            }
+        } finally {
+            this.processingFullStatus = false;
+            holderManager.removeTicketAtLevel(TicketType.STATUS_UPGRADE, this.chunkX, this.chunkZ, ticketKeep, ticketId);
+        }
+
+        return ret;
+    }
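+    // note: the STATUS_UPGRADE ticket added above pins the ticket level at its current value while the
+    // pending -> current transitions are applied, so the level cannot drop out from under the callbacks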
+
+    // note: must hold scheduling lock
+    // returns true if the current requested gen status is not null (effectively, whether no further scheduling is needed)
+    boolean upgradeGenTarget(final ChunkStatus toStatus) {
+        if (toStatus == null) {
+            throw new NullPointerException("toStatus cannot be null");
+        }
+        if (this.requestedGenStatus == null && this.generationTask == null) {
+            return false;
+        }
+        if (this.requestedGenStatus == null || !this.requestedGenStatus.isOrAfter(toStatus)) {
+            this.requestedGenStatus = toStatus;
+        }
+        return true;
+    }
+
+    public void setGenerationTarget(final ChunkStatus toStatus) {
+        this.requestedGenStatus = toStatus;
+    }
+
+    public boolean hasGenerationTask() {
+        return this.generationTask != null;
+    }
+
+    public ChunkStatus getCurrentGenStatus() {
+        return this.currentGenStatus;
+    }
+
+    public ChunkStatus getRequestedGenStatus() {
+        return this.requestedGenStatus;
+    }
+
+    private final Reference2ObjectOpenHashMap<ChunkStatus, List<Consumer<ChunkAccess>>> statusWaiters = new Reference2ObjectOpenHashMap<>();
+
+    void addStatusConsumer(final ChunkStatus status, final Consumer<ChunkAccess> consumer) {
+        this.statusWaiters.computeIfAbsent(status, (final ChunkStatus keyInMap) -> {
+            return new ArrayList<>(4);
+        }).add(consumer);
+    }
+
+    private void completeStatusConsumers(ChunkStatus status, final ChunkAccess chunk) {
+        // need to tell future statuses to complete if cancelled
+        do {
+            this.completeStatusConsumers0(status, chunk);
+        } while (chunk == null && status != (status = status.getNextStatus()));
+    }
+
+    private void completeStatusConsumers0(final ChunkStatus status, final ChunkAccess chunk) {
+        final List<Consumer<ChunkAccess>> consumers;
+        consumers = this.statusWaiters.remove(status);
+
+        if (consumers == null) {
+            return;
+        }
+
+        // must be scheduled to main, we do not trust the callback to not do anything stupid
+        this.scheduler.scheduleChunkTask(this.chunkX, this.chunkZ, () -> {
+            for (final Consumer<ChunkAccess> consumer : consumers) {
+                try {
+                    consumer.accept(chunk);
+                } catch (final ThreadDeath thr) {
+                    throw thr;
+                } catch (final Throwable thr) {
+                    LOGGER.error("Failed to process chunk status callback", thr);
+                }
+            }
+        }, PrioritisedExecutor.Priority.HIGHEST);
+    }
+
+    private final Reference2ObjectOpenHashMap<ChunkHolder.FullChunkStatus, List<Consumer<LevelChunk>>> fullStatusWaiters = new Reference2ObjectOpenHashMap<>();
+
+    void addFullStatusConsumer(final ChunkHolder.FullChunkStatus status, final Consumer<LevelChunk> consumer) {
+        this.fullStatusWaiters.computeIfAbsent(status, (final ChunkHolder.FullChunkStatus keyInMap) -> {
+            return new ArrayList<>(4);
+        }).add(consumer);
+    }
+
+    private void completeFullStatusConsumers(ChunkHolder.FullChunkStatus status, final LevelChunk chunk) {
+        // need to tell future statuses to complete if cancelled
+        final ChunkHolder.FullChunkStatus max = CHUNK_STATUS_BY_ID[CHUNK_STATUS_BY_ID.length - 1];
+
+        for (;;) {
+            this.completeFullStatusConsumers0(status, chunk);
+            if (chunk != null || status == max) {
+                break;
+            }
+            status = CHUNK_STATUS_BY_ID[status.ordinal() + 1];
+        }
+    }
+
+    private void completeFullStatusConsumers0(final ChunkHolder.FullChunkStatus status, final LevelChunk chunk) {
+        final List<Consumer<LevelChunk>> consumers;
+        consumers = this.fullStatusWaiters.remove(status);
+
+        if (consumers == null) {
+            return;
+        }
+
+        // must be scheduled to main, we do not trust the callback to not do anything stupid
+        this.scheduler.scheduleChunkTask(this.chunkX, this.chunkZ, () -> {
+            for (final Consumer<LevelChunk> consumer : consumers) {
+                try {
+                    consumer.accept(chunk);
+                } catch (final ThreadDeath thr) {
+                    throw thr;
+                } catch (final Throwable thr) {
+                    LOGGER.error("Failed to process chunk status callback", thr);
+                }
+            }
+        }, PrioritisedExecutor.Priority.HIGHEST);
+    }
+
+    // note: must hold scheduling lock
+    private void onChunkGenComplete(final ChunkAccess newChunk, final ChunkStatus newStatus,
+                                    final List<ChunkProgressionTask> scheduleList, final List<NewChunkHolder> changedLoadStatus) {
+        if (!this.neighboursBlockingGenTask.isEmpty()) {
+            throw new IllegalStateException("Cannot have neighbours blocking this gen task");
+        }
+        if (newChunk != null || (this.requestedGenStatus == null || !this.requestedGenStatus.isOrAfter(newStatus))) {
+            this.completeStatusConsumers(newStatus, newChunk);
+        }
+        // done now, clear state (must be done before scheduling new tasks)
+        this.generationTask = null;
+        this.generationTaskStatus = null;
+        if (newChunk == null) {
+            // task was cancelled
+            // should be careful as this could be called while holding the schedule lock and/or inside the
+            // ticket level update
+            // while a task may be cancelled, it is possible for it to be later re-scheduled
+            // however, because generationTask is only set to null on _completion_, the scheduler leaves
+            // the rescheduling logic to us here
+            final ChunkStatus requestedGenStatus = this.requestedGenStatus;
+            this.requestedGenStatus = null;
+            if (requestedGenStatus != null) {
+                // it looks like it has been requested, so we must reschedule
+                if (!this.neighboursWaitingForUs.isEmpty()) {
+                    for (final Iterator<Reference2ObjectMap.Entry<NewChunkHolder, ChunkStatus>> iterator = this.neighboursWaitingForUs.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) {
+                        final Reference2ObjectMap.Entry<NewChunkHolder, ChunkStatus> entry = iterator.next();
+
+                        final NewChunkHolder chunkHolder = entry.getKey();
+                        final ChunkStatus toStatus = entry.getValue();
+
+                        if (!requestedGenStatus.isOrAfter(toStatus)) {
+                            // if we were cancelled, we are responsible for removing the waiter
+                            if (!chunkHolder.neighboursBlockingGenTask.remove(this)) {
+                                throw new IllegalStateException("Corrupt state");
+                            }
+                            if (chunkHolder.neighboursBlockingGenTask.isEmpty()) {
+                                chunkHolder.checkUnload();
+                            }
+                            iterator.remove();
+                            continue;
+                        }
+                    }
+                }
+
+                // note: only after generationTask -> null, generationTaskStatus -> null, and requestedGenStatus -> null
+                this.scheduler.schedule(
+                    this.chunkX, this.chunkZ, requestedGenStatus, this, scheduleList
+                );
+
+                // return, can't do anything further
+                return;
+            }
+
+            if (!this.neighboursWaitingForUs.isEmpty()) {
+                for (final NewChunkHolder chunkHolder : this.neighboursWaitingForUs.keySet()) {
+                    if (!chunkHolder.neighboursBlockingGenTask.remove(this)) {
+                        throw new IllegalStateException("Corrupt state");
+                    }
+                    if (chunkHolder.neighboursBlockingGenTask.isEmpty()) {
+                        chunkHolder.checkUnload();
+                    }
+                }
+                this.neighboursWaitingForUs.clear();
+            }
+            // reset priority, we have nothing left to generate to
+            this.setPriority(PrioritisedExecutor.Priority.NORMAL);
+            this.checkUnload();
+            return;
+        }
+
+        this.currentChunk = newChunk;
+        this.currentGenStatus = newStatus;
+        this.lastChunkCompletion = new ChunkCompletion(newChunk, newStatus);
+
+        final ChunkStatus requestedGenStatus = this.requestedGenStatus;
+
+        List<NewChunkHolder> needsScheduling = null;
+        boolean recalculatePriority = false;
+        for (final Iterator<Reference2ObjectMap.Entry<NewChunkHolder, ChunkStatus>> iterator
+                 = this.neighboursWaitingForUs.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) {
+            final Reference2ObjectMap.Entry<NewChunkHolder, ChunkStatus> entry = iterator.next();
+            final NewChunkHolder neighbour = entry.getKey();
+            final ChunkStatus requiredStatus = entry.getValue();
+
+            if (!newStatus.isOrAfter(requiredStatus)) {
+                if (requestedGenStatus == null || !requestedGenStatus.isOrAfter(requiredStatus)) {
+                    // if we're cancelled, still need to clear this map
+                    if (!neighbour.neighboursBlockingGenTask.remove(this)) {
+                        throw new IllegalStateException("Neighbour is not waiting for us?");
+                    }
+                    if (neighbour.neighboursBlockingGenTask.isEmpty()) {
+                        neighbour.checkUnload();
+                    }
+
+                    iterator.remove();
+                }
+                continue;
+            }
+
+            // doesn't matter what isCancelled is here, we need to schedule if we can
+
+            recalculatePriority = true;
+            if (!neighbour.neighboursBlockingGenTask.remove(this)) {
+                throw new IllegalStateException("Neighbour is not waiting for us?");
+            }
+
+            if (neighbour.neighboursBlockingGenTask.isEmpty()) {
+                if (neighbour.requestedGenStatus != null) {
+                    if (needsScheduling == null) {
+                        needsScheduling = new ArrayList<>();
+                    }
+                    needsScheduling.add(neighbour);
+                } else {
+                    neighbour.checkUnload();
+                }
+            }
+
+            // remove last; access to entry will throw if removed
+            iterator.remove();
+        }
+
+        if (newStatus == ChunkStatus.FULL) {
+            this.lockPriority();
+            // must use oldTicketLevel, we hold the schedule lock but not the ticket level lock
+            // however, schedule lock needs to be held for ticket level callback, so we're fine here
+            if (ChunkHolder.getFullChunkStatus(this.oldTicketLevel).isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
+                this.queueBorderFullStatus(true, changedLoadStatus);
+            }
+        }
+
+        if (recalculatePriority) {
+            this.recalculateNeighbourRequestedPriority();
+        }
+
+        if (requestedGenStatus != null && !newStatus.isOrAfter(requestedGenStatus)) {
+            this.scheduleNeighbours(needsScheduling, scheduleList);
+
+            // we need to schedule more tasks now
+            this.scheduler.schedule(
+                this.chunkX, this.chunkZ, requestedGenStatus, this, scheduleList
+            );
+        } else {
+            // we're done now
+            if (requestedGenStatus != null) {
+                this.requestedGenStatus = null;
+            }
+            // reached final stage, so stop scheduling now
+            this.setPriority(PrioritisedExecutor.Priority.NORMAL);
+            this.checkUnload();
+
+            this.scheduleNeighbours(needsScheduling, scheduleList);
+        }
+    }
+
+    private void scheduleNeighbours(final List<NewChunkHolder> needsScheduling, final List<ChunkProgressionTask> scheduleList) {
+        if (needsScheduling != null) {
+            for (int i = 0, len = needsScheduling.size(); i < len; ++i) {
+                final NewChunkHolder neighbour = needsScheduling.get(i);
+
+                this.scheduler.schedule(
+                    neighbour.chunkX, neighbour.chunkZ, neighbour.requestedGenStatus, neighbour, scheduleList
+                );
+            }
+        }
+    }
+
+    public void setGenerationTask(final ChunkProgressionTask generationTask, final ChunkStatus taskStatus,
+                                  final List<NewChunkHolder> neighbours) {
+        if (this.generationTask != null || (this.currentGenStatus != null && this.currentGenStatus.isOrAfter(taskStatus))) {
+            throw new IllegalStateException("Currently generating or provided task is trying to generate to a level we are already at!");
+        }
+        if (this.requestedGenStatus == null || !this.requestedGenStatus.isOrAfter(taskStatus)) {
+            throw new IllegalStateException("Cannot schedule generation task when not requested");
+        }
+        this.generationTask = generationTask;
+        this.generationTaskStatus = taskStatus;
+
+        for (int i = 0, len = neighbours.size(); i < len; ++i) {
+            neighbours.get(i).addNeighbourUsingChunk();
+        }
+
+        this.checkUnload();
+
+        generationTask.onComplete((final ChunkAccess access, final Throwable thr) -> {
+            if (generationTask != this.generationTask) {
+                throw new IllegalStateException(
+                    "Cannot complete generation task '" + generationTask + "' because we are waiting on '" + this.generationTask + "' instead!"
+                );
+            }
+            if (thr != null) {
+                if (this.genTaskException != null) {
+                    // first one is probably the TRUE problem
+                    return;
+                }
+                // don't set generation task to null, so that scheduling will not attempt to create another task and it
+                // will automatically block any further scheduling usage of this chunk as it will wait forever for a failed
+                // task to complete
+                this.genTaskException = thr;
+                this.failedGenStatus = taskStatus;
+                this.genTaskFailedThread = Thread.currentThread();
+
+                this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of(
+                    "Generation task", ChunkTaskScheduler.stringIfNull(generationTask),
+                    "Task to status", ChunkTaskScheduler.stringIfNull(taskStatus)
+                ), thr);
+                return;
+            }
+
+            final boolean scheduleTasks;
+            List<ChunkProgressionTask> tasks = ChunkHolderManager.getCurrentTicketUpdateScheduling();
+            if (tasks == null) {
+                scheduleTasks = true;
+                tasks = new ArrayList<>();
+            } else {
+                scheduleTasks = false;
+                // we are currently updating ticket levels, so we already hold the schedule lock
+                // this means we have to leave the ticket level update to handle the scheduling
+            }
+            final List<NewChunkHolder> changedLoadStatus = new ArrayList<>();
+            this.scheduler.schedulingLock.lock();
+            try {
+                for (int i = 0, len = neighbours.size(); i < len; ++i) {
+                    neighbours.get(i).removeNeighbourUsingChunk();
+                }
+                this.onChunkGenComplete(access, taskStatus, tasks, changedLoadStatus);
+            } finally {
+                this.scheduler.schedulingLock.unlock();
+            }
+            this.scheduler.chunkHolderManager.addChangedStatuses(changedLoadStatus);
+
+            if (scheduleTasks) {
+                // can't hold the lock while scheduling, so we have to build the tasks and then schedule after
+                for (int i = 0, len = tasks.size(); i < len; ++i) {
+                    tasks.get(i).schedule();
+                }
+            }
+        });
+    }
+
+    public PoiChunk getPoiChunk() {
+        return this.poiChunk;
+    }
+
+    public ChunkEntitySlices getEntityChunk() {
+        return this.entityChunk;
+    }
+
+    public long lastAutoSave;
+
+    public static final record SaveStat(boolean savedChunk, boolean savedEntityChunk, boolean savedPoiChunk) {}
+
+    public SaveStat save(final boolean shutdown, final boolean unloading) {
+        TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Cannot save data off-main");
+
+        ChunkAccess chunk = this.getCurrentChunk();
+        PoiChunk poi = this.getPoiChunk();
+        ChunkEntitySlices entities = this.getEntityChunk();
+        boolean executedUnloadTask = false;
+
+        if (shutdown) {
+            // make sure that the async unloads complete
+            if (this.unloadState != null) {
+                // must have errored during unload
+                chunk = this.unloadState.chunk();
+                poi = this.unloadState.poiChunk();
+                entities = this.unloadState.entityChunk();
+            }
+            final UnloadTask chunkUnloadTask = this.chunkDataUnload;
+            final DelayedPrioritisedTask chunkDataUnloadTask = chunkUnloadTask == null ? null : chunkUnloadTask.task();
+            if (chunkDataUnloadTask != null) {
+                final PrioritisedExecutor.PrioritisedTask unloadTask = chunkDataUnloadTask.getTask();
+                if (unloadTask != null) {
+                    executedUnloadTask = unloadTask.execute();
+                }
+            }
+        }
+
+        boolean canSaveChunk = !(chunk instanceof LevelChunk levelChunk && levelChunk.mustNotSave) &&
+            (chunk != null && ((shutdown || chunk instanceof LevelChunk) && chunk.isUnsaved()));
+        boolean canSavePOI = !(chunk instanceof LevelChunk levelChunk && levelChunk.mustNotSave) && (poi != null && poi.isDirty());
+        boolean canSaveEntities = entities != null;
+
+        try (co.aikar.timings.Timing ignored = this.world.timings.chunkSave.startTiming()) { // Paper
+            if (canSaveChunk) {
+                canSaveChunk = this.saveChunk(chunk, unloading);
+            }
+            if (canSavePOI) {
+                canSavePOI = this.savePOI(poi, unloading);
+            }
+            if (canSaveEntities) {
+                // on shutdown, we need to force transient entity chunks to save
+                canSaveEntities = this.saveEntities(entities, unloading || shutdown);
+                if (unloading || shutdown) {
+                    this.lastEntityUnload = null;
+                }
+            }
+        }
+
+        return executedUnloadTask | canSaveChunk | canSaveEntities | canSavePOI ? new SaveStat(executedUnloadTask || canSaveChunk, canSaveEntities, canSavePOI) : null;
+    }
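+    // note: a null return from save() means nothing was written and no pending unload task was executed here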
+
+    static final class AsyncChunkSerializeTask implements Runnable {
+
+        private final ServerLevel world;
+        private final ChunkAccess chunk;
+        private final ChunkSerializer.AsyncSaveData asyncSaveData;
+        private final NewChunkHolder toComplete;
+
+        public AsyncChunkSerializeTask(final ServerLevel world, final ChunkAccess chunk, final ChunkSerializer.AsyncSaveData asyncSaveData,
+                                       final NewChunkHolder toComplete) {
+            this.world = world;
+            this.chunk = chunk;
+            this.asyncSaveData = asyncSaveData;
+            this.toComplete = toComplete;
+        }
+
+        @Override
+        public void run() {
+            final CompoundTag toSerialize;
+            try {
+                toSerialize = ChunkSerializer.saveChunk(this.world, this.chunk, this.asyncSaveData);
+            } catch (final ThreadDeath death) {
+                throw death;
+            } catch (final Throwable throwable) {
+                LOGGER.error("Failed to asynchronously save chunk " + this.chunk.getPos() + " for world '" + this.world.getWorld().getName() + "', falling back to synchronous save", throwable);
+                this.world.chunkTaskScheduler.scheduleChunkTask(this.chunk.locX, this.chunk.locZ, () -> {
+                    final CompoundTag synchronousSave;
+                    try {
+                        synchronousSave = ChunkSerializer.saveChunk(AsyncChunkSerializeTask.this.world, AsyncChunkSerializeTask.this.chunk, AsyncChunkSerializeTask.this.asyncSaveData);
+                    } catch (final ThreadDeath death) {
+                        throw death;
+                    } catch (final Throwable throwable2) {
+                        LOGGER.error("Failed to synchronously save chunk " + AsyncChunkSerializeTask.this.chunk.getPos() + " for world '" + AsyncChunkSerializeTask.this.world.getWorld().getName() + "', chunk data will be lost", throwable2);
+                        AsyncChunkSerializeTask.this.toComplete.completeAsyncChunkDataSave(null);
+                        return;
+                    }
+
+                    AsyncChunkSerializeTask.this.toComplete.completeAsyncChunkDataSave(synchronousSave);
+                    LOGGER.info("Successfully serialized chunk " + AsyncChunkSerializeTask.this.chunk.getPos() + " for world '" + AsyncChunkSerializeTask.this.world.getWorld().getName() + "' synchronously");
+
+                }, PrioritisedExecutor.Priority.HIGHEST);
+                return;
+            }
+            this.toComplete.completeAsyncChunkDataSave(toSerialize);
+        }
+
+        @Override
+        public String toString() {
+            return "AsyncChunkSerializeTask{" +
+                "chunk={pos=" + this.chunk.getPos() + ",world=\"" + this.world.getWorld().getName() + "\"}" +
+                "}";
+        }
+    }
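+    // design note: serialization is attempted off-main first; on failure the same AsyncSaveData snapshot is
+    // retried on the chunk's main thread at HIGHEST priority, and only if that also fails is the data dropped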
+
+    private boolean saveChunk(final ChunkAccess chunk, final boolean unloading) {
+        if (!chunk.isUnsaved()) {
+            if (unloading) {
+                this.completeAsyncChunkDataSave(null);
+            }
+            return false;
+        }
+        boolean completing = false;
+        try {
+            if (unloading) {
+                try {
+                    final ChunkSerializer.AsyncSaveData asyncSaveData = ChunkSerializer.getAsyncSaveData(this.world, chunk);
+
+                    final PrioritisedExecutor.PrioritisedTask task = this.scheduler.loadExecutor.createTask(new AsyncChunkSerializeTask(this.world, chunk, asyncSaveData, this));
+
+                    this.chunkDataUnload.task().setTask(task);
+
+                    task.queue();
+
+                    chunk.setUnsaved(false);
+
+                    return true;
+                } catch (final ThreadDeath death) {
+                    throw death;
+                } catch (final Throwable thr) {
+                    LOGGER.error("Failed to prepare async chunk data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "', falling back to synchronous save", thr);
+                    // fall through to synchronous save
+                }
+            }
+
+            final CompoundTag save = ChunkSerializer.saveChunk(this.world, chunk, null);
+
+            if (unloading) {
+                completing = true;
+                this.completeAsyncChunkDataSave(save);
+                LOGGER.info("Successfully serialized chunk data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "' synchronously");
+            } else {
+                RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, save, RegionFileIOThread.RegionFileType.CHUNK_DATA);
+            }
+            chunk.setUnsaved(false);
+        } catch (final ThreadDeath death) {
+            throw death;
+        } catch (final Throwable thr) {
+            LOGGER.error("Failed to save chunk data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr);
+            if (unloading && !completing) {
+                this.completeAsyncChunkDataSave(null);
+            }
+        }
+
+        return true;
+    }
+
+    private boolean lastEntitySaveNull;
+    private CompoundTag lastEntityUnload;
+    private boolean saveEntities(final ChunkEntitySlices entities, final boolean unloading) {
+        try {
+            CompoundTag mergeFrom = null;
+            if (entities.isTransient()) {
+                if (!unloading) {
+                    // if we're a transient chunk, we cannot save until unloading because otherwise a double save will
+                    // result in double adding the entities
+                    return false;
+                }
+                try {
+                    mergeFrom = RegionFileIOThread.loadData(this.world, this.chunkX, this.chunkZ, RegionFileIOThread.RegionFileType.ENTITY_DATA, PrioritisedExecutor.Priority.BLOCKING);
+                } catch (final Exception ex) {
+                    LOGGER.error("Cannot merge transient entities for chunk (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "', data on disk will be replaced", ex);
+                }
+            }
+
+            final CompoundTag save = entities.save();
+            if (mergeFrom != null) {
+                if (save == null) {
+                    // don't override the data on disk with nothing
+                    return false;
+                } else {
+                    EntityStorage.copyEntities(mergeFrom, save);
+                }
+            }
+            if (save == null && this.lastEntitySaveNull) {
+                return false;
+            }
+
+            RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, save, RegionFileIOThread.RegionFileType.ENTITY_DATA);
+            this.lastEntitySaveNull = save == null;
+            if (unloading) {
+                this.lastEntityUnload = save;
+            }
+        } catch (final ThreadDeath death) {
+            throw death;
+        } catch (final Throwable thr) {
+            LOGGER.error("Failed to save entity data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr);
+        }
+
+        return true;
+    }
+
+    private boolean lastPoiSaveNull;
+    private boolean savePOI(final PoiChunk poi, final boolean unloading) {
+        try {
+            final CompoundTag save = poi.save();
+            poi.setDirty(false);
+            if (save == null && this.lastPoiSaveNull) {
+                if (unloading) {
+                    this.poiDataUnload.completable().complete(null);
+                }
+                return false;
+            }
+
+            RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, save, RegionFileIOThread.RegionFileType.POI_DATA);
+            this.lastPoiSaveNull = save == null;
+            if (unloading) {
+                this.poiDataUnload.completable().complete(save);
+            }
+        } catch (final ThreadDeath death) {
+            throw death;
+        } catch (final Throwable thr) {
+            LOGGER.error("Failed to save poi data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr);
+        }
+
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        final ChunkCompletion lastCompletion = this.lastChunkCompletion;
+        final ChunkEntitySlices entityChunk = this.entityChunk;
+        final long chunkStatus = this.chunkStatus;
+        final int fullChunkStatus = (int)chunkStatus;
+        final int pendingChunkStatus = (int)(chunkStatus >>> 32);
+        final ChunkHolder.FullChunkStatus currentFullStatus = fullChunkStatus < 0 || fullChunkStatus >= CHUNK_STATUS_BY_ID.length ? null : CHUNK_STATUS_BY_ID[fullChunkStatus];
+        final ChunkHolder.FullChunkStatus pendingFullStatus = pendingChunkStatus < 0 || pendingChunkStatus >= CHUNK_STATUS_BY_ID.length ? null : CHUNK_STATUS_BY_ID[pendingChunkStatus];
+        return "NewChunkHolder{" +
+            "world=" + this.world.getWorld().getName() +
+            ", chunkX=" + this.chunkX +
+            ", chunkZ=" + this.chunkZ +
+            ", entityChunkFromDisk=" + (entityChunk != null && !entityChunk.isTransient()) +
+            ", lastChunkCompletion={chunk_class=" + (lastCompletion == null || lastCompletion.chunk() == null ? "null" : lastCompletion.chunk().getClass().getName()) + ",status=" + (lastCompletion == null ? "null" : lastCompletion.genStatus()) + "}" +
+            ", currentGenStatus=" + this.currentGenStatus +
+            ", requestedGenStatus=" + this.requestedGenStatus +
+            ", generationTask=" + this.generationTask +
+            ", generationTaskStatus=" + this.generationTaskStatus +
+            ", priority=" + this.priority +
+            ", priorityLocked=" + this.priorityLocked +
+            ", neighbourRequestedPriority=" + this.neighbourRequestedPriority +
+            ", effective_priority=" + this.getEffectivePriority() +
+            ", oldTicketLevel=" + this.oldTicketLevel +
+            ", currentTicketLevel=" + this.currentTicketLevel +
+            ", totalNeighboursUsingThisChunk=" + this.totalNeighboursUsingThisChunk +
+            ", fullNeighbourChunksLoadedBitset=" + this.fullNeighbourChunksLoadedBitset +
+            ", chunkStatusRaw=" + chunkStatus +
+            ", currentChunkStatus=" + currentFullStatus +
+            ", pendingChunkStatus=" + pendingFullStatus +
+            ", is_unload_safe=" + this.isSafeToUnload() +
+            ", killed=" + this.killed +
+            '}';
+    }
+
+    private static JsonElement serializeCompletable(final Completable<?> completable) {
+        if (completable == null) {
+            return new JsonPrimitive("null");
+        }
+
+        final JsonObject ret = new JsonObject();
+        final boolean isCompleted = completable.isCompleted();
+        ret.addProperty("completed", Boolean.valueOf(isCompleted));
+
+        if (isCompleted) {
+            ret.addProperty("completed_exceptionally", Boolean.valueOf(completable.getThrowable() != null));
+        }
+
+        return ret;
+    }
+
+    // holds ticket and scheduling lock
+    public JsonObject getDebugJson() {
+        final JsonObject ret = new JsonObject();
+
+        final ChunkCompletion lastCompletion = this.lastChunkCompletion;
+        final ChunkEntitySlices slices = this.entityChunk;
+        final PoiChunk poiChunk = this.poiChunk;
+
+        ret.addProperty("chunkX", Integer.valueOf(this.chunkX));
+        ret.addProperty("chunkZ", Integer.valueOf(this.chunkZ));
+        ret.addProperty("entity_chunk", slices == null ? "null" : "transient=" + slices.isTransient());
+        ret.addProperty("poi_chunk", "null=" + (poiChunk == null));
+        ret.addProperty("completed_chunk_class", lastCompletion == null ? "null" : lastCompletion.chunk().getClass().getName());
+        ret.addProperty("completed_gen_status", lastCompletion == null ? "null" : lastCompletion.genStatus().toString());
+        ret.addProperty("priority", Objects.toString(this.priority));
+        ret.addProperty("neighbour_requested_priority", Objects.toString(this.neighbourRequestedPriority));
+        ret.addProperty("generation_task", Objects.toString(this.generationTask));
+        ret.addProperty("is_safe_unload", Objects.toString(this.isSafeToUnload()));
+        ret.addProperty("old_ticket_level", Integer.valueOf(this.oldTicketLevel));
+        ret.addProperty("current_ticket_level", Integer.valueOf(this.currentTicketLevel));
+        ret.addProperty("neighbours_using_chunk", Integer.valueOf(this.totalNeighboursUsingThisChunk));
+
+        final JsonObject neighbourWaitState = new JsonObject();
+        ret.add("neighbour_state", neighbourWaitState);
+
+        final JsonArray blockingGenNeighbours = new JsonArray();
+        neighbourWaitState.add("blocking_gen_task", blockingGenNeighbours);
+        for (final NewChunkHolder blockingGenNeighbour : this.neighboursBlockingGenTask) {
+            final JsonObject neighbour = new JsonObject();
+            blockingGenNeighbours.add(neighbour);
+
+            neighbour.addProperty("chunkX", Integer.valueOf(blockingGenNeighbour.chunkX));
+            neighbour.addProperty("chunkZ", Integer.valueOf(blockingGenNeighbour.chunkZ));
+        }
+
+        final JsonArray neighboursWaitingForUs = new JsonArray();
+        neighbourWaitState.add("neighbours_waiting_on_us", neighboursWaitingForUs);
+        for (final Reference2ObjectMap.Entry<NewChunkHolder, ChunkStatus> entry : this.neighboursWaitingForUs.reference2ObjectEntrySet()) {
+            final NewChunkHolder holder = entry.getKey();
+            final ChunkStatus status = entry.getValue();
+
+            final JsonObject neighbour = new JsonObject();
+            neighboursWaitingForUs.add(neighbour);
+
+            neighbour.addProperty("chunkX", Integer.valueOf(holder.chunkX));
+            neighbour.addProperty("chunkZ", Integer.valueOf(holder.chunkZ));
+            neighbour.addProperty("waiting_for", Objects.toString(status));
+        }
+
+        ret.addProperty("fullchunkstatus", Objects.toString(this.getChunkStatus()));
+        ret.addProperty("fullchunkstatus_raw", Long.valueOf(this.chunkStatus));
+        ret.addProperty("generation_task", Objects.toString(this.generationTask));
+        ret.addProperty("requested_generation", Objects.toString(this.requestedGenStatus));
+        ret.addProperty("has_entity_load_task", Boolean.valueOf(this.entityDataLoadTask != null));
+        ret.addProperty("has_poi_load_task", Boolean.valueOf(this.poiDataLoadTask != null));
+
+        final UnloadTask entityDataUnload = this.entityDataUnload;
+        final UnloadTask poiDataUnload = this.poiDataUnload;
+        final UnloadTask chunkDataUnload = this.chunkDataUnload;
+
+        ret.add("entity_unload_completable", serializeCompletable(entityDataUnload == null ? null : entityDataUnload.completable()));
+        ret.add("poi_unload_completable", serializeCompletable(poiDataUnload == null ? null : poiDataUnload.completable()));
+        ret.add("chunk_unload_completable", serializeCompletable(chunkDataUnload == null ? null : chunkDataUnload.completable()));
+
+        final DelayedPrioritisedTask unloadTask = chunkDataUnload == null ? null : chunkDataUnload.task();
+        if (unloadTask == null) {
+            ret.addProperty("unload_task_priority", "null");
+            ret.addProperty("unload_task_priority_raw", "null");
+        } else {
+            ret.addProperty("unload_task_priority", Objects.toString(unloadTask.getPriority()));
+            ret.addProperty("unload_task_priority_raw", Integer.valueOf(unloadTask.getPriorityInternal()));
+        }
+
+        ret.addProperty("killed", Boolean.valueOf(this.killed));
+
+        return ret;
+    }
+}
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/PriorityHolder.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/PriorityHolder.java
new file mode 100644
index 0000000000000000000000000000000000000000..b4c56bf12dc8dd17452210ece4fd67411cc6b2fd
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/PriorityHolder.java
@@ -0,0 +1,215 @@
+package io.papermc.paper.chunk.system.scheduling;
+
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import java.lang.invoke.VarHandle;
+
+public abstract class PriorityHolder {
+
+ protected volatile int priority;
+ protected static final VarHandle PRIORITY_HANDLE = ConcurrentUtil.getVarHandle(PriorityHolder.class, "priority", int.class);
+
+ protected static final int PRIORITY_SCHEDULED = Integer.MIN_VALUE >>> 0;
+ protected static final int PRIORITY_EXECUTED = Integer.MIN_VALUE >>> 1;
+
+ protected final int getPriorityVolatile() {
+ return (int)PRIORITY_HANDLE.getVolatile((PriorityHolder)this);
+ }
+
+ protected final int compareAndExchangePriorityVolatile(final int expect, final int update) {
+ return (int)PRIORITY_HANDLE.compareAndExchange((PriorityHolder)this, (int)expect, (int)update);
+ }
+
+ protected final int getAndOrPriorityVolatile(final int val) {
+ return (int)PRIORITY_HANDLE.getAndBitwiseOr((PriorityHolder)this, (int)val);
+ }
+
+ protected final void setPriorityPlain(final int val) {
+ PRIORITY_HANDLE.set((PriorityHolder)this, (int)val);
+ }
+
+ protected PriorityHolder(final PrioritisedExecutor.Priority priority) {
+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+ this.setPriorityPlain(priority.priority);
+ }
+
+ // used only for debug json
+ public boolean isScheduled() {
+ return (this.getPriorityVolatile() & PRIORITY_SCHEDULED) != 0;
+ }
+
+ // returns false if cancelled
+ protected boolean markExecuting() {
+ return (this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) == 0;
+ }
+
+ protected boolean isMarkedExecuted() {
+ return (this.getPriorityVolatile() & PRIORITY_EXECUTED) != 0;
+ }
+
+ public void cancel() {
+ if ((this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) != 0) {
+ // cancelled already
+ return;
+ }
+ this.cancelScheduled();
+ }
+
+ public void schedule() {
+ int priority = this.getPriorityVolatile();
+
+ if ((priority & PRIORITY_SCHEDULED) != 0) {
+ throw new IllegalStateException("schedule() called twice");
+ }
+
+ if ((priority & PRIORITY_EXECUTED) != 0) {
+ // cancelled
+ return;
+ }
+
+ this.scheduleTask(PrioritisedExecutor.Priority.getPriority(priority));
+
+ int failures = 0;
+ for (;;) {
+ if (priority == (priority = this.compareAndExchangePriorityVolatile(priority, priority | PRIORITY_SCHEDULED))) {
+ return;
+ }
+
+ if ((priority & PRIORITY_SCHEDULED) != 0) {
+ throw new IllegalStateException("schedule() called twice");
+ }
+
+ if ((priority & PRIORITY_EXECUTED) != 0) {
+ // cancelled or executed
+ return;
+ }
+
+ this.setPriorityScheduled(PrioritisedExecutor.Priority.getPriority(priority));
+
+ ++failures;
+ for (int i = 0; i < failures; ++i) {
+ ConcurrentUtil.backoff();
+ }
+ }
+ }
+
+ public final PrioritisedExecutor.Priority getPriority() {
+ final int ret = this.getPriorityVolatile();
+ if ((ret & PRIORITY_EXECUTED) != 0) {
+ return PrioritisedExecutor.Priority.COMPLETING;
+ }
+ if ((ret & PRIORITY_SCHEDULED) != 0) {
+ return this.getScheduledPriority();
+ }
+ return PrioritisedExecutor.Priority.getPriority(ret);
+ }
+
+ public final void lowerPriority(final PrioritisedExecutor.Priority priority) {
+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+
+ int failures = 0;
+ for (int curr = this.getPriorityVolatile();;) {
+ if ((curr & PRIORITY_EXECUTED) != 0) {
+ return;
+ }
+
+ if ((curr & PRIORITY_SCHEDULED) != 0) {
+ this.lowerPriorityScheduled(priority);
+ return;
+ }
+
+ if (!priority.isLowerPriority(curr)) {
+ return;
+ }
+
+ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority))) {
+ return;
+ }
+
+ // failed, retry
+
+ ++failures;
+ for (int i = 0; i < failures; ++i) {
+ ConcurrentUtil.backoff();
+ }
+ }
+ }
+
+ public final void setPriority(final PrioritisedExecutor.Priority priority) {
+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+
+ int failures = 0;
+ for (int curr = this.getPriorityVolatile();;) {
+ if ((curr & PRIORITY_EXECUTED) != 0) {
+ return;
+ }
+
+ if ((curr & PRIORITY_SCHEDULED) != 0) {
+ this.setPriorityScheduled(priority);
+ return;
+ }
+
+ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority))) {
+ return;
+ }
+
+ // failed, retry
+
+ ++failures;
+ for (int i = 0; i < failures; ++i) {
+ ConcurrentUtil.backoff();
+ }
+ }
+ }
+
+ public final void raisePriority(final PrioritisedExecutor.Priority priority) {
+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+
+ int failures = 0;
+ for (int curr = this.getPriorityVolatile();;) {
+ if ((curr & PRIORITY_EXECUTED) != 0) {
+ return;
+ }
+
+ if ((curr & PRIORITY_SCHEDULED) != 0) {
+ this.raisePriorityScheduled(priority);
+ return;
+ }
+
+ if (!priority.isHigherPriority(curr)) {
+ return;
+ }
+
+ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority))) {
+ return;
+ }
+
+ // failed, retry
+
+ ++failures;
+ for (int i = 0; i < failures; ++i) {
+ ConcurrentUtil.backoff();
+ }
+ }
+ }
+
+ protected abstract void cancelScheduled();
+
+ protected abstract PrioritisedExecutor.Priority getScheduledPriority();
+
+ protected abstract void scheduleTask(final PrioritisedExecutor.Priority priority);
+
+ protected abstract void lowerPriorityScheduled(final PrioritisedExecutor.Priority priority);
+
+ protected abstract void setPriorityScheduled(final PrioritisedExecutor.Priority priority);
+
+ protected abstract void raisePriorityScheduled(final PrioritisedExecutor.Priority priority);
+}
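PriorityHolder packs two lifecycle flags, PRIORITY_SCHEDULED (bit 31) and PRIORITY_EXECUTED (bit 30), into the same int as the numeric priority, so a single atomic compare-and-exchange observes the priority and the lifecycle state together and the retry loops above can never race a concurrent cancel or schedule. A minimal, self-contained sketch of the same idea, using AtomicInteger in place of the VarHandle plumbing (class and method names here are illustrative only, not part of the patch):

import java.util.concurrent.atomic.AtomicInteger;

// Simplified model of the PriorityHolder bit layout: bit 31 = scheduled,
// bit 30 = executed, remaining low bits = the numeric priority. This sketch
// assumes lower numeric values mean higher priority.
final class FlaggedPriority {
    private static final int SCHEDULED = Integer.MIN_VALUE;        // 0x80000000
    private static final int EXECUTED  = Integer.MIN_VALUE >>> 1;  // 0x40000000
    private final AtomicInteger state;

    FlaggedPriority(final int priority) {
        this.state = new AtomicInteger(priority);
    }

    // Raise the priority only while the task is neither scheduled nor executed,
    // retrying on contention - the same loop shape as raisePriority() above.
    boolean tryRaise(final int newPriority) {
        for (int curr = this.state.get();;) {
            if ((curr & (SCHEDULED | EXECUTED)) != 0) {
                return false; // ownership has transferred to the scheduler/executor
            }
            if (newPriority >= curr) {
                return false; // not actually a raise under this sketch's convention
            }
            final int witness = this.state.compareAndExchange(curr, newPriority);
            if (witness == curr) {
                return true;
            }
            curr = witness; // lost the race, re-evaluate the new state
        }
    }

    // Mark executed exactly once; returns false if something else already did,
    // mirroring markExecuting() above.
    boolean markExecuted() {
        return (this.state.getAndUpdate(v -> v | EXECUTED) & EXECUTED) == 0;
    }
}

The patch additionally backs off once per accumulated CAS failure (ConcurrentUtil.backoff()); the bare retry loop above behaves the same under light contention and is omitted only for brevity.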
diff --git a/src/main/java/io/papermc/paper/command/PaperCommand.java b/src/main/java/io/papermc/paper/command/PaperCommand.java
index c9a2ac696f7cefc8b0715f53db3fc541f26b62f6..1e9105cf5ab2ff0ee847fafd00b41e1bd47f1d9e 100644
--- a/src/main/java/io/papermc/paper/command/PaperCommand.java
+++ b/src/main/java/io/papermc/paper/command/PaperCommand.java
@@ -1,5 +1,6 @@
package io.papermc.paper.command;

+import io.papermc.paper.command.subcommands.ChunkDebugCommand;
import io.papermc.paper.command.subcommands.EntityCommand;
import io.papermc.paper.command.subcommands.FixLightCommand;
import io.papermc.paper.command.subcommands.HeapDumpCommand;
@@ -42,6 +43,7 @@ public final class PaperCommand extends Command {
commands.put(Set.of("reload"), new ReloadCommand());
commands.put(Set.of("version"), new VersionCommand());
commands.put(Set.of("fixlight"), new FixLightCommand());
+ commands.put(Set.of("debug", "chunkinfo", "holderinfo"), new ChunkDebugCommand());

return commands.entrySet().stream()
.flatMap(entry -> entry.getKey().stream().map(s -> Map.entry(s, entry.getValue())))
diff --git a/src/main/java/io/papermc/paper/command/subcommands/ChunkDebugCommand.java b/src/main/java/io/papermc/paper/command/subcommands/ChunkDebugCommand.java
new file mode 100644
index 0000000000000000000000000000000000000000..a6fb7ae77d7cad2243e28a33718e4631f65697fa
--- /dev/null
+++ b/src/main/java/io/papermc/paper/command/subcommands/ChunkDebugCommand.java
@@ -0,0 +1,264 @@
+package io.papermc.paper.command.subcommands;
+
+import io.papermc.paper.command.CommandUtil;
+import io.papermc.paper.command.PaperSubcommand;
+import java.io.File;
+import java.time.LocalDateTime;
+import java.time.format.DateTimeFormatter;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+import io.papermc.paper.util.MCUtil;
+import net.minecraft.server.MinecraftServer;
+import net.minecraft.server.level.ChunkHolder;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.world.level.chunk.ChunkAccess;
+import net.minecraft.world.level.chunk.ImposterProtoChunk;
+import net.minecraft.world.level.chunk.LevelChunk;
+import net.minecraft.world.level.chunk.ProtoChunk;
+import org.bukkit.Bukkit;
+import org.bukkit.command.CommandSender;
+import org.bukkit.craftbukkit.CraftWorld;
+import org.checkerframework.checker.nullness.qual.NonNull;
+import org.checkerframework.checker.nullness.qual.Nullable;
+import org.checkerframework.framework.qual.DefaultQualifier;
+
+import static net.kyori.adventure.text.Component.text;
+import static net.kyori.adventure.text.format.NamedTextColor.BLUE;
+import static net.kyori.adventure.text.format.NamedTextColor.DARK_AQUA;
+import static net.kyori.adventure.text.format.NamedTextColor.GREEN;
+import static net.kyori.adventure.text.format.NamedTextColor.RED;
+
+@DefaultQualifier(NonNull.class)
+public final class ChunkDebugCommand implements PaperSubcommand {
+ @Override
+ public boolean execute(final CommandSender sender, final String subCommand, final String[] args) {
+ switch (subCommand) {
+ case "debug" -> this.doDebug(sender, args);
+ case "chunkinfo" -> this.doChunkInfo(sender, args);
+ case "holderinfo" -> this.doHolderInfo(sender, args);
+ }
+ return true;
+ }
+
+ @Override
+ public List<String> tabComplete(final CommandSender sender, final String subCommand, final String[] args) {
+ switch (subCommand) {
+ case "debug" -> {
+ if (args.length == 1) {
+ return CommandUtil.getListMatchingLast(sender, args, "help", "chunks");
+ }
+ }
+ case "holderinfo" -> {
+ List<String> worldNames = new ArrayList<>();
+ worldNames.add("*");
+ for (org.bukkit.World world : Bukkit.getWorlds()) {
+ worldNames.add(world.getName());
+ }
+ if (args.length == 1) {
+ return CommandUtil.getListMatchingLast(sender, args, worldNames);
+ }
+ }
+ case "chunkinfo" -> {
+ List<String> worldNames = new ArrayList<>();
+ worldNames.add("*");
+ for (org.bukkit.World world : Bukkit.getWorlds()) {
+ worldNames.add(world.getName());
+ }
+ if (args.length == 1) {
+ return CommandUtil.getListMatchingLast(sender, args, worldNames);
+ }
+ }
+ }
+ return Collections.emptyList();
+ }
+
+ private void doChunkInfo(final CommandSender sender, final String[] args) {
+ List<org.bukkit.World> worlds;
+ if (args.length < 1 || args[0].equals("*")) {
+ worlds = Bukkit.getWorlds();
+ } else {
+ worlds = new ArrayList<>(args.length);
+ for (final String arg : args) {
+ org.bukkit.@Nullable World world = Bukkit.getWorld(arg);
+ if (world == null) {
+ sender.sendMessage(text("World '" + arg + "' is invalid", RED));
+ return;
+ }
+ worlds.add(world);
+ }
+ }
+
+ int accumulatedTotal = 0;
+ int accumulatedInactive = 0;
+ int accumulatedBorder = 0;
+ int accumulatedTicking = 0;
+ int accumulatedEntityTicking = 0;
+
+ for (final org.bukkit.World bukkitWorld : worlds) {
+ final ServerLevel world = ((CraftWorld) bukkitWorld).getHandle();
+
+ int total = 0;
+ int inactive = 0;
+ int border = 0;
+ int ticking = 0;
+ int entityTicking = 0;
+
+ for (final ChunkHolder chunk : io.papermc.paper.chunk.system.ChunkSystem.getVisibleChunkHolders(world)) {
+ if (chunk.getFullChunkNowUnchecked() == null) {
+ continue;
+ }
+
+ ++total;
+
+ ChunkHolder.FullChunkStatus state = chunk.getFullStatus();
+
+ switch (state) {
+ case INACCESSIBLE -> ++inactive;
+ case BORDER -> ++border;
+ case TICKING -> ++ticking;
+ case ENTITY_TICKING -> ++entityTicking;
+ }
+ }
+
+ accumulatedTotal += total;
+ accumulatedInactive += inactive;
+ accumulatedBorder += border;
+ accumulatedTicking += ticking;
+ accumulatedEntityTicking += entityTicking;
+
+ sender.sendMessage(text().append(text("Chunks in ", BLUE), text(bukkitWorld.getName(), GREEN), text(":")));
+ sender.sendMessage(text().color(DARK_AQUA).append(
+ text("Total: ", BLUE), text(total),
+ text(" Inactive: ", BLUE), text(inactive),
+ text(" Border: ", BLUE), text(border),
+ text(" Ticking: ", BLUE), text(ticking),
+ text(" Entity: ", BLUE), text(entityTicking)
+ ));
+ }
+ if (worlds.size() > 1) {
+ sender.sendMessage(text().append(text("Chunks in ", BLUE), text("all listed worlds", GREEN), text(":", DARK_AQUA)));
+ sender.sendMessage(text().color(DARK_AQUA).append(
+ text("Total: ", BLUE), text(accumulatedTotal),
+ text(" Inactive: ", BLUE), text(accumulatedInactive),
+ text(" Border: ", BLUE), text(accumulatedBorder),
+ text(" Ticking: ", BLUE), text(accumulatedTicking),
+ text(" Entity: ", BLUE), text(accumulatedEntityTicking)
+ ));
+ }
+ }
+
+ private void doHolderInfo(final CommandSender sender, final String[] args) {
+ List<org.bukkit.World> worlds;
+ if (args.length < 1 || args[0].equals("*")) {
+ worlds = Bukkit.getWorlds();
+ } else {
+ worlds = new ArrayList<>(args.length);
+ for (final String arg : args) {
+ org.bukkit.@Nullable World world = Bukkit.getWorld(arg);
+ if (world == null) {
+ sender.sendMessage(text("World '" + arg + "' is invalid", RED));
+ return;
+ }
+ worlds.add(world);
+ }
+ }
+
+ int accumulatedTotal = 0;
+ int accumulatedCanUnload = 0;
+ int accumulatedNull = 0;
+ int accumulatedReadOnly = 0;
+ int accumulatedProtoChunk = 0;
+ int accumulatedFullChunk = 0;
+
+ for (final org.bukkit.World bukkitWorld : worlds) {
+ final ServerLevel world = ((CraftWorld) bukkitWorld).getHandle();
+
+ int total = 0;
+ int canUnload = 0;
+ int nullChunks = 0;
+ int readOnly = 0;
+ int protoChunk = 0;
+ int fullChunk = 0;
+
+ for (final ChunkHolder chunk : world.chunkTaskScheduler.chunkHolderManager.getOldChunkHolders()) { // Paper - change updating chunks map
+ final ChunkAccess lastChunk = chunk.getAvailableChunkNow();
+
+ ++total;
+
+ if (lastChunk == null) {
+ ++nullChunks;
+ } else if (lastChunk instanceof ImposterProtoChunk) {
+ ++readOnly;
+ } else if (lastChunk instanceof ProtoChunk) {
+ ++protoChunk;
+ } else if (lastChunk instanceof LevelChunk) {
+ ++fullChunk;
+ }
+
+ if (chunk.newChunkHolder.isSafeToUnload() == null) {
+ ++canUnload;
+ }
+ }
+
+ accumulatedTotal += total;
+ accumulatedCanUnload += canUnload;
+ accumulatedNull += nullChunks;
+ accumulatedReadOnly += readOnly;
+ accumulatedProtoChunk += protoChunk;
+ accumulatedFullChunk += fullChunk;
+
+ sender.sendMessage(text().append(text("Chunks in ", BLUE), text(bukkitWorld.getName(), GREEN), text(":")));
+ sender.sendMessage(text().color(DARK_AQUA).append(
+ text("Total: ", BLUE), text(total),
+ text(" Unloadable: ", BLUE), text(canUnload),
+ text(" Null: ", BLUE), text(nullChunks),
+ text(" ReadOnly: ", BLUE), text(readOnly),
+ text(" Proto: ", BLUE), text(protoChunk),
+ text(" Full: ", BLUE), text(fullChunk)
+ ));
+ }
+ if (worlds.size() > 1) {
+ sender.sendMessage(text().append(text("Chunks in ", BLUE), text("all listed worlds", GREEN), text(":", DARK_AQUA)));
+ sender.sendMessage(text().color(DARK_AQUA).append(
+ text("Total: ", BLUE), text(accumulatedTotal),
+ text(" Unloadable: ", BLUE), text(accumulatedCanUnload),
+ text(" Null: ", BLUE), text(accumulatedNull),
+ text(" ReadOnly: ", BLUE), text(accumulatedReadOnly),
+ text(" Proto: ", BLUE), text(accumulatedProtoChunk),
+ text(" Full: ", BLUE), text(accumulatedFullChunk)
+ ));
+ }
+ }
+
+ private void doDebug(final CommandSender sender, final String[] args) {
+ if (args.length < 1) {
+ sender.sendMessage(text("Use /paper debug [chunks] help for more information on a specific command", RED));
+ return;
+ }
+
+ final String debugType = args[0].toLowerCase(Locale.ENGLISH);
+ switch (debugType) {
+ case "chunks" -> {
+ if (args.length >= 2 && args[1].toLowerCase(Locale.ENGLISH).equals("help")) {
+ sender.sendMessage(text("Use /paper debug chunks [world] to dump loaded chunk information to a file", RED));
+ break;
+ }
+ File file = new File(new File(new File("."), "debug"),
+ "chunks-" + DateTimeFormatter.ofPattern("yyyy-MM-dd_HH.mm.ss").format(LocalDateTime.now()) + ".txt");
+ sender.sendMessage(text("Writing chunk information dump to " + file, GREEN));
+ try {
+ MCUtil.dumpChunks(file, false);
+ sender.sendMessage(text("Successfully written chunk information!", GREEN));
+ } catch (Throwable thr) {
+ MinecraftServer.LOGGER.warn("Failed to dump chunk information to file " + file.toString(), thr);
+ sender.sendMessage(text("Failed to dump chunk information, see console", RED));
+ }
+ }
+ // "help" & default
+ default -> sender.sendMessage(text("Use /paper debug [chunks] help for more information on a specific command", RED));
+ }
+ }
+
+}
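For context, the registration hunk above maps several aliases ("debug", "chunkinfo", "holderinfo") to this one handler, and the PaperSubcommand contract, as visible in this file, is just an execute method and a tabComplete method keyed off the alias actually used. A hypothetical subcommand following the same shape, purely to illustrate the contract (not part of the patch):

import io.papermc.paper.command.PaperSubcommand;
import java.util.Collections;
import java.util.List;
import org.bukkit.command.CommandSender;

// Illustrative only: a no-op subcommand showing the two methods PaperSubcommand
// requires, as inferred from ChunkDebugCommand above.
public final class PingSubcommand implements PaperSubcommand {
    @Override
    public boolean execute(final CommandSender sender, final String subCommand, final String[] args) {
        sender.sendMessage("pong: " + subCommand); // subCommand is the alias that was typed
        return true;
    }

    @Override
    public List<String> tabComplete(final CommandSender sender, final String subCommand, final String[] args) {
        return Collections.emptyList(); // no completions for this sketch
    }
}

It would be wired up the same way as ChunkDebugCommand in PaperCommand, e.g. commands.put(Set.of("ping"), new PingSubcommand()).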
diff --git a/src/main/java/io/papermc/paper/util/MCUtil.java b/src/main/java/io/papermc/paper/util/MCUtil.java
index 9798a1010120125039cbf226a0e3679cc92f92c7..cf3e083c2ada3275a52c303de16a62576696e83f 100644
--- a/src/main/java/io/papermc/paper/util/MCUtil.java
+++ b/src/main/java/io/papermc/paper/util/MCUtil.java
@@ -1,16 +1,29 @@
package io.papermc.paper.util;

import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonObject;
+import com.google.gson.internal.Streams;
+import com.google.gson.stream.JsonWriter;
+import com.mojang.datafixers.util.Either;
import it.unimi.dsi.fastutil.objects.ObjectRBTreeSet;
import java.lang.ref.Cleaner;
+import it.unimi.dsi.fastutil.objects.ReferenceArrayList;
import net.minecraft.core.BlockPos;
import net.minecraft.core.Direction;
import net.minecraft.server.MinecraftServer;
+import net.minecraft.server.level.ChunkHolder;
+import net.minecraft.server.level.ChunkMap;
+import net.minecraft.server.level.DistanceManager;
import net.minecraft.server.level.ServerLevel;
+import net.minecraft.server.level.ServerPlayer;
+import net.minecraft.server.level.Ticket;
import net.minecraft.world.entity.Entity;
import net.minecraft.world.level.ChunkPos;
import net.minecraft.world.level.ClipContext;
import net.minecraft.world.level.Level;
+import net.minecraft.world.level.chunk.ChunkAccess;
+import net.minecraft.world.level.chunk.ChunkStatus;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.bukkit.Location;
import org.bukkit.block.BlockFace;
@@ -20,8 +33,11 @@ import org.spigotmc.AsyncCatcher;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;
+import java.io.*;
+import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Queue;
+import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingQueue;
@@ -506,6 +522,100 @@ public final class MCUtil {
}
}

+ public static ChunkStatus getChunkStatus(ChunkHolder chunk) {
+ return chunk.getChunkHolderStatus();
+ }
+
+ public static void dumpChunks(File file, boolean watchdog) throws IOException {
+ file.getParentFile().mkdirs();
+ file.createNewFile();
+ ReferenceArrayList<org.bukkit.World> worlds = new ReferenceArrayList<>(org.bukkit.Bukkit.getWorlds());
+ ReferenceArrayList<org.bukkit.World> loadedWorlds = new ReferenceArrayList<>(worlds);
+ JsonObject data = new JsonObject();
+
+ data.addProperty("server-version", org.bukkit.Bukkit.getVersion());
+ data.addProperty("data-version", 1);
+
+ {
+ JsonArray players = new JsonArray();
+ data.add("all-players", players);
+ List<ServerPlayer> playerList = MinecraftServer.getServer().getPlayerList().players;
+ for (ServerPlayer player : playerList) {
+ JsonObject playerData = new JsonObject();
+ players.add(playerData);
+
+ Level playerWorld = player.getLevel();
+ org.bukkit.World craftWorld = playerWorld.getWorld();
+ Entity.RemovalReason removalReason = player.getRemovalReason();
+
+ playerData.addProperty("name", player.getScoreboardName());
+ playerData.addProperty("x", player.getX());
+ playerData.addProperty("y", player.getY());
+ playerData.addProperty("z", player.getZ());
+ playerData.addProperty("world", playerWorld == null ? "null world" : craftWorld.getName());
+ playerData.addProperty("removalReason", removalReason == null ? "null" : removalReason.name());
+
+ if (!worlds.contains(craftWorld)) {
+ worlds.add(craftWorld);
+ }
+ }
+ }
+
+ JsonArray chunkWaitInformation = new JsonArray();
+ data.add("chunk-wait-infos", chunkWaitInformation);
+
+ for (io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.ChunkInfo chunkInfo : io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.getChunkInfos()) {
+ chunkWaitInformation.add(chunkInfo.toString());
+ }
+
+ JsonArray worldsData = new JsonArray();
+
+ for (org.bukkit.World bukkitWorld : worlds) {
+ JsonObject worldData = new JsonObject();
+
+ ServerLevel world = ((org.bukkit.craftbukkit.CraftWorld)bukkitWorld).getHandle();
+ List<ServerPlayer> players = world.players();
+
+ worldData.addProperty("is-loaded", loadedWorlds.contains(bukkitWorld));
+ worldData.addProperty("name", world.getWorld().getName());
+ worldData.addProperty("view-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance()); // Paper - replace chunk loader system
+ worldData.addProperty("tick-view-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance()); // Paper - replace chunk loader system
+ worldData.addProperty("keep-spawn-loaded", world.keepSpawnInMemory);
+ worldData.addProperty("keep-spawn-loaded-range", world.paperConfig().spawn.keepSpawnLoadedRange * 16);
+
+ JsonArray playersData = new JsonArray();
+
+ for (ServerPlayer player : players) {
+ JsonObject playerData = new JsonObject();
+
+ playerData.addProperty("name", player.getScoreboardName());
+ playerData.addProperty("x", player.getX());
+ playerData.addProperty("y", player.getY());
+ playerData.addProperty("z", player.getZ());
+
+ playersData.add(playerData);
+ }
+
+ worldData.add("players", playersData);
+ worldData.add("chunk-data", watchdog ? world.chunkTaskScheduler.chunkHolderManager.getDebugJsonForWatchdog() : world.chunkTaskScheduler.chunkHolderManager.getDebugJson());
+ worldsData.add(worldData);
+ }
+
+ data.add("worlds", worldsData);
+
+ StringWriter stringWriter = new StringWriter();
+ JsonWriter jsonWriter = new JsonWriter(stringWriter);
+ jsonWriter.setIndent(" ");
+ jsonWriter.setLenient(false);
+ Streams.write(data, jsonWriter);
+
+ String fileData = stringWriter.toString();
+
+ try (PrintStream out = new PrintStream(new FileOutputStream(file), false, StandardCharsets.UTF_8)) {
+ out.print(fileData);
+ }
+ }
+
public static int getTicketLevelFor(net.minecraft.world.level.chunk.ChunkStatus status) {
return net.minecraft.server.level.ChunkMap.MAX_VIEW_DISTANCE + net.minecraft.world.level.chunk.ChunkStatus.getDistance(status);
}
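dumpChunks() above serializes through Gson's streaming JsonWriter so the indent can be set explicitly, and hands the tree off via Streams.write, which is a Gson-internal helper. Tooling that wants to reproduce this dump format would more typically go through Gson's public API; a rough equivalent (illustrative sketch, not from the patch):

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonObject;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

final class JsonDumps {
    private static final Gson GSON = new GsonBuilder().setPrettyPrinting().create();

    // Writes a JsonObject to disk as indented UTF-8, mirroring what dumpChunks()
    // achieves with JsonWriter.setIndent(...) + Streams.write(...) above.
    static void write(final Path file, final JsonObject data) throws IOException {
        Files.createDirectories(file.getParent());
        Files.writeString(file, GSON.toJson(data), StandardCharsets.UTF_8);
    }
}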
diff --git a/src/main/java/io/papermc/paper/util/TickThread.java b/src/main/java/io/papermc/paper/util/TickThread.java
index d59885ee9c8b29d5bac34dce0597e345e5358c77..fc57850b80303fcade89ca95794f63910404a407 100644
--- a/src/main/java/io/papermc/paper/util/TickThread.java
+++ b/src/main/java/io/papermc/paper/util/TickThread.java
@@ -6,7 +6,7 @@ import net.minecraft.world.entity.Entity;
import org.bukkit.Bukkit;
import java.util.concurrent.atomic.AtomicInteger;

-public final class TickThread extends Thread {
+public class TickThread extends Thread {

public static final boolean STRICT_THREAD_CHECKS = Boolean.getBoolean("paper.strict-thread-checks");

@@ -16,6 +16,10 @@ public final class TickThread extends Thread {
}
}

+ /**
+ * @deprecated
+ */
+ @Deprecated
public static void softEnsureTickThread(final String reason) {
if (!STRICT_THREAD_CHECKS) {
return;
@@ -23,6 +27,10 @@ public final class TickThread extends Thread {
ensureTickThread(reason);
}

+ /**
+ * @deprecated
+ */
+ @Deprecated
public static void ensureTickThread(final String reason) {
if (!isTickThread()) {
MinecraftServer.LOGGER.error("Thread " + Thread.currentThread().getName() + " failed main thread check: " + reason, new Throwable());
@@ -66,14 +74,14 @@ public final class TickThread extends Thread {
}

public static boolean isTickThread() {
- return Bukkit.isPrimaryThread();
+ return Thread.currentThread() instanceof TickThread;
}

public static boolean isTickThreadFor(final ServerLevel world, final int chunkX, final int chunkZ) {
- return Bukkit.isPrimaryThread();
+ return Thread.currentThread() instanceof TickThread;
}

public static boolean isTickThreadFor(final Entity entity) {
- return Bukkit.isPrimaryThread();
+ return Thread.currentThread() instanceof TickThread;
}
}
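The isTickThread family now answers by thread identity rather than by asking Bukkit for the primary thread: any thread constructed as a TickThread (including the server thread created in MinecraftServer.spin further down) passes the check. The pattern in isolation, with illustrative names that are not part of the patch:

// Marker-thread ownership check: threads allowed to mutate protected state are
// created as OwnedThread, and assertions simply test the current thread's type.
class OwnedThread extends Thread {
    OwnedThread(final Runnable run, final String name) {
        super(run, name);
    }

    static boolean currentThreadOwnsState() {
        return Thread.currentThread() instanceof OwnedThread;
    }

    static void ensureOwnership(final String reason) {
        if (!currentThreadOwnsState()) {
            throw new IllegalStateException("Not an owning thread: " + reason);
        }
    }
}

Making TickThread non-final is what allows more specific thread subclasses to still pass the instanceof test.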
diff --git a/src/main/java/io/papermc/paper/world/ChunkEntitySlices.java b/src/main/java/io/papermc/paper/world/ChunkEntitySlices.java
new file mode 100644
index 0000000000000000000000000000000000000000..f597d65d56964297eeeed6c7e77703764178fee0
--- /dev/null
+++ b/src/main/java/io/papermc/paper/world/ChunkEntitySlices.java
@@ -0,0 +1,601 @@
+package io.papermc.paper.world;
+
+import com.destroystokyo.paper.util.maplist.EntityList;
+import io.papermc.paper.chunk.system.entity.EntityLookup;
+import io.papermc.paper.util.TickThread;
+import it.unimi.dsi.fastutil.objects.Reference2ObjectMap;
+import it.unimi.dsi.fastutil.objects.Reference2ObjectOpenHashMap;
+import net.minecraft.nbt.CompoundTag;
+import net.minecraft.server.level.ChunkHolder;
+import net.minecraft.server.level.ServerLevel;
+import net.minecraft.util.Mth;
+import net.minecraft.world.entity.Entity;
+import net.minecraft.world.entity.EntityType;
+import net.minecraft.world.entity.boss.EnderDragonPart;
+import net.minecraft.world.entity.boss.enderdragon.EnderDragon;
+import net.minecraft.world.level.ChunkPos;
+import net.minecraft.world.level.chunk.storage.EntityStorage;
+import net.minecraft.world.level.entity.Visibility;
+import net.minecraft.world.phys.AABB;
+import org.bukkit.craftbukkit.event.CraftEventFactory;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.function.Predicate;
+
+public final class ChunkEntitySlices {
+
+ protected final int minSection;
+ protected final int maxSection;
+ public final int chunkX;
+ public final int chunkZ;
+ protected final ServerLevel world;
+
+ protected final EntityCollectionBySection allEntities;
+ protected final EntityCollectionBySection hardCollidingEntities;
+ protected final Reference2ObjectOpenHashMap<Class<? extends Entity>, EntityCollectionBySection> entitiesByClass;
+ protected final EntityList entities = new EntityList();
+
+ public ChunkHolder.FullChunkStatus status;
+
+ protected boolean isTransient;
+
+ public boolean isTransient() {
+ return this.isTransient;
+ }
+
+ public void setTransient(final boolean value) {
+ this.isTransient = value;
+ }
+
+ // TODO implement container search optimisations
+
+ public ChunkEntitySlices(final ServerLevel world, final int chunkX, final int chunkZ, final ChunkHolder.FullChunkStatus status,
+ final int minSection, final int maxSection) { // inclusive, inclusive
+ this.minSection = minSection;
+ this.maxSection = maxSection;
+ this.chunkX = chunkX;
+ this.chunkZ = chunkZ;
+ this.world = world;
+
+ this.allEntities = new EntityCollectionBySection(this);
+ this.hardCollidingEntities = new EntityCollectionBySection(this);
+ this.entitiesByClass = new Reference2ObjectOpenHashMap<>();
+
+ this.status = status;
+ }
+
+ // Paper start - optimise CraftChunk#getEntities
+ public org.bukkit.entity.Entity[] getChunkEntities() {
+ List<org.bukkit.entity.Entity> ret = new java.util.ArrayList<>();
+ final Entity[] entities = this.entities.getRawData();
+ for (int i = 0, size = Math.min(entities.length, this.entities.size()); i < size; ++i) {
+ final Entity entity = entities[i];
+ if (entity == null) {
+ continue;
+ }
+ final org.bukkit.entity.Entity bukkit = entity.getBukkitEntity();
+ if (bukkit != null && bukkit.isValid()) {
+ ret.add(bukkit);
+ }
+ }
+
+ return ret.toArray(new org.bukkit.entity.Entity[0]);
+ }
+
+ public CompoundTag save() {
+ final int len = this.entities.size();
+ if (len == 0) {
+ return null;
+ }
+
+ final Entity[] rawData = this.entities.getRawData();
+ final List<Entity> collectedEntities = new ArrayList<>(len);
+ for (int i = 0; i < len; ++i) {
+ final Entity entity = rawData[i];
+ if (entity.shouldBeSaved()) {
+ collectedEntities.add(entity);
+ }
+ }
+
+ if (collectedEntities.isEmpty()) {
+ return null;
+ }
+
+ return EntityStorage.saveEntityChunk(collectedEntities, new ChunkPos(this.chunkX, this.chunkZ), this.world);
+ }
+
+ // returns true if this chunk has transient entities remaining
+ public boolean unload() {
+ final int len = this.entities.size();
+ final Entity[] collectedEntities = Arrays.copyOf(this.entities.getRawData(), len);
+
+ for (int i = 0; i < len; ++i) {
+ final Entity entity = collectedEntities[i];
+ if (entity.isRemoved()) {
+ // removed by us below
+ continue;
+ }
+ if (entity.shouldBeSaved()) {
+ entity.setRemoved(Entity.RemovalReason.UNLOADED_TO_CHUNK);
+ if (entity.isVehicle()) {
+ // we cannot assume that these entities are contained within this chunk, because entities can
+ // desync - so we need to remove them all
+ for (final Entity passenger : entity.getIndirectPassengers()) {
+ passenger.setRemoved(Entity.RemovalReason.UNLOADED_TO_CHUNK);
+ }
+ }
+ }
+ }
+
+ return this.entities.size() != 0;
+ }
+
+ private List<Entity> getAllEntities() {
+ final int len = this.entities.size();
+ if (len == 0) {
+ return new ArrayList<>();
+ }
+
+ final Entity[] rawData = this.entities.getRawData();
+ final List<Entity> collectedEntities = new ArrayList<>(len);
+ for (int i = 0; i < len; ++i) {
+ collectedEntities.add(rawData[i]);
+ }
+
+ return collectedEntities;
+ }
+
+ public void callEntitiesLoadEvent() {
+ CraftEventFactory.callEntitiesLoadEvent(this.world, new ChunkPos(this.chunkX, this.chunkZ), this.getAllEntities());
+ }
+
+ public void callEntitiesUnloadEvent() {
+ CraftEventFactory.callEntitiesUnloadEvent(this.world, new ChunkPos(this.chunkX, this.chunkZ), this.getAllEntities());
+ }
+ // Paper end - optimise CraftChunk#getEntities
+
+ public boolean isEmpty() {
+ return this.entities.size() == 0;
+ }
+
+ public void mergeInto(final ChunkEntitySlices slices) {
+ final Entity[] entities = this.entities.getRawData();
+ for (int i = 0, size = Math.min(entities.length, this.entities.size()); i < size; ++i) {
+ final Entity entity = entities[i];
+ slices.addEntity(entity, entity.sectionY);
+ }
+ }
+
+ private boolean preventStatusUpdates;
+ public boolean startPreventingStatusUpdates() {
+ final boolean ret = this.preventStatusUpdates;
+ this.preventStatusUpdates = true;
+ return ret;
+ }
+
+ public void stopPreventingStatusUpdates(final boolean prev) {
+ this.preventStatusUpdates = prev;
+ }
+
+ public void updateStatus(final ChunkHolder.FullChunkStatus status, final EntityLookup lookup) {
+ this.status = status;
+
+ final Entity[] entities = this.entities.getRawData();
+
+ for (int i = 0, size = this.entities.size(); i < size; ++i) {
+ final Entity entity = entities[i];
+
+ final Visibility oldVisibility = EntityLookup.getEntityStatus(entity);
+ entity.chunkStatus = status;
+ final Visibility newVisibility = EntityLookup.getEntityStatus(entity);
+
+ lookup.entityStatusChange(entity, this, oldVisibility, newVisibility, false, false, false);
+ }
+ }
+
+ public boolean addEntity(final Entity entity, final int chunkSection) {
+ if (!this.entities.add(entity)) {
+ return false;
+ }
+ entity.chunkStatus = this.status;
+ final int sectionIndex = chunkSection - this.minSection;
+
+ this.allEntities.addEntity(entity, sectionIndex);
+
+ if (entity.hardCollides()) {
+ this.hardCollidingEntities.addEntity(entity, sectionIndex);
+ }
+
+ for (final Iterator<Reference2ObjectMap.Entry<Class<? extends Entity>, EntityCollectionBySection>> iterator =
+ this.entitiesByClass.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) {
+ final Reference2ObjectMap.Entry<Class<? extends Entity>, EntityCollectionBySection> entry = iterator.next();
+
+ if (entry.getKey().isInstance(entity)) {
+ entry.getValue().addEntity(entity, sectionIndex);
+ }
+ }
+
+ return true;
+ }
+
+ public boolean removeEntity(final Entity entity, final int chunkSection) {
+ if (!this.entities.remove(entity)) {
+ return false;
+ }
+ entity.chunkStatus = null;
+ final int sectionIndex = chunkSection - this.minSection;
+
+ this.allEntities.removeEntity(entity, sectionIndex);
+
+ if (entity.hardCollides()) {
+ this.hardCollidingEntities.removeEntity(entity, sectionIndex);
+ }
+
+ for (final Iterator<Reference2ObjectMap.Entry<Class<? extends Entity>, EntityCollectionBySection>> iterator =
+ this.entitiesByClass.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) {
+ final Reference2ObjectMap.Entry<Class<? extends Entity>, EntityCollectionBySection> entry = iterator.next();
+
+ if (entry.getKey().isInstance(entity)) {
+ entry.getValue().removeEntity(entity, sectionIndex);
+ }
+ }
+
+ return true;
+ }
+
+ public void getHardCollidingEntities(final Entity except, final AABB box, final List<Entity> into, final Predicate<? super Entity> predicate) {
+ this.hardCollidingEntities.getEntities(except, box, into, predicate);
+ }
+
+ public void getEntities(final Entity except, final AABB box, final List<Entity> into, final Predicate<? super Entity> predicate) {
+ this.allEntities.getEntitiesWithEnderDragonParts(except, box, into, predicate);
+ }
+
+ public void getEntitiesWithoutDragonParts(final Entity except, final AABB box, final List<Entity> into, final Predicate<? super Entity> predicate) {
+ this.allEntities.getEntities(except, box, into, predicate);
+ }
+
+ public <T extends Entity> void getEntities(final EntityType<?> type, final AABB box, final List<? super T> into,
+ final Predicate<? super T> predicate) {
+ this.allEntities.getEntities(type, box, (List)into, (Predicate)predicate);
+ }
+
+ protected EntityCollectionBySection initClass(final Class<? extends Entity> clazz) {
+ final EntityCollectionBySection ret = new EntityCollectionBySection(this);
+
+ for (int sectionIndex = 0; sectionIndex < this.allEntities.entitiesBySection.length; ++sectionIndex) {
+ final BasicEntityList<Entity> sectionEntities = this.allEntities.entitiesBySection[sectionIndex];
+ if (sectionEntities == null) {
+ continue;
+ }
+
+ final Entity[] storage = sectionEntities.storage;
+
+ for (int i = 0, len = Math.min(storage.length, sectionEntities.size()); i < len; ++i) {
+ final Entity entity = storage[i];
+
+ if (clazz.isInstance(entity)) {
+ ret.addEntity(entity, sectionIndex);
+ }
+ }
+ }
+
+ return ret;
+ }
+
+ public <T extends Entity> void getEntities(final Class<? extends T> clazz, final Entity except, final AABB box, final List<? super T> into,
+ final Predicate<? super T> predicate) {
+ EntityCollectionBySection collection = this.entitiesByClass.get(clazz);
+ if (collection != null) {
+ collection.getEntitiesWithEnderDragonParts(except, clazz, box, (List)into, (Predicate)predicate);
+ } else {
+ this.entitiesByClass.putIfAbsent(clazz, collection = this.initClass(clazz));
+ collection.getEntitiesWithEnderDragonParts(except, clazz, box, (List)into, (Predicate)predicate);
+ }
+ }
+
+ protected static final class BasicEntityList<E extends Entity> {
+
+ protected static final Entity[] EMPTY = new Entity[0];
+ protected static final int DEFAULT_CAPACITY = 4;
+
+ protected E[] storage;
+ protected int size;
+
+ public BasicEntityList() {
+ this(0);
+ }
+
+ public BasicEntityList(final int cap) {
+ this.storage = (E[])(cap <= 0 ? EMPTY : new Entity[cap]);
+ }
+
+ public boolean isEmpty() {
+ return this.size == 0;
+ }
+
+ public int size() {
+ return this.size;
+ }
+
+ private void resize() {
+ if (this.storage == EMPTY) {
+ this.storage = (E[])new Entity[DEFAULT_CAPACITY];
+ } else {
+ this.storage = Arrays.copyOf(this.storage, this.storage.length * 2);
+ }
+ }
+
+ public void add(final E entity) {
+ final int idx = this.size++;
+ if (idx >= this.storage.length) {
+ this.resize();
+ this.storage[idx] = entity;
+ } else {
+ this.storage[idx] = entity;
+ }
+ }
+
+ public int indexOf(final E entity) {
+ final E[] storage = this.storage;
+
+ for (int i = 0, len = Math.min(this.storage.length, this.size); i < len; ++i) {
+ if (storage[i] == entity) {
+ return i;
+ }
+ }
+
+ return -1;
+ }
+
+ public boolean remove(final E entity) {
+ final int idx = this.indexOf(entity);
+ if (idx == -1) {
+ return false;
+ }
+
+ final int size = --this.size;
+ final E[] storage = this.storage;
+ if (idx != size) {
+ System.arraycopy(storage, idx + 1, storage, idx, size - idx);
+ }
+
+ storage[size] = null;
+
+ return true;
+ }
+
+ public boolean has(final E entity) {
+ return this.indexOf(entity) != -1;
+ }
+ }
+
+ protected static final class EntityCollectionBySection {
+
+ protected final ChunkEntitySlices manager;
+ protected final long[] nonEmptyBitset;
+ protected final BasicEntityList<Entity>[] entitiesBySection;
+ protected int count;
+
+ public EntityCollectionBySection(final ChunkEntitySlices manager) {
+ this.manager = manager;
+
+ final int sectionCount = manager.maxSection - manager.minSection + 1;
+
+ this.nonEmptyBitset = new long[(sectionCount + (Long.SIZE - 1)) >>> 6]; // (sectionCount + (Long.SIZE - 1)) / Long.SIZE
+ this.entitiesBySection = new BasicEntityList[sectionCount];
+ }
+
+ public void addEntity(final Entity entity, final int sectionIndex) {
+ BasicEntityList<Entity> list = this.entitiesBySection[sectionIndex];
+
+ if (list != null && list.has(entity)) {
+ return;
+ }
+
+ if (list == null) {
+ this.entitiesBySection[sectionIndex] = list = new BasicEntityList<>();
+ this.nonEmptyBitset[sectionIndex >>> 6] |= (1L << (sectionIndex & (Long.SIZE - 1)));
+ }
+
+ list.add(entity);
+ ++this.count;
+ }
+
+ public void removeEntity(final Entity entity, final int sectionIndex) {
+ final BasicEntityList<Entity> list = this.entitiesBySection[sectionIndex];
+
+ if (list == null || !list.remove(entity)) {
+ return;
+ }
+
+ --this.count;
+
+ if (list.isEmpty()) {
+ this.entitiesBySection[sectionIndex] = null;
+ this.nonEmptyBitset[sectionIndex >>> 6] ^= (1L << (sectionIndex & (Long.SIZE - 1)));
+ }
+ }
+
+ public void getEntities(final Entity except, final AABB box, final List<Entity> into, final Predicate<? super Entity> predicate) {
+ if (this.count == 0) {
+ return;
+ }
+
+ final int minSection = this.manager.minSection;
+ final int maxSection = this.manager.maxSection;
+
+ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection);
+ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 4, minSection, maxSection);
+
+ final BasicEntityList<Entity>[] entitiesBySection = this.entitiesBySection;
+
+ for (int section = min; section <= max; ++section) {
+ final BasicEntityList<Entity> list = entitiesBySection[section - minSection];
+
+ if (list == null) {
+ continue;
+ }
+
+ final Entity[] storage = list.storage;
+
+ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) {
+ final Entity entity = storage[i];
+
+ if (entity == null || entity == except || !entity.getBoundingBox().intersects(box)) {
+ continue;
+ }
+
+ if (predicate != null && !predicate.test(entity)) {
+ continue;
+ }
+
+ into.add(entity);
+ }
+ }
+ }
+
+ public void getEntitiesWithEnderDragonParts(final Entity except, final AABB box, final List<Entity> into,
+ final Predicate<? super Entity> predicate) {
+ if (this.count == 0) {
+ return;
+ }
+
+ final int minSection = this.manager.minSection;
+ final int maxSection = this.manager.maxSection;
+
+ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection);
+ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 4, minSection, maxSection);
+
+ final BasicEntityList<Entity>[] entitiesBySection = this.entitiesBySection;
+
+ for (int section = min; section <= max; ++section) {
+ final BasicEntityList<Entity> list = entitiesBySection[section - minSection];
+
+ if (list == null) {
+ continue;
+ }
+
+ final Entity[] storage = list.storage;
+
+ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) {
+ final Entity entity = storage[i];
+
+ if (entity == null || entity == except || !entity.getBoundingBox().intersects(box)) {
+ continue;
+ }
+
+ if (predicate == null || predicate.test(entity)) {
+ into.add(entity);
+ } // else: continue to test the ender dragon parts
+
+ if (entity instanceof EnderDragon) {
+ for (final EnderDragonPart part : ((EnderDragon)entity).subEntities) {
+ if (part == except || !part.getBoundingBox().intersects(box)) {
+ continue;
+ }
+
+ if (predicate != null && !predicate.test(part)) {
+ continue;
+ }
+
+ into.add(part);
+ }
+ }
+ }
+ }
+ }
+
+ public void getEntitiesWithEnderDragonParts(final Entity except, final Class<?> clazz, final AABB box, final List<Entity> into,
+ final Predicate<? super Entity> predicate) {
+ if (this.count == 0) {
+ return;
+ }
+
+ final int minSection = this.manager.minSection;
+ final int maxSection = this.manager.maxSection;
+
+ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection);
+ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 4, minSection, maxSection);
+
+ final BasicEntityList<Entity>[] entitiesBySection = this.entitiesBySection;
+
+ for (int section = min; section <= max; ++section) {
+ final BasicEntityList<Entity> list = entitiesBySection[section - minSection];
+
+ if (list == null) {
+ continue;
+ }
+
+ final Entity[] storage = list.storage;
+
+ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) {
+ final Entity entity = storage[i];
+
+ if (entity == null || entity == except || !entity.getBoundingBox().intersects(box)) {
+ continue;
+ }
+
+ if (predicate == null || predicate.test(entity)) {
+ into.add(entity);
+ } // else: continue to test the ender dragon parts
+
+ if (entity instanceof EnderDragon) {
+ for (final EnderDragonPart part : ((EnderDragon)entity).subEntities) {
+ if (part == except || !part.getBoundingBox().intersects(box) || !clazz.isInstance(part)) {
+ continue;
+ }
+
+ if (predicate != null && !predicate.test(part)) {
+ continue;
+ }
+
+ into.add(part);
+ }
+ }
+ }
+ }
+ }
+
+ public <T extends Entity> void getEntities(final EntityType<?> type, final AABB box, final List<? super T> into,
+ final Predicate<? super T> predicate) {
+ if (this.count == 0) {
+ return;
+ }
+
+ final int minSection = this.manager.minSection;
+ final int maxSection = this.manager.maxSection;
+
+ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection);
+ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 4, minSection, maxSection);
+
+ final BasicEntityList<Entity>[] entitiesBySection = this.entitiesBySection;
+
+ for (int section = min; section <= max; ++section) {
+ final BasicEntityList<Entity> list = entitiesBySection[section - minSection];
+
+ if (list == null) {
+ continue;
+ }
+
+ final Entity[] storage = list.storage;
+
+ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) {
+ final Entity entity = storage[i];
+
+ if (entity == null || (type != null && entity.getType() != type) || !entity.getBoundingBox().intersects(box)) {
+ continue;
+ }
+
+ if (predicate != null && !predicate.test((T)entity)) {
+ continue;
+ }
+
+ into.add((T)entity);
+ }
+ }
+ }
+ }
+}
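EntityCollectionBySection pairs each per-section list with a long[] occupancy bitset: sectionIndex >>> 6 selects the word, 1L << (sectionIndex & 63) the bit, and the array is sized to ceil(sectionCount / 64). A standalone sketch of that bookkeeping (hypothetical class, same math; the patch clears with ^= because the bit is known to be set at that point, while &= ~ below is the general form):

// Minimal fixed-size bitset using the same word/bit arithmetic as nonEmptyBitset.
final class SectionBitset {
    private final long[] words;

    SectionBitset(final int sections) {
        this.words = new long[(sections + (Long.SIZE - 1)) >>> 6]; // ceil(sections / 64)
    }

    void set(final int index) {
        this.words[index >>> 6] |= (1L << (index & (Long.SIZE - 1)));
    }

    void clear(final int index) {
        this.words[index >>> 6] &= ~(1L << (index & (Long.SIZE - 1)));
    }

    boolean get(final int index) {
        return (this.words[index >>> 6] & (1L << (index & (Long.SIZE - 1)))) != 0;
    }
}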
diff --git a/src/main/java/net/minecraft/network/Connection.java b/src/main/java/net/minecraft/network/Connection.java
index 2dbaf078e99e3663bd2dbdd3548468192382fae5..463d54c6c808808d3d7fe6d5303be48844bb4444 100644
--- a/src/main/java/net/minecraft/network/Connection.java
+++ b/src/main/java/net/minecraft/network/Connection.java
@@ -90,6 +90,28 @@ public class Connection extends SimpleChannelInboundHandler<Packet<?>> {
private int tickCount;
private boolean handlingFault;
public String hostname = ""; // CraftBukkit - add field
+ // Paper start - add pending task queue
+ private final Queue<Runnable> pendingTasks = new java.util.concurrent.ConcurrentLinkedQueue<>();
+ public void execute(final Runnable run) {
+ if (this.channel == null || !this.channel.isRegistered()) {
+ run.run();
+ return;
+ }
+ final boolean queue = !this.queue.isEmpty();
+ if (!queue) {
+ this.channel.eventLoop().execute(run);
+ } else {
+ this.pendingTasks.add(run);
+ if (this.queue.isEmpty()) {
+ // something flushed async, dump tasks now
+ Runnable r;
+ while ((r = this.pendingTasks.poll()) != null) {
+ this.channel.eventLoop().execute(r);
+ }
+ }
+ }
+ }
+ // Paper end - add pending task queue

public Connection(PacketFlow side) {
this.receiving = side;
@@ -248,6 +270,7 @@ public class Connection extends SimpleChannelInboundHandler<Packet<?>> {
}

private void flushQueue() {
+ try { // Paper - add pending task queue
if (this.channel != null && this.channel.isOpen()) {
Queue queue = this.queue;

@@ -260,6 +283,12 @@ public class Connection extends SimpleChannelInboundHandler<Packet<?>> {

}
}
+ } finally { // Paper start - add pending task queue
+ Runnable r;
+ while ((r = this.pendingTasks.poll()) != null) {
+ this.channel.eventLoop().execute(r);
+ }
+ } // Paper end - add pending task queue
}

public void tick() {
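The Connection changes park tasks submitted while packets are still queued and drain them from a finally block in flushQueue(), so a throwing flush cannot strand a task, and tasks never run ahead of packets queued before them. The same park-then-drain shape in isolation (simplified, illustrative names only):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executor;

// Tasks submitted while 'blocked' is true are parked and only handed to the
// delegate once drain() runs - mirroring Connection.execute()/flushQueue() above.
final class DeferringExecutor {
    private final Queue<Runnable> pending = new ConcurrentLinkedQueue<>();
    private final Executor delegate;
    private volatile boolean blocked;

    DeferringExecutor(final Executor delegate) {
        this.delegate = delegate;
    }

    void setBlocked(final boolean blocked) {
        this.blocked = blocked;
    }

    void execute(final Runnable task) {
        if (this.blocked) {
            this.pending.add(task);
            if (!this.blocked) { // unblocked concurrently: make sure the task still runs
                this.drain();
            }
        } else {
            this.delegate.execute(task);
        }
    }

    void drain() {
        Runnable r;
        while ((r = this.pending.poll()) != null) {
            this.delegate.execute(r);
        }
    }
}

The re-check after adding to the queue covers the race where the flush completed between the first check and the add, just as Connection.execute() re-tests this.queue.isEmpty().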
|
|
diff --git a/src/main/java/net/minecraft/network/protocol/game/ServerboundCommandSuggestionPacket.java b/src/main/java/net/minecraft/network/protocol/game/ServerboundCommandSuggestionPacket.java
|
|
index a5e438a834826161c52ca9db57d234d9ff80a591..b8bc1b9b8e8a33df90a963f9f9769292bf595642 100644
|
|
--- a/src/main/java/net/minecraft/network/protocol/game/ServerboundCommandSuggestionPacket.java
|
|
+++ b/src/main/java/net/minecraft/network/protocol/game/ServerboundCommandSuggestionPacket.java
|
|
@@ -14,7 +14,7 @@ public class ServerboundCommandSuggestionPacket implements Packet<ServerGamePack
|
|
|
|
public ServerboundCommandSuggestionPacket(FriendlyByteBuf buf) {
|
|
this.id = buf.readVarInt();
|
|
- this.command = buf.readUtf(32500);
|
|
+ this.command = buf.readUtf(2048);
|
|
}
|
|
|
|
@Override
|
|
diff --git a/src/main/java/net/minecraft/server/Main.java b/src/main/java/net/minecraft/server/Main.java
|
|
index b7399e29094c66c88a6f4c0e996a906bcaa3b4ca..e600563a9d2ddbfa37c106481decb13e67f71524 100644
|
|
--- a/src/main/java/net/minecraft/server/Main.java
|
|
+++ b/src/main/java/net/minecraft/server/Main.java
|
|
@@ -259,6 +259,7 @@ public class Main {
|
|
|
|
convertable_conversionsession.saveDataTag(iregistrycustom_dimension, savedata);
|
|
*/
|
|
+ Class.forName(net.minecraft.world.entity.npc.VillagerTrades.class.getName());// Paper - load this sync so it won't fail later async
|
|
final DedicatedServer dedicatedserver = (DedicatedServer) MinecraftServer.spin((thread) -> {
|
|
DedicatedServer dedicatedserver1 = new DedicatedServer(optionset, worldLoader.get(), thread, convertable_conversionsession, resourcepackrepository, worldstem, dedicatedserversettings, DataFixers.getDataFixer(), services, LoggerChunkProgressListener::new);
|
|
|
|
diff --git a/src/main/java/net/minecraft/server/MinecraftServer.java b/src/main/java/net/minecraft/server/MinecraftServer.java
|
|
index 2737dfbf4d978f2bb118cb996ea5c6920decab90..e9f4ffec4b659f3300daa0138f6e955a8d97786d 100644
|
|
--- a/src/main/java/net/minecraft/server/MinecraftServer.java
|
|
+++ b/src/main/java/net/minecraft/server/MinecraftServer.java
|
|
@@ -298,7 +298,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTa
|
|
|
|
public static <S extends MinecraftServer> S spin(Function<Thread, S> serverFactory) {
|
|
AtomicReference<S> atomicreference = new AtomicReference();
|
|
- Thread thread = new Thread(() -> {
|
|
+ Thread thread = new io.papermc.paper.util.TickThread(() -> { // Paper - rewrite chunk system
|
|
((MinecraftServer) atomicreference.get()).runServer();
|
|
}, "Server thread");
|
|
|
|
@@ -582,7 +582,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTa
|
|
this.forceDifficulty();
|
|
for (ServerLevel worldserver : this.getAllLevels()) {
|
|
this.prepareLevels(worldserver.getChunkSource().chunkMap.progressListener, worldserver);
|
|
- worldserver.entityManager.tick(); // SPIGOT-6526: Load pending entities so they are available to the API
|
|
+ //worldserver.entityManager.tick(); // SPIGOT-6526: Load pending entities so they are available to the API // Paper - rewrite chunk system, not required to "tick" anything
|
|
this.server.getPluginManager().callEvent(new org.bukkit.event.world.WorldLoadEvent(worldserver.getWorld()));
|
|
}
|
|
|
|
@@ -784,6 +784,12 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTa
|
|
public abstract boolean shouldRconBroadcast();
|
|
|
|
public boolean saveAllChunks(boolean suppressLogs, boolean flush, boolean force) {
|
|
+ // Paper start - rewrite chunk system - add close param
|
|
+ // This allows us to avoid double saving chunks by closing instead of saving then closing
|
|
+ return this.saveAllChunks(suppressLogs, flush, force, false);
|
|
+ }
|
|
+ public boolean saveAllChunks(boolean suppressLogs, boolean flush, boolean force, boolean close) {
|
|
+ // Paper end - rewrite chunk system - add close param
|
|
boolean flag3 = false;
|
|
|
|
for (Iterator iterator = this.getAllLevels().iterator(); iterator.hasNext(); flag3 = true) {
|
|
@@ -792,8 +798,12 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTa
|
|
if (!suppressLogs) {
|
|
MinecraftServer.LOGGER.info("Saving chunks for level '{}'/{}", worldserver, worldserver.dimension().location());
|
|
}
|
|
-
|
|
- worldserver.save((ProgressListener) null, flush, worldserver.noSave && !force);
|
|
+ // Paper start - rewrite chunk system
|
|
+ worldserver.save((ProgressListener) null, flush, worldserver.noSave && !force, close);
|
|
+ if (flush) {
|
|
+ MinecraftServer.LOGGER.info("ThreadedAnvilChunkStorage ({}): All chunks are saved", worldserver.getChunkSource().chunkMap.getStorageName());
|
|
+ }
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
|
|
// CraftBukkit start - moved to WorldServer.save
|
|
@@ -812,7 +822,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTa
|
|
while (iterator1.hasNext()) {
|
|
ServerLevel worldserver2 = (ServerLevel) iterator1.next();
|
|
|
|
- MinecraftServer.LOGGER.info("ThreadedAnvilChunkStorage ({}): All chunks are saved", worldserver2.getChunkSource().chunkMap.getStorageName());
|
|
+ //MinecraftServer.LOGGER.info("ThreadedAnvilChunkStorage ({}): All chunks are saved", worldserver2.getChunkSource().chunkMap.getStorageName()); // Paper - move up
|
|
}
|
|
|
|
MinecraftServer.LOGGER.info("ThreadedAnvilChunkStorage: All dimensions are saved");
|
|
@@ -892,36 +902,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTa
|
|
}
|
|
}
|
|
|
|
- while (this.levels.values().stream().anyMatch((worldserver1) -> {
|
|
- return worldserver1.getChunkSource().chunkMap.hasWork();
|
|
- })) {
|
|
- this.nextTickTime = Util.getMillis() + 1L;
|
|
- iterator = this.getAllLevels().iterator();
|
|
-
|
|
- while (iterator.hasNext()) {
|
|
- worldserver = (ServerLevel) iterator.next();
|
|
- worldserver.getChunkSource().removeTicketsOnClosing();
|
|
- worldserver.getChunkSource().tick(() -> {
|
|
- return true;
|
|
- }, false);
|
|
- }
|
|
-
|
|
- this.waitUntilNextTick();
|
|
- }
|
|
-
|
|
- this.saveAllChunks(false, true, false);
|
|
- iterator = this.getAllLevels().iterator();
|
|
-
|
|
- while (iterator.hasNext()) {
|
|
- worldserver = (ServerLevel) iterator.next();
|
|
- if (worldserver != null) {
|
|
- try {
|
|
- worldserver.close();
|
|
- } catch (IOException ioexception) {
|
|
- MinecraftServer.LOGGER.error("Exception closing the level", ioexception);
|
|
- }
|
|
- }
|
|
- }
|
|
+ this.saveAllChunks(false, true, false, true); // Paper - rewrite chunk system - move closing into here
|
|
|
|
this.isSaving = false;
|
|
this.resources.close();
|
|
@@ -940,7 +921,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTa
|
|
this.getProfileCache().save();
|
|
}
|
|
// Spigot end
-
+ io.papermc.paper.chunk.system.io.RegionFileIOThread.close(true); // Paper - rewrite chunk system
}
|
|
|
|
public String getLocalIp() {
|
|
@@ -974,6 +955,8 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTa
|
|
}
|
|
// Spigot End
|
|
|
|
+ public static volatile RuntimeException chunkSystemCrash; // Paper - rewrite chunk system
|
|
+
|
|
protected void runServer() {
|
|
try {
|
|
if (!this.initServer()) {
|
|
@@ -990,6 +973,12 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTa
|
|
Arrays.fill( recentTps, 20 );
|
|
long curTime, tickSection = Util.getMillis(), tickCount = 1;
|
|
while (this.running) {
|
|
+ // Paper start - rewrite chunk system
|
|
+ // guarantee that nothing can stop the server from halting if it can at least still tick
|
|
+ if (this.chunkSystemCrash != null) {
|
|
+ throw this.chunkSystemCrash;
|
|
+ }
|
|
+ // Paper end - rewrite chunk system
|
|
long i = (curTime = Util.getMillis()) - this.nextTickTime;
|
|
|
|
if (i > 5000L && this.nextTickTime - this.lastOverloadWarning >= 30000L) { // CraftBukkit
|
|
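The chunkSystemCrash field above is a volatile handoff slot: a chunk worker records its failure, and the main tick loop rethrows it so a dead chunk system can never leave the server half-alive. A self-contained sketch of that pattern (names here are illustrative, not Paper's):

// Illustrative volatile crash-handoff pattern; not Paper's actual classes.
public final class CrashHandoffDemo {
    // Written by worker threads, read by the main loop every iteration.
    static volatile RuntimeException pendingCrash;

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            try {
                throw new IllegalStateException("simulated chunk system failure");
            } catch (RuntimeException ex) {
                pendingCrash = ex; // publish the failure to the main loop
            }
        }, "chunk-worker");
        worker.start();
        worker.join();

        while (true) { // stand-in for the server tick loop
            RuntimeException crash = pendingCrash;
            if (crash != null) {
                throw crash; // rethrow on the main thread so normal crash handling runs
            }
            // ... tick ...
        }
    }
}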
@@ -1102,6 +1091,11 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTa
|
|
}
|
|
|
|
private boolean haveTime() {
|
|
+ // Paper start
|
|
+ if (this.forceTicks) {
|
|
+ return true;
|
|
+ }
|
|
+ // Paper end
|
|
// CraftBukkit start
|
|
if (isOversleep) return canOversleep();// Paper - because of our changes, this logic is broken
|
|
return this.forceTicks || this.runningTask() || Util.getMillis() < (this.mayHaveDelayedTasks ? this.delayedTasksMaxNextTickTime : this.nextTickTime);
|
|
@@ -2228,7 +2222,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop<TickTa
|
|
// CraftBukkit start
|
|
@Override
|
|
public boolean isSameThread() {
|
|
- return super.isSameThread() || this.isStopped(); // CraftBukkit - MC-142590
|
|
+ return io.papermc.paper.util.TickThread.isTickThread(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public boolean isDebugging() {
|
|
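Replacing the isStopped() fallback with TickThread.isTickThread() works because Paper runs ticking on threads of a dedicated marker type, so an instanceof check answers "am I on a tick thread" without consulting mutable server state. A rough sketch of the marker-thread idiom, under the assumption that all tick threads are constructed through one factory; this is not the real io.papermc.paper.util.TickThread:

// Sketch of the marker-thread idiom.
public class TickThread extends Thread {
    public TickThread(Runnable task, String name) {
        super(task, name);
    }

    public static boolean isTickThread() {
        // Cheap constant-time check: no locks, no server state, safe from any thread.
        return Thread.currentThread() instanceof TickThread;
    }
}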
diff --git a/src/main/java/net/minecraft/server/dedicated/DedicatedServer.java b/src/main/java/net/minecraft/server/dedicated/DedicatedServer.java
|
|
index 5e5c4de89784db702256ee765091e929066116e4..8318af4db5fa7242b5cb2ae50fd08174017d6f7d 100644
|
|
--- a/src/main/java/net/minecraft/server/dedicated/DedicatedServer.java
|
|
+++ b/src/main/java/net/minecraft/server/dedicated/DedicatedServer.java
|
|
@@ -400,7 +400,34 @@ public class DedicatedServer extends MinecraftServer implements ServerInterface
|
|
return this.getProperties().allowNether;
|
|
}
|
|
|
|
+ static final java.util.concurrent.atomic.AtomicInteger ASYNC_DEBUG_CHUNKS_COUNT = new java.util.concurrent.atomic.AtomicInteger(); // Paper - rewrite chunk system
|
|
+
|
|
public void handleConsoleInput(String command, CommandSourceStack commandSource) {
|
|
+ // Paper start - rewrite chunk system
|
|
+ if (command.equalsIgnoreCase("paper debug chunks --async")) {
|
|
+ LOGGER.info("Scheduling async debug chunks");
|
|
+ Runnable run = () -> {
|
|
+ LOGGER.info("Async debug chunks executing");
|
|
+ io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.dumpAllChunkLoadInfo(false);
|
|
+ CommandSender sender = MinecraftServer.getServer().console;
|
|
+ java.io.File file = new java.io.File(new java.io.File(new java.io.File("."), "debug"),
|
|
+ "chunks-" + java.time.format.DateTimeFormatter.ofPattern("yyyy-MM-dd_HH.mm.ss").format(java.time.LocalDateTime.now()) + ".txt");
|
|
+ sender.sendMessage(net.kyori.adventure.text.Component.text("Writing chunk information dump to " + file, net.kyori.adventure.text.format.NamedTextColor.GREEN));
|
|
+ try {
|
|
+ io.papermc.paper.util.MCUtil.dumpChunks(file, true);
|
|
+ sender.sendMessage(net.kyori.adventure.text.Component.text("Successfully written chunk information!", net.kyori.adventure.text.format.NamedTextColor.GREEN));
|
|
+ } catch (Throwable thr) {
|
|
+ MinecraftServer.LOGGER.warn("Failed to dump chunk information to file " + file.toString(), thr);
|
|
+ sender.sendMessage(net.kyori.adventure.text.Component.text("Failed to dump chunk information, see console", net.kyori.adventure.text.format.NamedTextColor.RED));
|
|
+ }
|
|
+ };
|
|
+ Thread t = new Thread(run);
|
|
+ t.setName("Async debug thread #" + ASYNC_DEBUG_CHUNKS_COUNT.getAndIncrement());
|
|
+ t.setDaemon(true);
|
|
+ t.start();
|
|
+ return;
|
|
+ }
|
|
+ // Paper end - rewrite chunk system
|
|
this.consoleInput.add(new ConsoleInput(command, commandSource));
|
|
}
|
|
|
|
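The --async branch above offloads the chunk dump onto a freshly spawned daemon thread, so a stalled main thread cannot block the diagnostic. Reduced to its general shape, with the dump body left as a placeholder:

import java.util.concurrent.atomic.AtomicInteger;

// Generic shape of the async debug-dump handoff; the dump itself is a stub here.
final class AsyncDebugRunner {
    private static final AtomicInteger ASYNC_DEBUG_COUNT = new AtomicInteger();

    static void runAsyncDump(Runnable dump) {
        Thread t = new Thread(dump);
        // Unique names make these threads identifiable in a thread dump.
        t.setName("Async debug thread #" + ASYNC_DEBUG_COUNT.getAndIncrement());
        // Daemon: an in-flight dump never prevents JVM exit.
        t.setDaemon(true);
        t.start();
    }
}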
diff --git a/src/main/java/net/minecraft/server/level/ChunkHolder.java b/src/main/java/net/minecraft/server/level/ChunkHolder.java
|
|
index a7feddc31da0870faa3d32a7108282e9e9143180..2ba3bb4e5670ece798a8882801a856d82851c00a 100644
|
|
--- a/src/main/java/net/minecraft/server/level/ChunkHolder.java
|
|
+++ b/src/main/java/net/minecraft/server/level/ChunkHolder.java
|
|
@@ -50,17 +50,12 @@ public class ChunkHolder {
|
|
private static final List<ChunkStatus> CHUNK_STATUSES = ChunkStatus.getStatusList();
|
|
private static final ChunkHolder.FullChunkStatus[] FULL_CHUNK_STATUSES = ChunkHolder.FullChunkStatus.values();
|
|
private static final int BLOCKS_BEFORE_RESEND_FUDGE = 64;
|
|
- private final AtomicReferenceArray<CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>>> futures;
|
|
+ // Paper - rewrite chunk system
|
|
private final LevelHeightAccessor levelHeightAccessor;
|
|
- private volatile CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> fullChunkFuture; private int fullChunkCreateCount; private volatile boolean isFullChunkReady; // Paper - cache chunk ticking stage
|
|
- private volatile CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> tickingChunkFuture; private volatile boolean isTickingReady; // Paper - cache chunk ticking stage
|
|
- private volatile CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> entityTickingChunkFuture; private volatile boolean isEntityTickingReady; // Paper - cache chunk ticking stage
|
|
- public CompletableFuture<ChunkAccess> chunkToSave; // Paper - public
|
|
+ // Paper - rewrite chunk system
|
|
@Nullable
|
|
private final DebugBuffer<ChunkHolder.ChunkSaveDebug> chunkToSaveHistory;
|
|
- public int oldTicketLevel;
|
|
- private int ticketLevel;
|
|
- private int queueLevel;
|
|
+ // Paper - rewrite chunk system
|
|
public final ChunkPos pos;
|
|
private boolean hasChangedSections;
|
|
private final ShortSet[] changedBlocksPerSection;
|
|
@@ -69,11 +64,22 @@ public class ChunkHolder {
|
|
private final LevelLightEngine lightEngine;
|
|
private final ChunkHolder.LevelChangeListener onLevelChange;
|
|
public final ChunkHolder.PlayerProvider playerProvider;
|
|
- private boolean wasAccessibleSinceLastSave;
|
|
+ // Paper - rewrite chunk system
|
|
private boolean resendLight;
|
|
- private CompletableFuture<Void> pendingFullStateConfirmation;
|
|
+ // Paper - rewrite chunk system
|
|
|
|
private final ChunkMap chunkMap; // Paper
|
|
+ // Paper start - no-tick view distance
|
|
+ public final LevelChunk getSendingChunk() {
|
|
+ // it's important that we use getChunkAtIfLoadedImmediately to mirror the chunk sending logic used
|
|
+ // in Chunk's neighbour callback
|
|
+ LevelChunk ret = this.chunkMap.level.getChunkSource().getChunkAtIfLoadedImmediately(this.pos.x, this.pos.z);
|
|
+ if (ret != null && ret.areNeighboursLoaded(1)) {
|
|
+ return ret;
|
|
+ }
|
|
+ return null;
|
|
+ }
|
|
+ // Paper end - no-tick view distance
|
|
|
|
// Paper start
|
|
public void onChunkAdd() {
|
|
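getSendingChunk above only hands a chunk to the packet path once its 1-radius neighbourhood is loaded, mirroring the neighbour callback that triggers sending. The guard reduces to a read-then-validate shape like the following, where ChunkLookup and LoadedChunk are stand-ins for the Paper types, not real API:

// Stand-in types illustrating the neighbour-loaded gate.
interface LoadedChunk {
    boolean areNeighboursLoaded(int radius);
}

interface ChunkLookup {
    LoadedChunk getChunkIfLoadedImmediately(int x, int z);

    // Returns the chunk only when it is actually safe to send to clients:
    // present, and with the full 3x3 neighbourhood loaded around it.
    default LoadedChunk getSendingChunk(int x, int z) {
        LoadedChunk ret = this.getChunkIfLoadedImmediately(x, z);
        if (ret != null && ret.areNeighboursLoaded(1)) {
            return ret;
        }
        return null;
    }
}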
@@ -85,148 +91,106 @@ public class ChunkHolder {
|
|
}
|
|
// Paper end
|
|
|
|
- public ChunkHolder(ChunkPos pos, int level, LevelHeightAccessor world, LevelLightEngine lightingProvider, ChunkHolder.LevelChangeListener levelUpdateListener, ChunkHolder.PlayerProvider playersWatchingChunkProvider) {
|
|
- this.futures = new AtomicReferenceArray(ChunkHolder.CHUNK_STATUSES.size());
|
|
- this.fullChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE;
|
|
- this.tickingChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE;
|
|
- this.entityTickingChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE;
|
|
- this.chunkToSave = CompletableFuture.completedFuture(null); // CraftBukkit - decompile error
|
|
+ public final io.papermc.paper.chunk.system.scheduling.NewChunkHolder newChunkHolder; // Paper - rewrite chunk system
|
|
+
|
|
+ public ChunkHolder(ChunkPos pos, LevelHeightAccessor world, LevelLightEngine lightingProvider, ChunkHolder.PlayerProvider playersWatchingChunkProvider, io.papermc.paper.chunk.system.scheduling.NewChunkHolder newChunkHolder) { // Paper - rewrite chunk system
|
|
+ this.newChunkHolder = newChunkHolder; // Paper - rewrite chunk system
|
|
this.chunkToSaveHistory = null;
|
|
this.blockChangedLightSectionFilter = new BitSet();
|
|
this.skyChangedLightSectionFilter = new BitSet();
|
|
- this.pendingFullStateConfirmation = CompletableFuture.completedFuture(null); // CraftBukkit - decompile error
|
|
+ // Paper - rewrite chunk system
|
|
this.pos = pos;
|
|
this.levelHeightAccessor = world;
|
|
this.lightEngine = lightingProvider;
|
|
- this.onLevelChange = levelUpdateListener;
|
|
+ this.onLevelChange = null; // Paper - rewrite chunk system
|
|
this.playerProvider = playersWatchingChunkProvider;
|
|
- this.oldTicketLevel = ChunkMap.MAX_CHUNK_DISTANCE + 1;
|
|
- this.ticketLevel = this.oldTicketLevel;
|
|
- this.queueLevel = this.oldTicketLevel;
|
|
- this.setTicketLevel(level);
|
|
+ // Paper - rewrite chunk system
|
|
this.changedBlocksPerSection = new ShortSet[world.getSectionsCount()];
|
|
this.chunkMap = (ChunkMap)playersWatchingChunkProvider; // Paper
|
|
}
|
|
|
|
// Paper start
|
|
public @Nullable ChunkAccess getAvailableChunkNow() {
|
|
- // TODO can we just getStatusFuture(EMPTY)?
|
|
- for (ChunkStatus curr = ChunkStatus.FULL, next = curr.getParent(); curr != next; curr = next, next = next.getParent()) {
|
|
- CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> future = this.getFutureIfPresentUnchecked(curr);
|
|
- Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either = future.getNow(null);
|
|
- if (either == null || either.left().isEmpty()) {
|
|
- continue;
|
|
- }
|
|
- return either.left().get();
|
|
- }
|
|
- return null;
|
|
+ return this.newChunkHolder.getCurrentChunk(); // Paper - rewrite chunk system
|
|
}
|
|
// Paper end
|
|
// CraftBukkit start
|
|
public LevelChunk getFullChunkNow() {
|
|
- // Note: We use the oldTicketLevel for isLoaded checks.
|
|
- if (!ChunkHolder.getFullChunkStatus(this.oldTicketLevel).isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) return null;
|
|
- return this.getFullChunkNowUnchecked();
|
|
+ // Paper start - rewrite chunk system
|
|
+ ChunkAccess chunk = this.getAvailableChunkNow();
|
|
+ if (!this.isFullChunkReady() || !(chunk instanceof LevelChunk)) return null; // instanceof to avoid a race condition on off-main threads
|
|
+ return (LevelChunk)chunk;
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
|
|
public LevelChunk getFullChunkNowUnchecked() {
|
|
- CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> statusFuture = this.getFutureIfPresentUnchecked(ChunkStatus.FULL);
|
|
- Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either = (Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>) statusFuture.getNow(null);
|
|
- return (either == null) ? null : (LevelChunk) either.left().orElse(null);
|
|
+ // Paper start - rewrite chunk system
|
|
+ ChunkAccess chunk = this.getAvailableChunkNow();
|
|
+ return chunk instanceof LevelChunk ? (LevelChunk)chunk : null;
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
// CraftBukkit end
|
|
|
|
public CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> getFutureIfPresentUnchecked(ChunkStatus leastStatus) {
|
|
- CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture = (CompletableFuture) this.futures.get(leastStatus.getIndex());
|
|
-
|
|
- return completablefuture == null ? ChunkHolder.UNLOADED_CHUNK_FUTURE : completablefuture;
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> getFutureIfPresent(ChunkStatus leastStatus) {
|
|
- return ChunkHolder.getStatus(this.ticketLevel).isOrAfter(leastStatus) ? this.getFutureIfPresentUnchecked(leastStatus) : ChunkHolder.UNLOADED_CHUNK_FUTURE;
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public final CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> getTickingChunkFuture() { // Paper - final for inline
|
|
- return this.tickingChunkFuture;
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public final CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> getEntityTickingChunkFuture() { // Paper - final for inline
|
|
- return this.entityTickingChunkFuture;
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public final CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> getFullChunkFuture() { // Paper - final for inline
|
|
- return this.fullChunkFuture;
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
@Nullable
|
|
public final LevelChunk getTickingChunk() { // Paper - final for inline
|
|
- CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> completablefuture = this.getTickingChunkFuture();
|
|
- Either<LevelChunk, ChunkHolder.ChunkLoadingFailure> either = (Either) completablefuture.getNow(null); // CraftBukkit - decompile error
|
|
-
|
|
- return either == null ? null : (LevelChunk) either.left().orElse(null); // CraftBukkit - decompile error
|
|
+ // Paper start - rewrite chunk system
|
|
+ if (!this.isTickingReady()) {
|
|
+ return null;
|
|
+ }
|
|
+ return (LevelChunk)this.getAvailableChunkNow();
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
|
|
@Nullable
|
|
public final LevelChunk getFullChunk() { // Paper - final for inline
|
|
- CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> completablefuture = this.getFullChunkFuture();
|
|
- Either<LevelChunk, ChunkHolder.ChunkLoadingFailure> either = (Either) completablefuture.getNow(null); // CraftBukkit - decompile error
|
|
-
|
|
- return either == null ? null : (LevelChunk) either.left().orElse(null); // CraftBukkit - decompile error
|
|
+ // Paper start - rewrite chunk system
|
|
+ if (!this.isFullChunkReady()) {
|
|
+ return null;
|
|
+ }
|
|
+ return (LevelChunk)this.getAvailableChunkNow();
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
|
|
@Nullable
|
|
public ChunkStatus getLastAvailableStatus() {
|
|
- for (int i = ChunkHolder.CHUNK_STATUSES.size() - 1; i >= 0; --i) {
|
|
- ChunkStatus chunkstatus = (ChunkStatus) ChunkHolder.CHUNK_STATUSES.get(i);
|
|
- CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture = this.getFutureIfPresentUnchecked(chunkstatus);
|
|
-
|
|
- if (((Either) completablefuture.getNow(ChunkHolder.UNLOADED_CHUNK)).left().isPresent()) {
|
|
- return chunkstatus;
|
|
- }
|
|
- }
|
|
-
|
|
- return null;
|
|
+ return this.newChunkHolder.getCurrentGenStatus(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
// Paper start
|
|
public ChunkStatus getChunkHolderStatus() {
|
|
- for (ChunkStatus curr = ChunkStatus.FULL, next = curr.getParent(); curr != next; curr = next, next = next.getParent()) {
|
|
- CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> future = this.getFutureIfPresentUnchecked(curr);
|
|
- Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either = future.getNow(null);
|
|
- if (either == null || !either.left().isPresent()) {
|
|
- continue;
|
|
- }
|
|
- return curr;
|
|
- }
|
|
-
|
|
- return null;
|
|
+ return this.newChunkHolder.getCurrentGenStatus(); // Paper - rewrite chunk system
|
|
}
|
|
// Paper end
|
|
|
|
@Nullable
|
|
public ChunkAccess getLastAvailable() {
|
|
- for (int i = ChunkHolder.CHUNK_STATUSES.size() - 1; i >= 0; --i) {
|
|
- ChunkStatus chunkstatus = (ChunkStatus) ChunkHolder.CHUNK_STATUSES.get(i);
|
|
- CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture = this.getFutureIfPresentUnchecked(chunkstatus);
|
|
-
|
|
- if (!completablefuture.isCompletedExceptionally()) {
|
|
- Optional<ChunkAccess> optional = ((Either) completablefuture.getNow(ChunkHolder.UNLOADED_CHUNK)).left();
|
|
-
|
|
- if (optional.isPresent()) {
|
|
- return (ChunkAccess) optional.get();
|
|
- }
|
|
- }
|
|
- }
|
|
-
|
|
- return null;
|
|
+ return this.newChunkHolder.getCurrentChunk(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
- public final CompletableFuture<ChunkAccess> getChunkToSave() { // Paper - final for inline
|
|
- return this.chunkToSave;
|
|
- }
|
|
+ // Paper - rewrite chunk system
|
|
|
|
public void blockChanged(BlockPos pos) {
|
|
- LevelChunk chunk = this.getTickingChunk();
|
|
+ LevelChunk chunk = this.getSendingChunk(); // Paper - no-tick view distance
|
|
|
|
if (chunk != null) {
|
|
int i = this.levelHeightAccessor.getSectionIndex(pos.getY());
|
|
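The pattern in getFullChunkNow above (read the current chunk into a local once, then instanceof-check that local) is what makes the off-main race harmless: a second read of the field could observe a different chunk. A compact illustration of the safe shape, with stand-in types rather than LevelChunk/ChunkAccess:

// Illustration of the read-once-then-type-check pattern used above.
final class RaceSafeRead {
    static class Chunk {}                          // stand-in for ChunkAccess
    static final class FullChunk extends Chunk {}  // stand-in for LevelChunk

    private volatile Chunk currentChunk; // may be swapped by another thread

    FullChunk getFullChunkNow(boolean fullChunkReady) {
        Chunk chunk = this.currentChunk; // single read: one consistent snapshot
        // instanceof on the local avoids a check-then-cast race; re-reading the
        // field before the cast could observe a concurrently swapped proto-chunk.
        if (!fullChunkReady || !(chunk instanceof FullChunk full)) {
            return null;
        }
        return full;
    }
}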
@@ -242,14 +206,15 @@ public class ChunkHolder {
|
|
}
|
|
|
|
public void sectionLightChanged(LightLayer lightType, int y) {
|
|
- Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either = (Either) this.getFutureIfPresent(ChunkStatus.FEATURES).getNow(null); // CraftBukkit - decompile error
|
|
+ // Paper start - no-tick view distance
|
|
|
|
- if (either != null) {
|
|
- ChunkAccess ichunkaccess = (ChunkAccess) either.left().orElse(null); // CraftBukkit - decompile error
|
|
+ if (true) {
|
|
+ ChunkAccess ichunkaccess = this.getAvailableChunkNow();
|
|
|
|
if (ichunkaccess != null) {
|
|
ichunkaccess.setUnsaved(true);
|
|
- LevelChunk chunk = this.getTickingChunk();
|
|
+ LevelChunk chunk = this.getSendingChunk();
|
|
+ // Paper end - no-tick view distance
|
|
|
|
if (chunk != null) {
|
|
int j = this.lightEngine.getMinLightSection();
|
|
@@ -340,66 +305,32 @@ public class ChunkHolder {
|
|
}
|
|
|
|
public void broadcast(Packet<?> packet, boolean onlyOnWatchDistanceEdge) {
|
|
- this.playerProvider.getPlayers(this.pos, onlyOnWatchDistanceEdge).forEach((entityplayer) -> {
|
|
- entityplayer.connection.send(packet);
|
|
- });
|
|
- }
|
|
-
|
|
- public CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> getOrScheduleFuture(ChunkStatus targetStatus, ChunkMap chunkStorage) {
|
|
- int i = targetStatus.getIndex();
|
|
- CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture = (CompletableFuture) this.futures.get(i);
|
|
-
|
|
- if (completablefuture != null) {
|
|
- Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either = (Either) completablefuture.getNow(ChunkHolder.NOT_DONE_YET);
|
|
-
|
|
- if (either == null) {
|
|
- String s = "value in future for status: " + targetStatus + " was incorrectly set to null at chunk: " + this.pos;
|
|
+ // Paper start - per player view distance
|
|
+ // there can be potential desync between the player's last mapped section and the view distance map, so use the
|
|
+ // view distance map here.
|
|
+ com.destroystokyo.paper.util.misc.PlayerAreaMap viewDistanceMap = this.chunkMap.playerChunkManager.broadcastMap; // Paper - replace old player chunk manager
|
|
+ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> players = viewDistanceMap.getObjectsInRange(this.pos);
|
|
+ if (players == null) {
|
|
+ return;
|
|
+ }
|
|
|
|
- throw chunkStorage.debugFuturesAndCreateReportedException(new IllegalStateException("null value previously set for chunk status"), s);
|
|
+ Object[] backingSet = players.getBackingSet();
|
|
+ for (int i = 0, len = backingSet.length; i < len; ++i) {
|
|
+ if (!(backingSet[i] instanceof ServerPlayer player)) {
|
|
+ continue;
|
|
}
|
|
-
|
|
- if (either == ChunkHolder.NOT_DONE_YET || either.right().isEmpty()) {
|
|
- return completablefuture;
|
|
+ if (!this.chunkMap.playerChunkManager.isChunkSent(player, this.pos.x, this.pos.z, onlyOnWatchDistanceEdge)) {
|
|
+ continue;
|
|
}
|
|
+ player.connection.send(packet);
|
|
}
|
|
-
|
|
- if (ChunkHolder.getStatus(this.ticketLevel).isOrAfter(targetStatus)) {
|
|
- CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture1 = chunkStorage.schedule(this, targetStatus);
|
|
-
|
|
- this.updateChunkToSave(completablefuture1, "schedule " + targetStatus);
|
|
- this.futures.set(i, completablefuture1);
|
|
- return completablefuture1;
|
|
- } else {
|
|
- return completablefuture == null ? ChunkHolder.UNLOADED_CHUNK_FUTURE : completablefuture;
|
|
- }
|
|
+ // Paper end - per player view distance
|
|
}
|
|
|
|
- protected void addSaveDependency(String thenDesc, CompletableFuture<?> then) {
|
|
- if (this.chunkToSaveHistory != null) {
|
|
- this.chunkToSaveHistory.push(new ChunkHolder.ChunkSaveDebug(Thread.currentThread(), then, thenDesc));
|
|
- }
|
|
-
|
|
- this.chunkToSave = this.chunkToSave.thenCombine(then, (ichunkaccess, object) -> {
|
|
- return ichunkaccess;
|
|
- });
|
|
- }
|
|
-
|
|
- private void updateChunkToSave(CompletableFuture<? extends Either<? extends ChunkAccess, ChunkHolder.ChunkLoadingFailure>> then, String thenDesc) {
|
|
- if (this.chunkToSaveHistory != null) {
|
|
- this.chunkToSaveHistory.push(new ChunkHolder.ChunkSaveDebug(Thread.currentThread(), then, thenDesc));
|
|
- }
|
|
-
|
|
- this.chunkToSave = this.chunkToSave.thenCombine(then, (ichunkaccess, either) -> {
|
|
- return (ChunkAccess) either.map((ichunkaccess1) -> {
|
|
- return ichunkaccess1;
|
|
- }, (playerchunk_failure) -> {
|
|
- return ichunkaccess;
|
|
- });
|
|
- });
|
|
- }
|
|
+ // Paper - rewrite chunk system
|
|
|
|
public ChunkHolder.FullChunkStatus getFullStatus() {
|
|
- return ChunkHolder.getFullChunkStatus(this.ticketLevel);
|
|
+ return this.newChunkHolder.getChunkStatus(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public final ChunkPos getPos() { // Paper - final for inline
|
|
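The rewritten broadcast above iterates the raw backing array of a pooled hash set rather than allocating an iterator, skipping empty or foreign slots with an instanceof pattern. The loop's essentials, with Player and the send call as placeholders:

// Essentials of the backing-array broadcast loop; Player/packet types are placeholders.
final class BroadcastSketch {
    interface Player { void send(String packet); }

    static void broadcast(Object[] backingSet, String packet) {
        // Raw array scan: no iterator allocation on this hot path, and null or
        // non-player slots are filtered by the instanceof pattern below.
        for (int i = 0, len = backingSet.length; i < len; ++i) {
            if (!(backingSet[i] instanceof Player player)) {
                continue;
            }
            player.send(packet);
        }
    }
}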
@@ -407,207 +338,10 @@ public class ChunkHolder {
|
|
}
|
|
|
|
public final int getTicketLevel() { // Paper - final for inline
|
|
- return this.ticketLevel;
|
|
- }
|
|
-
|
|
- public int getQueueLevel() {
|
|
- return this.queueLevel;
|
|
- }
|
|
-
|
|
- private void setQueueLevel(int level) {
|
|
- this.queueLevel = level;
|
|
- }
|
|
-
|
|
- public void setTicketLevel(int level) {
|
|
- this.ticketLevel = level;
|
|
- }
|
|
-
|
|
- private void scheduleFullChunkPromotion(ChunkMap playerchunkmap, CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> completablefuture, Executor executor, ChunkHolder.FullChunkStatus playerchunk_state) {
|
|
- this.pendingFullStateConfirmation.cancel(false);
|
|
- CompletableFuture<Void> completablefuture1 = new CompletableFuture();
|
|
-
|
|
- completablefuture1.thenRunAsync(() -> {
|
|
- playerchunkmap.onFullChunkStatusChange(this.pos, playerchunk_state);
|
|
- }, executor);
|
|
- this.pendingFullStateConfirmation = completablefuture1;
|
|
- completablefuture.thenAccept((either) -> {
|
|
- either.ifLeft((chunk) -> {
|
|
- completablefuture1.complete(null); // CraftBukkit - decompile error
|
|
- });
|
|
- });
|
|
+ return this.newChunkHolder.getTicketLevel(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
- private void demoteFullChunk(ChunkMap playerchunkmap, ChunkHolder.FullChunkStatus playerchunk_state) {
|
|
- this.pendingFullStateConfirmation.cancel(false);
|
|
- playerchunkmap.onFullChunkStatusChange(this.pos, playerchunk_state);
|
|
- }
|
|
-
|
|
- protected void updateFutures(ChunkMap chunkStorage, Executor executor) {
|
|
- ChunkStatus chunkstatus = ChunkHolder.getStatus(this.oldTicketLevel);
|
|
- ChunkStatus chunkstatus1 = ChunkHolder.getStatus(this.ticketLevel);
|
|
- boolean flag = this.oldTicketLevel <= ChunkMap.MAX_CHUNK_DISTANCE;
|
|
- boolean flag1 = this.ticketLevel <= ChunkMap.MAX_CHUNK_DISTANCE;
|
|
- ChunkHolder.FullChunkStatus playerchunk_state = ChunkHolder.getFullChunkStatus(this.oldTicketLevel);
|
|
- ChunkHolder.FullChunkStatus playerchunk_state1 = ChunkHolder.getFullChunkStatus(this.ticketLevel);
|
|
- // CraftBukkit start
|
|
- // ChunkUnloadEvent: Called before the chunk is unloaded: isChunkLoaded is still true and chunk can still be modified by plugins.
|
|
- if (playerchunk_state.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && !playerchunk_state1.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
|
|
- this.getFutureIfPresentUnchecked(ChunkStatus.FULL).thenAccept((either) -> {
|
|
- LevelChunk chunk = (LevelChunk)either.left().orElse(null);
|
|
- if (chunk != null) {
|
|
- chunkStorage.callbackExecutor.execute(() -> {
|
|
- // Minecraft will apply the chunks tick lists to the world once the chunk got loaded, and then store the tick
|
|
- // lists again inside the chunk once the chunk becomes inaccessible and set the chunk's needsSaving flag.
|
|
- // These actions may however happen deferred, so we manually set the needsSaving flag already here.
|
|
- chunk.setUnsaved(true);
|
|
- chunk.unloadCallback();
|
|
- });
|
|
- }
|
|
- }).exceptionally((throwable) -> {
|
|
- // ensure exceptions are printed, by default this is not the case
|
|
- MinecraftServer.LOGGER.error("Failed to schedule unload callback for chunk " + ChunkHolder.this.pos, throwable);
|
|
- return null;
|
|
- });
|
|
-
|
|
- // Run callback right away if the future was already done
|
|
- chunkStorage.callbackExecutor.run();
|
|
- }
|
|
- // CraftBukkit end
|
|
-
|
|
- if (flag) {
|
|
- Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either = Either.right(new ChunkHolder.ChunkLoadingFailure() {
|
|
- public String toString() {
|
|
- return "Unloaded ticket level " + ChunkHolder.this.pos;
|
|
- }
|
|
- });
|
|
-
|
|
- for (int i = flag1 ? chunkstatus1.getIndex() + 1 : 0; i <= chunkstatus.getIndex(); ++i) {
|
|
- CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture = (CompletableFuture) this.futures.get(i);
|
|
-
|
|
- if (completablefuture == null) {
|
|
- this.futures.set(i, CompletableFuture.completedFuture(either));
|
|
- }
|
|
- }
|
|
- }
|
|
-
|
|
- boolean flag2 = playerchunk_state.isOrAfter(ChunkHolder.FullChunkStatus.BORDER);
|
|
- boolean flag3 = playerchunk_state1.isOrAfter(ChunkHolder.FullChunkStatus.BORDER);
|
|
-
|
|
- this.wasAccessibleSinceLastSave |= flag3;
|
|
- if (!flag2 && flag3) {
|
|
- int expectCreateCount = ++this.fullChunkCreateCount; // Paper
|
|
- this.fullChunkFuture = chunkStorage.prepareAccessibleChunk(this);
|
|
- this.scheduleFullChunkPromotion(chunkStorage, this.fullChunkFuture, executor, ChunkHolder.FullChunkStatus.BORDER);
|
|
- // Paper start - cache ticking ready status
|
|
- this.fullChunkFuture.thenAccept(either -> {
|
|
- final Optional<LevelChunk> left = either.left();
|
|
- if (left.isPresent() && ChunkHolder.this.fullChunkCreateCount == expectCreateCount) {
|
|
- LevelChunk fullChunk = either.left().get();
|
|
- ChunkHolder.this.isFullChunkReady = true;
|
|
- io.papermc.paper.chunk.system.ChunkSystem.onChunkBorder(fullChunk, this);
|
|
- }
|
|
- });
|
|
- this.updateChunkToSave(this.fullChunkFuture, "full");
|
|
- }
|
|
-
|
|
- if (flag2 && !flag3) {
|
|
- // Paper start
|
|
- if (this.isFullChunkReady) {
|
|
- io.papermc.paper.chunk.system.ChunkSystem.onChunkNotBorder(this.fullChunkFuture.join().left().get(), this); // Paper
|
|
- }
|
|
- // Paper end
|
|
- this.fullChunkFuture.complete(ChunkHolder.UNLOADED_LEVEL_CHUNK);
|
|
- this.fullChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE;
|
|
- ++this.fullChunkCreateCount; // Paper - cache ticking ready status
|
|
- this.isFullChunkReady = false; // Paper - cache ticking ready status
|
|
- }
|
|
-
|
|
- boolean flag4 = playerchunk_state.isOrAfter(ChunkHolder.FullChunkStatus.TICKING);
|
|
- boolean flag5 = playerchunk_state1.isOrAfter(ChunkHolder.FullChunkStatus.TICKING);
|
|
-
|
|
- if (!flag4 && flag5) {
|
|
- this.tickingChunkFuture = chunkStorage.prepareTickingChunk(this);
|
|
- this.scheduleFullChunkPromotion(chunkStorage, this.tickingChunkFuture, executor, ChunkHolder.FullChunkStatus.TICKING);
|
|
- // Paper start - cache ticking ready status
|
|
- this.tickingChunkFuture.thenAccept(either -> {
|
|
- either.ifLeft(chunk -> {
|
|
- // note: Here is a very good place to add callbacks to logic waiting on this.
|
|
- ChunkHolder.this.isTickingReady = true;
|
|
- io.papermc.paper.chunk.system.ChunkSystem.onChunkTicking(chunk, this);
|
|
- });
|
|
- });
|
|
- // Paper end
|
|
- this.updateChunkToSave(this.tickingChunkFuture, "ticking");
|
|
- }
|
|
-
|
|
- if (flag4 && !flag5) {
|
|
- // Paper start
|
|
- if (this.isTickingReady) {
|
|
- io.papermc.paper.chunk.system.ChunkSystem.onChunkNotTicking(this.tickingChunkFuture.join().left().get(), this); // Paper
|
|
- }
|
|
- // Paper end
|
|
- this.tickingChunkFuture.complete(ChunkHolder.UNLOADED_LEVEL_CHUNK); this.isTickingReady = false; // Paper - cache chunk ticking stage
|
|
- this.tickingChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE;
|
|
- }
|
|
-
|
|
- boolean flag6 = playerchunk_state.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING);
|
|
- boolean flag7 = playerchunk_state1.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING);
|
|
-
|
|
- if (!flag6 && flag7) {
|
|
- if (this.entityTickingChunkFuture != ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE) {
|
|
- throw (IllegalStateException) Util.pauseInIde(new IllegalStateException());
|
|
- }
|
|
-
|
|
- this.entityTickingChunkFuture = chunkStorage.prepareEntityTickingChunk(this.pos);
|
|
- this.scheduleFullChunkPromotion(chunkStorage, this.entityTickingChunkFuture, executor, ChunkHolder.FullChunkStatus.ENTITY_TICKING);
|
|
- // Paper start - cache ticking ready status
|
|
- this.entityTickingChunkFuture.thenAccept(either -> {
|
|
- either.ifLeft(chunk -> {
|
|
- ChunkHolder.this.isEntityTickingReady = true;
|
|
- io.papermc.paper.chunk.system.ChunkSystem.onChunkEntityTicking(chunk, this);
|
|
- });
|
|
- });
|
|
- // Paper end
|
|
- this.updateChunkToSave(this.entityTickingChunkFuture, "entity ticking");
|
|
- }
|
|
-
|
|
- if (flag6 && !flag7) {
|
|
- // Paper start
|
|
- if (this.isEntityTickingReady) {
|
|
- io.papermc.paper.chunk.system.ChunkSystem.onChunkNotEntityTicking(this.entityTickingChunkFuture.join().left().get(), this);
|
|
- }
|
|
- // Paper end
|
|
- this.entityTickingChunkFuture.complete(ChunkHolder.UNLOADED_LEVEL_CHUNK); this.isEntityTickingReady = false; // Paper - cache chunk ticking stage
|
|
- this.entityTickingChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE;
|
|
- }
|
|
-
|
|
- if (!playerchunk_state1.isOrAfter(playerchunk_state)) {
|
|
- this.demoteFullChunk(chunkStorage, playerchunk_state1);
|
|
- }
|
|
-
|
|
- this.onLevelChange.onLevelChange(this.pos, this::getQueueLevel, this.ticketLevel, this::setQueueLevel);
|
|
- this.oldTicketLevel = this.ticketLevel;
|
|
- // CraftBukkit start
|
|
- // ChunkLoadEvent: Called after the chunk is loaded: isChunkLoaded returns true and chunk is ready to be modified by plugins.
|
|
- if (!playerchunk_state.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && playerchunk_state1.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
|
|
- this.getFutureIfPresentUnchecked(ChunkStatus.FULL).thenAccept((either) -> {
|
|
- LevelChunk chunk = (LevelChunk)either.left().orElse(null);
|
|
- if (chunk != null) {
|
|
- chunkStorage.callbackExecutor.execute(() -> {
|
|
- chunk.loadCallback();
|
|
- });
|
|
- }
|
|
- }).exceptionally((throwable) -> {
|
|
- // ensure exceptions are printed, by default this is not the case
|
|
- MinecraftServer.LOGGER.error("Failed to schedule load callback for chunk " + ChunkHolder.this.pos, throwable);
|
|
- return null;
|
|
- });
|
|
-
|
|
- // Run callback right away if the future was already done
|
|
- chunkStorage.callbackExecutor.run();
|
|
- }
|
|
- // CraftBukkit end
|
|
- }
|
|
+ // Paper - rewrite chunk system
|
|
|
|
public static ChunkStatus getStatus(int level) {
|
|
return level < 33 ? ChunkStatus.FULL : ChunkStatus.getStatusAroundFullChunk(level - 33);
|
|
@@ -617,38 +351,14 @@ public class ChunkHolder {
|
|
return ChunkHolder.FULL_CHUNK_STATUSES[Mth.clamp(33 - distance + 1, (int) 0, ChunkHolder.FULL_CHUNK_STATUSES.length - 1)];
|
|
}
|
|
|
|
- public boolean wasAccessibleSinceLastSave() {
|
|
- return this.wasAccessibleSinceLastSave;
|
|
- }
|
|
-
|
|
- public void refreshAccessibility() {
|
|
- this.wasAccessibleSinceLastSave = ChunkHolder.getFullChunkStatus(this.ticketLevel).isOrAfter(ChunkHolder.FullChunkStatus.BORDER);
|
|
- }
|
|
+ // Paper - rewrite chunk system
|
|
|
|
public void replaceProtoChunk(ImposterProtoChunk chunk) {
|
|
- for (int i = 0; i < this.futures.length(); ++i) {
|
|
- CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture = (CompletableFuture) this.futures.get(i);
|
|
-
|
|
- if (completablefuture != null) {
|
|
- Optional<ChunkAccess> optional = ((Either) completablefuture.getNow(ChunkHolder.UNLOADED_CHUNK)).left();
|
|
-
|
|
- if (!optional.isEmpty() && optional.get() instanceof ProtoChunk) {
|
|
- this.futures.set(i, CompletableFuture.completedFuture(Either.left(chunk)));
|
|
- }
|
|
- }
|
|
- }
|
|
-
|
|
- this.updateChunkToSave(CompletableFuture.completedFuture(Either.left(chunk.getWrapped())), "replaceProto");
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public List<Pair<ChunkStatus, CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>>>> getAllFutures() {
|
|
- List<Pair<ChunkStatus, CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>>>> list = new ArrayList();
|
|
-
|
|
- for (int i = 0; i < ChunkHolder.CHUNK_STATUSES.size(); ++i) {
|
|
- list.add(Pair.of((ChunkStatus) ChunkHolder.CHUNK_STATUSES.get(i), (CompletableFuture) this.futures.get(i)));
|
|
- }
|
|
-
|
|
- return list;
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
@FunctionalInterface
|
|
@@ -697,15 +407,15 @@ public class ChunkHolder {
|
|
|
|
// Paper start
|
|
public final boolean isEntityTickingReady() {
|
|
- return this.isEntityTickingReady;
|
|
+ return this.newChunkHolder.isEntityTickingReady(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public final boolean isTickingReady() {
|
|
- return this.isTickingReady;
|
|
+ return this.newChunkHolder.isTickingReady(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public final boolean isFullChunkReady() {
|
|
- return this.isFullChunkReady;
|
|
+ return this.newChunkHolder.isFullChunkReady(); // Paper - rewrite chunk system
|
|
}
|
|
// Paper end
|
|
}
|
|
diff --git a/src/main/java/net/minecraft/server/level/ChunkMap.java b/src/main/java/net/minecraft/server/level/ChunkMap.java
|
|
index a07c413f1ee1a1689ca8ca87137cf4992d85c7aa..3c5b6231dc6f3dcc275e032d03989ed638fb03a9 100644
|
|
--- a/src/main/java/net/minecraft/server/level/ChunkMap.java
|
|
+++ b/src/main/java/net/minecraft/server/level/ChunkMap.java
|
|
@@ -125,10 +125,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
public static final int MAX_VIEW_DISTANCE = 33;
|
|
public static final int MAX_CHUNK_DISTANCE = 33 + ChunkStatus.maxDistance();
|
|
public static final int FORCED_TICKET_LEVEL = 31;
|
|
- public final Long2ObjectLinkedOpenHashMap<ChunkHolder> updatingChunkMap = new Long2ObjectLinkedOpenHashMap();
|
|
- public volatile Long2ObjectLinkedOpenHashMap<ChunkHolder> visibleChunkMap;
|
|
- private final Long2ObjectLinkedOpenHashMap<ChunkHolder> pendingUnloads;
|
|
- private final LongSet entitiesInLevel;
|
|
+ // Paper - rewrite chunk system
|
|
public final ServerLevel level;
|
|
private final ThreadedLevelLightEngine lightEngine;
|
|
public final BlockableEventLoop<Runnable> mainThreadExecutor; // Paper - public
|
|
@@ -137,16 +134,14 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
private final ChunkGeneratorStructureState chunkGeneratorState;
|
|
public final Supplier<DimensionDataStorage> overworldDataStorage;
|
|
private final PoiManager poiManager;
|
|
- public final LongSet toDrop;
|
|
+ // Paper - rewrite chunk system
|
|
private boolean modified;
|
|
- private final ChunkTaskPriorityQueueSorter queueSorter;
|
|
- private final ProcessorHandle<ChunkTaskPriorityQueueSorter.Message<Runnable>> worldgenMailbox;
|
|
- private final ProcessorHandle<ChunkTaskPriorityQueueSorter.Message<Runnable>> mainThreadMailbox;
|
|
+ // Paper - rewrite chunk system
|
|
public final ChunkProgressListener progressListener;
|
|
private final ChunkStatusUpdateListener chunkStatusListener;
|
|
public final ChunkMap.ChunkDistanceManager distanceManager;
|
|
private final AtomicInteger tickingGenerated;
|
|
- private final StructureTemplateManager structureTemplateManager;
|
|
+ public final StructureTemplateManager structureTemplateManager; // Paper - rewrite chunk system
|
|
private final String storageName;
|
|
private final PlayerMap playerMap;
|
|
public final Int2ObjectMap<ChunkMap.TrackedEntity> entityMap;
|
|
@@ -155,37 +150,21 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
private final Queue<Runnable> unloadQueue;
|
|
int viewDistance;
|
|
|
|
- // CraftBukkit start - recursion-safe executor for Chunk loadCallback() and unloadCallback()
|
|
- public final CallbackExecutor callbackExecutor = new CallbackExecutor();
|
|
- public static final class CallbackExecutor implements java.util.concurrent.Executor, Runnable {
|
|
-
|
|
- private final java.util.Queue<Runnable> queue = new java.util.ArrayDeque<>();
|
|
-
|
|
- @Override
|
|
- public void execute(Runnable runnable) {
|
|
- this.queue.add(runnable);
|
|
- }
|
|
-
|
|
- @Override
|
|
- public void run() {
|
|
- Runnable task;
|
|
- while ((task = this.queue.poll()) != null) {
|
|
- task.run();
|
|
- }
|
|
- }
|
|
- };
|
|
- // CraftBukkit end
|
|
+ // Paper - rewrite chunk system
|
|
|
|
// Paper start - distance maps
|
|
private final com.destroystokyo.paper.util.misc.PooledLinkedHashSets<ServerPlayer> pooledLinkedPlayerHashSets = new com.destroystokyo.paper.util.misc.PooledLinkedHashSets<>();
|
|
+ public final io.papermc.paper.chunk.PlayerChunkLoader playerChunkManager = new io.papermc.paper.chunk.PlayerChunkLoader(this, this.pooledLinkedPlayerHashSets); // Paper - replace chunk loader
|
|
|
|
void addPlayerToDistanceMaps(ServerPlayer player) {
|
|
+ this.playerChunkManager.addPlayer(player); // Paper - replace chunk loader
|
|
int chunkX = MCUtil.getChunkCoordinate(player.getX());
|
|
int chunkZ = MCUtil.getChunkCoordinate(player.getZ());
|
|
// Note: players need to be explicitly added to distance maps before they can be updated
|
|
}
|
|
|
|
void removePlayerFromDistanceMaps(ServerPlayer player) {
|
|
+ this.playerChunkManager.removePlayer(player); // Paper - replace chunk loader
|
|
|
|
}
|
|
|
|
@@ -193,6 +172,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
int chunkX = MCUtil.getChunkCoordinate(player.getX());
|
|
int chunkZ = MCUtil.getChunkCoordinate(player.getZ());
|
|
// Note: players need to be explicitly added to distance maps before they can be updated
|
|
+ this.playerChunkManager.updatePlayer(player); // Paper - replace chunk loader
|
|
}
|
|
// Paper end
|
|
// Paper start
|
|
@@ -222,16 +202,13 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
}
|
|
|
|
public final ChunkHolder getUnloadingChunkHolder(int chunkX, int chunkZ) {
|
|
- return this.pendingUnloads.get(io.papermc.paper.util.CoordinateUtils.getChunkKey(chunkX, chunkZ));
|
|
+ return null; // Paper - rewrite chunk system
|
|
}
|
|
// Paper end
|
|
|
|
public ChunkMap(ServerLevel world, LevelStorageSource.LevelStorageAccess session, DataFixer dataFixer, StructureTemplateManager structureTemplateManager, Executor executor, BlockableEventLoop<Runnable> mainThreadExecutor, LightChunkGetter chunkProvider, ChunkGenerator chunkGenerator, ChunkProgressListener worldGenerationProgressListener, ChunkStatusUpdateListener chunkStatusChangeListener, Supplier<DimensionDataStorage> persistentStateManagerFactory, int viewDistance, boolean dsync) {
|
|
super(session.getDimensionPath(world.dimension()).resolve("region"), dataFixer, dsync);
|
|
- this.visibleChunkMap = this.updatingChunkMap.clone();
|
|
- this.pendingUnloads = new Long2ObjectLinkedOpenHashMap();
|
|
- this.entitiesInLevel = new LongOpenHashSet();
|
|
- this.toDrop = new LongOpenHashSet();
|
|
+ // Paper - rewrite chunk system
|
|
this.tickingGenerated = new AtomicInteger();
|
|
this.playerMap = new PlayerMap();
|
|
this.entityMap = new Int2ObjectOpenHashMap();
|
|
@@ -262,19 +239,17 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
|
|
this.chunkGeneratorState = chunkGenerator.createState(iregistrycustom.lookupOrThrow(Registries.STRUCTURE_SET), this.randomState, j, world.spigotConfig); // Spigot
|
|
this.mainThreadExecutor = mainThreadExecutor;
|
|
- ProcessorMailbox<Runnable> threadedmailbox = ProcessorMailbox.create(executor, "worldgen");
|
|
+ // Paper - rewrite chunk system
|
|
|
|
Objects.requireNonNull(mainThreadExecutor);
|
|
- ProcessorHandle<Runnable> mailbox = ProcessorHandle.of("main", mainThreadExecutor::tell);
|
|
+ // Paper - rewrite chunk system
|
|
|
|
this.progressListener = worldGenerationProgressListener;
|
|
this.chunkStatusListener = chunkStatusChangeListener;
|
|
- ProcessorMailbox<Runnable> threadedmailbox1 = ProcessorMailbox.create(executor, "light");
|
|
+ // Paper - rewrite chunk system
|
|
|
|
- this.queueSorter = new ChunkTaskPriorityQueueSorter(ImmutableList.of(threadedmailbox, mailbox, threadedmailbox1), executor, Integer.MAX_VALUE);
|
|
- this.worldgenMailbox = this.queueSorter.getProcessor(threadedmailbox, false);
|
|
- this.mainThreadMailbox = this.queueSorter.getProcessor(mailbox, false);
|
|
- this.lightEngine = new ThreadedLevelLightEngine(chunkProvider, this, this.level.dimensionType().hasSkyLight(), threadedmailbox1, this.queueSorter.getProcessor(threadedmailbox1, false));
|
|
+ // Paper - rewrite chunk system
|
|
+ this.lightEngine = new ThreadedLevelLightEngine(chunkProvider, this, this.level.dimensionType().hasSkyLight(), null, null); // Paper - rewrite chunk system
|
|
this.distanceManager = new ChunkMap.ChunkDistanceManager(executor, mainThreadExecutor);
|
|
this.overworldDataStorage = persistentStateManagerFactory;
|
|
this.poiManager = new PoiManager(path.resolve("poi"), dataFixer, dsync, iregistrycustom, world);
|
|
@@ -339,20 +314,22 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
|
|
@Nullable
|
|
protected ChunkHolder getUpdatingChunkIfPresent(long pos) {
|
|
- return (ChunkHolder) this.updatingChunkMap.get(pos);
|
|
+ // Paper start - rewrite chunk system
|
|
+ io.papermc.paper.chunk.system.scheduling.NewChunkHolder holder = this.level.chunkTaskScheduler.chunkHolderManager.getChunkHolder(pos);
|
|
+ return holder == null ? null : holder.vanillaChunkHolder;
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
|
|
@Nullable
|
|
public ChunkHolder getVisibleChunkIfPresent(long pos) {
|
|
- return (ChunkHolder) this.visibleChunkMap.get(pos);
|
|
+ // Paper start - rewrite chunk system
|
|
+ io.papermc.paper.chunk.system.scheduling.NewChunkHolder holder = this.level.chunkTaskScheduler.chunkHolderManager.getChunkHolder(pos);
|
|
+ return holder == null ? null : holder.vanillaChunkHolder;
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
|
|
protected IntSupplier getChunkQueueLevel(long pos) {
|
|
- return () -> {
|
|
- ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos);
|
|
-
|
|
- return playerchunk == null ? ChunkTaskPriorityQueue.PRIORITY_LEVEL_COUNT - 1 : Math.min(playerchunk.getQueueLevel(), ChunkTaskPriorityQueue.PRIORITY_LEVEL_COUNT - 1);
|
|
- };
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public String getChunkDebugData(ChunkPos chunkPos) {
|
|
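Both lookups above now resolve through the chunk holder manager, collapsing vanilla's separate updating and visible maps into one source of truth. A toy version of that single-map delegation; HolderManager and Holder here are hypothetical stand-ins, not the NewChunkHolder machinery:

import java.util.concurrent.ConcurrentHashMap;

// Toy single-map holder lookup.
final class HolderManager {
    static final class Holder { /* wraps the per-chunk state */ }

    private final ConcurrentHashMap<Long, Holder> holders = new ConcurrentHashMap<>();

    // One map serves both the "updating" and "visible" views, so readers on
    // any thread always agree on which chunk holders exist.
    Holder getChunkHolder(long chunkKey) {
        return this.holders.get(chunkKey);
    }
}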
@@ -389,75 +366,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
// Paper end
|
|
|
|
private CompletableFuture<Either<List<ChunkAccess>, ChunkHolder.ChunkLoadingFailure>> getChunkRangeFuture(ChunkPos centerChunk, int margin, IntFunction<ChunkStatus> distanceToStatus) {
|
|
- List<CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>>> list = new ArrayList();
|
|
- List<ChunkHolder> list1 = new ArrayList();
|
|
- int j = centerChunk.x;
|
|
- int k = centerChunk.z;
|
|
-
|
|
- for (int l = -margin; l <= margin; ++l) {
|
|
- for (int i1 = -margin; i1 <= margin; ++i1) {
|
|
- int j1 = Math.max(Math.abs(i1), Math.abs(l));
|
|
- final ChunkPos chunkcoordintpair1 = new ChunkPos(j + i1, k + l);
|
|
- long k1 = chunkcoordintpair1.toLong();
|
|
- ChunkHolder playerchunk = this.getUpdatingChunkIfPresent(k1);
|
|
-
|
|
- if (playerchunk == null) {
|
|
- return CompletableFuture.completedFuture(Either.right(new ChunkHolder.ChunkLoadingFailure() {
|
|
- public String toString() {
|
|
- return "Unloaded " + chunkcoordintpair1;
|
|
- }
|
|
- }));
|
|
- }
|
|
-
|
|
- ChunkStatus chunkstatus = (ChunkStatus) distanceToStatus.apply(j1);
|
|
- CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture = playerchunk.getOrScheduleFuture(chunkstatus, this);
|
|
-
|
|
- list1.add(playerchunk);
|
|
- list.add(completablefuture);
|
|
- }
|
|
- }
|
|
-
|
|
- CompletableFuture<List<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>>> completablefuture1 = Util.sequence(list);
|
|
- CompletableFuture<Either<List<ChunkAccess>, ChunkHolder.ChunkLoadingFailure>> completablefuture2 = completablefuture1.thenApply((list2) -> {
|
|
- List<ChunkAccess> list3 = Lists.newArrayList();
|
|
- // CraftBukkit start - decompile error
|
|
- int cnt = 0;
|
|
-
|
|
- for (Iterator iterator = list2.iterator(); iterator.hasNext(); ++cnt) {
|
|
- final int l1 = cnt;
|
|
- // CraftBukkit end
|
|
- final Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure> either = (Either) iterator.next();
|
|
-
|
|
- if (either == null) {
|
|
- throw this.debugFuturesAndCreateReportedException(new IllegalStateException("At least one of the chunk futures were null"), "n/a");
|
|
- }
|
|
-
|
|
- Optional<ChunkAccess> optional = either.left();
|
|
-
|
|
- if (!optional.isPresent()) {
|
|
- return Either.right(new ChunkHolder.ChunkLoadingFailure() {
|
|
- public String toString() {
|
|
- ChunkPos chunkcoordintpair2 = new ChunkPos(j + l1 % (margin * 2 + 1), k + l1 / (margin * 2 + 1));
|
|
-
|
|
- return "Unloaded " + chunkcoordintpair2 + " " + either.right().get();
|
|
- }
|
|
- });
|
|
- }
|
|
-
|
|
- list3.add((ChunkAccess) optional.get());
|
|
- }
|
|
-
|
|
- return Either.left(list3);
|
|
- });
|
|
- Iterator iterator = list1.iterator();
|
|
-
|
|
- while (iterator.hasNext()) {
|
|
- ChunkHolder playerchunk1 = (ChunkHolder) iterator.next();
|
|
-
|
|
- playerchunk1.addSaveDependency("getChunkRangeFuture " + centerChunk + " " + margin, completablefuture2);
|
|
- }
|
|
-
|
|
- return completablefuture2;
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public ReportedException debugFuturesAndCreateReportedException(IllegalStateException exception, String details) {
|
|
@@ -487,261 +396,72 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
|
}
|
|
|
|
public CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> prepareEntityTickingChunk(ChunkPos pos) {
|
|
- return this.getChunkRangeFuture(pos, 2, (i) -> {
|
|
- return ChunkStatus.FULL;
|
|
- }).thenApplyAsync((either) -> {
|
|
- return either.mapLeft((list) -> {
|
|
- return (LevelChunk) list.get(list.size() / 2);
|
|
- });
|
|
- }, this.mainThreadExecutor);
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
@Nullable
|
|
ChunkHolder updateChunkScheduling(long pos, int level, @Nullable ChunkHolder holder, int k) {
|
|
- if (k > ChunkMap.MAX_CHUNK_DISTANCE && level > ChunkMap.MAX_CHUNK_DISTANCE) {
|
|
- return holder;
|
|
- } else {
|
|
- if (holder != null) {
|
|
- holder.setTicketLevel(level);
|
|
- }
|
|
-
|
|
- if (holder != null) {
|
|
- if (level > ChunkMap.MAX_CHUNK_DISTANCE) {
|
|
- this.toDrop.add(pos);
|
|
- } else {
|
|
- this.toDrop.remove(pos);
|
|
- }
|
|
- }
|
|
-
|
|
- if (level <= ChunkMap.MAX_CHUNK_DISTANCE && holder == null) {
|
|
- holder = (ChunkHolder) this.pendingUnloads.remove(pos);
|
|
- if (holder != null) {
|
|
- holder.setTicketLevel(level);
|
|
- } else {
|
|
- holder = new ChunkHolder(new ChunkPos(pos), level, this.level, this.lightEngine, this.queueSorter, this);
|
|
- // Paper start
|
|
- io.papermc.paper.chunk.system.ChunkSystem.onChunkHolderCreate(this.level, holder);
|
|
- // Paper end
|
|
- }
|
|
-
|
|
- // Paper start
|
|
- holder.onChunkAdd();
|
|
- // Paper end
|
|
- this.updatingChunkMap.put(pos, holder);
|
|
- this.modified = true;
|
|
- }
|
|
-
|
|
- return holder;
|
|
- }
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
@Override
|
|
public void close() throws IOException {
|
|
- try {
|
|
- this.queueSorter.close();
|
|
- this.poiManager.close();
|
|
- } finally {
|
|
- super.close();
|
|
- }
|
|
+ throw new UnsupportedOperationException("Use ServerChunkCache#close"); // Paper - rewrite chunk system
|
|
+ }
|
|
|
|
+ // Paper start - rewrite chunk system
|
|
+ protected void saveIncrementally() {
|
|
+ this.level.chunkTaskScheduler.chunkHolderManager.autoSave(); // Paper - rewrite chunk system
|
|
}
+ // Paper end - rewrite chunk system
|
|
protected void saveAllChunks(boolean flush) {
|
|
- if (flush) {
|
|
- List<ChunkHolder> list = (List) io.papermc.paper.chunk.system.ChunkSystem.getVisibleChunkHolders(this.level).stream().filter(ChunkHolder::wasAccessibleSinceLastSave).peek(ChunkHolder::refreshAccessibility).collect(Collectors.toList()); // Paper
|
|
- MutableBoolean mutableboolean = new MutableBoolean();
|
|
-
|
|
- do {
|
|
- mutableboolean.setFalse();
|
|
- list.stream().map((playerchunk) -> {
|
|
- CompletableFuture completablefuture;
|
|
-
|
|
- do {
|
|
- completablefuture = playerchunk.getChunkToSave();
|
|
- BlockableEventLoop iasynctaskhandler = this.mainThreadExecutor;
|
|
-
|
|
- Objects.requireNonNull(completablefuture);
|
|
- iasynctaskhandler.managedBlock(completablefuture::isDone);
|
|
- } while (completablefuture != playerchunk.getChunkToSave());
|
|
-
|
|
- return (ChunkAccess) completablefuture.join();
|
|
- }).filter((ichunkaccess) -> {
|
|
- return ichunkaccess instanceof ImposterProtoChunk || ichunkaccess instanceof LevelChunk;
|
|
- }).filter(this::save).forEach((ichunkaccess) -> {
|
|
- mutableboolean.setTrue();
|
|
- });
|
|
- } while (mutableboolean.isTrue());
|
|
-
|
|
- this.processUnloads(() -> {
|
|
- return true;
|
|
- });
|
|
- this.flushWorker();
|
|
- } else {
|
|
- io.papermc.paper.chunk.system.ChunkSystem.getVisibleChunkHolders(this.level).forEach(this::saveChunkIfNeeded);
|
|
- }
|
|
-
|
|
+ this.level.chunkTaskScheduler.chunkHolderManager.saveAllChunks(flush, false, false); // Paper - rewrite chunk system
|
|
}
|
|
|
|
protected void tick(BooleanSupplier shouldKeepTicking) {
|
|
ProfilerFiller gameprofilerfiller = this.level.getProfiler();
|
|
|
|
+ try (Timing ignored = this.level.timings.poiUnload.startTiming()) { // Paper
|
|
gameprofilerfiller.push("poi");
|
|
this.poiManager.tick(shouldKeepTicking);
|
|
+ } // Paper
|
|
gameprofilerfiller.popPush("chunk_unload");
|
|
if (!this.level.noSave()) {
|
|
+ try (Timing ignored = this.level.timings.chunkUnload.startTiming()) { // Paper
|
|
this.processUnloads(shouldKeepTicking);
|
|
+ } // Paper
|
|
}
|
|
|
|
gameprofilerfiller.pop();
|
|
}
|
|
|
|
public boolean hasWork() {
|
|
- return this.lightEngine.hasLightWork() || !this.pendingUnloads.isEmpty() || io.papermc.paper.chunk.system.ChunkSystem.hasAnyChunkHolders(this.level) || this.poiManager.hasWork() || !this.toDrop.isEmpty() || !this.unloadQueue.isEmpty() || this.queueSorter.hasWork() || this.distanceManager.hasTickets(); // Paper
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
private void processUnloads(BooleanSupplier shouldKeepTicking) {
|
|
- LongIterator longiterator = this.toDrop.iterator();
|
|
-
|
|
- for (int i = 0; longiterator.hasNext() && (shouldKeepTicking.getAsBoolean() || i < 200 || this.toDrop.size() > 2000); longiterator.remove()) {
|
|
- long j = longiterator.nextLong();
|
|
- ChunkHolder playerchunk = (ChunkHolder) this.updatingChunkMap.remove(j);
|
|
-
|
|
- if (playerchunk != null) {
|
|
- playerchunk.onChunkRemove(); // Paper
|
|
- this.pendingUnloads.put(j, playerchunk);
|
|
- this.modified = true;
|
|
- ++i;
|
|
- this.scheduleUnload(j, playerchunk);
|
|
- }
|
|
- }
|
|
-
|
|
- int k = Math.max(0, this.unloadQueue.size() - 2000);
|
|
-
|
|
- Runnable runnable;
|
|
-
|
|
- while ((shouldKeepTicking.getAsBoolean() || k > 0) && (runnable = (Runnable) this.unloadQueue.poll()) != null) {
|
|
- --k;
|
|
- runnable.run();
|
|
- }
|
|
-
|
|
- int l = 0;
|
|
- Iterator objectiterator = io.papermc.paper.chunk.system.ChunkSystem.getVisibleChunkHolders(this.level).iterator(); // Paper
|
|
-
|
|
- while (l < 20 && shouldKeepTicking.getAsBoolean() && objectiterator.hasNext()) {
|
|
- if (this.saveChunkIfNeeded((ChunkHolder) objectiterator.next())) {
|
|
- ++l;
|
|
- }
|
|
- }
|
|
+ this.level.chunkTaskScheduler.chunkHolderManager.processUnloads(); // Paper - rewrite chunk system
|
|
|
|
}
|
|
|
|
private void scheduleUnload(long pos, ChunkHolder holder) {
|
|
- CompletableFuture<ChunkAccess> completablefuture = holder.getChunkToSave();
|
|
- Consumer<ChunkAccess> consumer = (ichunkaccess) -> { // CraftBukkit - decompile error
|
|
- CompletableFuture<ChunkAccess> completablefuture1 = holder.getChunkToSave();
|
|
-
|
|
- if (completablefuture1 != completablefuture) {
|
|
- this.scheduleUnload(pos, holder);
|
|
- } else {
|
|
- // Paper start
|
|
- boolean removed;
|
|
- if ((removed = this.pendingUnloads.remove(pos, holder)) && ichunkaccess != null) {
|
|
- io.papermc.paper.chunk.system.ChunkSystem.onChunkHolderDelete(this.level, holder);
|
|
- // Paper end
|
|
- if (ichunkaccess instanceof LevelChunk) {
|
|
- ((LevelChunk) ichunkaccess).setLoaded(false);
|
|
- }
|
|
-
|
|
- this.save(ichunkaccess);
|
|
- if (this.entitiesInLevel.remove(pos) && ichunkaccess instanceof LevelChunk) {
|
|
- LevelChunk chunk = (LevelChunk) ichunkaccess;
|
|
-
|
|
- this.level.unload(chunk);
|
|
- }
|
|
-
|
|
- this.lightEngine.updateChunkStatus(ichunkaccess.getPos());
|
|
- this.lightEngine.tryScheduleUpdate();
|
|
- this.progressListener.onStatusChange(ichunkaccess.getPos(), (ChunkStatus) null);
|
|
- this.chunkSaveCooldowns.remove(ichunkaccess.getPos().toLong());
|
|
- } else if (removed) { // Paper start
|
|
- io.papermc.paper.chunk.system.ChunkSystem.onChunkHolderDelete(this.level, holder);
|
|
- } // Paper end
|
|
-
|
|
- }
|
|
- };
|
|
- Queue queue = this.unloadQueue;
|
|
-
|
|
- Objects.requireNonNull(this.unloadQueue);
|
|
- completablefuture.thenAcceptAsync(consumer, queue::add).whenComplete((ovoid, throwable) -> {
|
|
- if (throwable != null) {
|
|
- ChunkMap.LOGGER.error("Failed to save chunk {}", holder.getPos(), throwable);
|
|
- }
|
|
-
|
|
- });
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}

     protected boolean promoteChunkMap() {
-        if (!this.modified) {
-            return false;
-        } else {
-            this.visibleChunkMap = this.updatingChunkMap.clone();
-            this.modified = false;
-            return true;
-        }
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

     public CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> schedule(ChunkHolder holder, ChunkStatus requiredStatus) {
-        ChunkPos chunkcoordintpair = holder.getPos();
-
-        if (requiredStatus == ChunkStatus.EMPTY) {
-            return this.scheduleChunkLoad(chunkcoordintpair);
-        } else {
-            if (requiredStatus == ChunkStatus.LIGHT) {
-                this.distanceManager.addTicket(TicketType.LIGHT, chunkcoordintpair, 33 + ChunkStatus.getDistance(ChunkStatus.LIGHT), chunkcoordintpair);
-            }
-
-            Optional<ChunkAccess> optional = ((Either) holder.getOrScheduleFuture(requiredStatus.getParent(), this).getNow(ChunkHolder.UNLOADED_CHUNK)).left();
-
-            if (optional.isPresent() && ((ChunkAccess) optional.get()).getStatus().isOrAfter(requiredStatus)) {
-                CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture = requiredStatus.load(this.level, this.structureTemplateManager, this.lightEngine, (ichunkaccess) -> {
-                    return this.protoChunkToFullChunk(holder);
-                }, (ChunkAccess) optional.get());
-
-                this.progressListener.onStatusChange(chunkcoordintpair, requiredStatus);
-                return completablefuture;
-            } else {
-                return this.scheduleChunkGeneration(holder, requiredStatus);
-            }
-        }
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

     private CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> scheduleChunkLoad(ChunkPos pos) {
-        return this.readChunk(pos).thenApply((optional) -> {
-            return optional.filter((nbttagcompound) -> {
-                boolean flag = ChunkMap.isChunkDataValid(nbttagcompound);
-
-                if (!flag) {
-                    ChunkMap.LOGGER.error("Chunk file at {} is missing level data, skipping", pos);
-                }
-
-                return flag;
-            });
-        }).thenApplyAsync((optional) -> {
-            this.level.getProfiler().incrementCounter("chunkLoad");
-            if (optional.isPresent()) {
-                ProtoChunk protochunk = ChunkSerializer.read(this.level, this.poiManager, pos, (CompoundTag) optional.get());
-
-                this.markPosition(pos, protochunk.getStatus().getChunkType());
-                return Either.<ChunkAccess, ChunkHolder.ChunkLoadingFailure>left(protochunk); // CraftBukkit - decompile error
-            } else {
-                return Either.<ChunkAccess, ChunkHolder.ChunkLoadingFailure>left(this.createEmptyChunk(pos)); // CraftBukkit - decompile error
-            }
-        }, this.mainThreadExecutor).exceptionallyAsync((throwable) -> {
-            return this.handleChunkLoadFailure(throwable, pos);
-        }, this.mainThreadExecutor);
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

-    private static boolean isChunkDataValid(CompoundTag nbt) {
+    public static boolean isChunkDataValid(CompoundTag nbt) { // Paper - async chunk loading
         return nbt.contains("Status", 8);
     }
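
The raw 8 in the check above is the NBT type id for a string tag: the chunk root must carry a string "Status" field (the serialized ChunkStatus name) to count as real level data. The same check spelled with the named constant — a sketch assuming vanilla's net.minecraft.nbt.Tag.TAG_STRING:

    import net.minecraft.nbt.CompoundTag;
    import net.minecraft.nbt.Tag;

    final class ChunkDataValidity {
        static boolean isChunkDataValid(CompoundTag nbt) {
            // contains(key, type) matches both the key and the tag type id,
            // so an empty or corrupt root compound fails this test
            return nbt.contains("Status", Tag.TAG_STRING);
        }
    }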

@@ -777,45 +497,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
     }

     private CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> scheduleChunkGeneration(ChunkHolder holder, ChunkStatus requiredStatus) {
-        ChunkPos chunkcoordintpair = holder.getPos();
-        CompletableFuture<Either<List<ChunkAccess>, ChunkHolder.ChunkLoadingFailure>> completablefuture = this.getChunkRangeFuture(chunkcoordintpair, requiredStatus.getRange(), (i) -> {
-            return this.getDependencyStatus(requiredStatus, i);
-        });
-
-        this.level.getProfiler().incrementCounter(() -> {
-            return "chunkGenerate " + requiredStatus.getName();
-        });
-        Executor executor = (runnable) -> {
-            this.worldgenMailbox.tell(ChunkTaskPriorityQueueSorter.message(holder, runnable));
-        };
-
-        return completablefuture.thenComposeAsync((either) -> {
-            return (CompletionStage) either.map((list) -> {
-                try {
-                    CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture1 = requiredStatus.generate(executor, this.level, this.generator, this.structureTemplateManager, this.lightEngine, (ichunkaccess) -> {
-                        return this.protoChunkToFullChunk(holder);
-                    }, list, false);
-
-                    this.progressListener.onStatusChange(chunkcoordintpair, requiredStatus);
-                    return completablefuture1;
-                } catch (Exception exception) {
-                    exception.getStackTrace();
-                    CrashReport crashreport = CrashReport.forThrowable(exception, "Exception generating new chunk");
-                    CrashReportCategory crashreportsystemdetails = crashreport.addCategory("Chunk to be generated");
-
-                    crashreportsystemdetails.setDetail("Location", (Object) String.format(Locale.ROOT, "%d,%d", chunkcoordintpair.x, chunkcoordintpair.z));
-                    crashreportsystemdetails.setDetail("Position hash", (Object) ChunkPos.asLong(chunkcoordintpair.x, chunkcoordintpair.z));
-                    crashreportsystemdetails.setDetail("Generator", (Object) this.generator);
-                    this.mainThreadExecutor.execute(() -> {
-                        throw new ReportedException(crashreport);
-                    });
-                    throw new ReportedException(crashreport);
-                }
-            }, (playerchunk_failure) -> {
-                this.releaseLightTicket(chunkcoordintpair);
-                return CompletableFuture.completedFuture(Either.right(playerchunk_failure));
-            });
-        }, executor);
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

     protected void releaseLightTicket(ChunkPos pos) {
@@ -826,7 +508,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
         }));
     }

-    private ChunkStatus getDependencyStatus(ChunkStatus centerChunkTargetStatus, int distance) {
+    public static ChunkStatus getDependencyStatus(ChunkStatus centerChunkTargetStatus, int distance) { // Paper -> public, static
         ChunkStatus chunkstatus1;

         if (distance == 0) {
@@ -838,7 +520,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
         return chunkstatus1;
     }

-    private static void postLoadProtoChunk(ServerLevel world, List<CompoundTag> nbt) {
+    public static void postLoadProtoChunk(ServerLevel world, List<CompoundTag> nbt) { // Paper - public
         if (!nbt.isEmpty()) {
             // CraftBukkit start - these are spawned serialized (DefinedStructure) and we don't call an add event below at the moment due to ordering complexities
             world.addWorldGenChunkEntities(EntityType.loadEntitiesRecursive(nbt, world).filter((entity) -> {
@@ -860,91 +542,15 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
     }

     private CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> protoChunkToFullChunk(ChunkHolder chunkHolder) {
-        CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture = chunkHolder.getFutureIfPresentUnchecked(ChunkStatus.FULL.getParent());
-
-        return completablefuture.thenApplyAsync((either) -> {
-            ChunkStatus chunkstatus = ChunkHolder.getStatus(chunkHolder.getTicketLevel());
-
-            return !chunkstatus.isOrAfter(ChunkStatus.FULL) ? ChunkHolder.UNLOADED_CHUNK : either.mapLeft((ichunkaccess) -> {
-                try (Timing ignored = level.timings.chunkPostLoad.startTimingIfSync()) { // Paper
-                    ChunkPos chunkcoordintpair = chunkHolder.getPos();
-                    ProtoChunk protochunk = (ProtoChunk) ichunkaccess;
-                    LevelChunk chunk;
-
-                    if (protochunk instanceof ImposterProtoChunk) {
-                        chunk = ((ImposterProtoChunk) protochunk).getWrapped();
-                    } else {
-                        chunk = new LevelChunk(this.level, protochunk, (chunk1) -> {
-                            ChunkMap.postLoadProtoChunk(this.level, protochunk.getEntities());
-                        });
-                        chunkHolder.replaceProtoChunk(new ImposterProtoChunk(chunk, false));
-                    }
-
-                    chunk.setFullStatus(() -> {
-                        return ChunkHolder.getFullChunkStatus(chunkHolder.getTicketLevel());
-                    });
-                    chunk.runPostLoad();
-                    if (this.entitiesInLevel.add(chunkcoordintpair.toLong())) {
-                        chunk.setLoaded(true);
-                        chunk.registerAllBlockEntitiesAfterLevelLoad();
-                        chunk.registerTickContainerInLevel(this.level);
-                    }
-
-                    return chunk;
-                } // Paper
-            });
-        }, (runnable) -> {
-            ProcessorHandle mailbox = this.mainThreadMailbox;
-            long i = chunkHolder.getPos().toLong();
-
-            Objects.requireNonNull(chunkHolder);
-            mailbox.tell(ChunkTaskPriorityQueueSorter.message(runnable, i, chunkHolder::getTicketLevel));
-        });
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

     public CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> prepareTickingChunk(ChunkHolder holder) {
-        ChunkPos chunkcoordintpair = holder.getPos();
-        CompletableFuture<Either<List<ChunkAccess>, ChunkHolder.ChunkLoadingFailure>> completablefuture = this.getChunkRangeFuture(chunkcoordintpair, 1, (i) -> {
-            return ChunkStatus.FULL;
-        });
-        CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> completablefuture1 = completablefuture.thenApplyAsync((either) -> {
-            return either.mapLeft((list) -> {
-                return (LevelChunk) list.get(list.size() / 2);
-            });
-        }, (runnable) -> {
-            this.mainThreadMailbox.tell(ChunkTaskPriorityQueueSorter.message(holder, runnable));
-        }).thenApplyAsync((either) -> {
-            return either.ifLeft((chunk) -> {
-                chunk.postProcessGeneration();
-                this.level.startTickingChunk(chunk);
-            });
-        }, this.mainThreadExecutor);
-
-        completablefuture1.thenAcceptAsync((either) -> {
-            either.ifLeft((chunk) -> {
-                this.tickingGenerated.getAndIncrement();
-                MutableObject<ClientboundLevelChunkWithLightPacket> mutableobject = new MutableObject();
-
-                this.getPlayers(chunkcoordintpair, false).forEach((entityplayer) -> {
-                    this.playerLoadedChunk(entityplayer, mutableobject, chunk);
-                });
-            });
-        }, (runnable) -> {
-            this.mainThreadMailbox.tell(ChunkTaskPriorityQueueSorter.message(holder, runnable));
-        });
-        return completablefuture1;
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

     public CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> prepareAccessibleChunk(ChunkHolder holder) {
-        return this.getChunkRangeFuture(holder.getPos(), 1, ChunkStatus::getStatusAroundFullChunk).thenApplyAsync((either) -> {
-            return either.mapLeft((list) -> {
-                LevelChunk chunk = (LevelChunk) list.get(list.size() / 2);
-
-                return chunk;
-            });
-        }, (runnable) -> {
-            this.mainThreadMailbox.tell(ChunkTaskPriorityQueueSorter.message(holder, runnable));
-        });
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

     public int getTickingGenerated() {
@@ -952,94 +558,22 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
     }

     private boolean saveChunkIfNeeded(ChunkHolder chunkHolder) {
-        if (!chunkHolder.wasAccessibleSinceLastSave()) {
-            return false;
-        } else {
-            ChunkAccess ichunkaccess = (ChunkAccess) chunkHolder.getChunkToSave().getNow(null); // CraftBukkit - decompile error
-
-            if (!(ichunkaccess instanceof ImposterProtoChunk) && !(ichunkaccess instanceof LevelChunk)) {
-                return false;
-            } else {
-                long i = ichunkaccess.getPos().toLong();
-                long j = this.chunkSaveCooldowns.getOrDefault(i, -1L);
-                long k = System.currentTimeMillis();
-
-                if (k < j) {
-                    return false;
-                } else {
-                    boolean flag = this.save(ichunkaccess);
-
-                    chunkHolder.refreshAccessibility();
-                    if (flag) {
-                        this.chunkSaveCooldowns.put(i, k + 10000L);
-                    }
-
-                    return flag;
-                }
-            }
-        }
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

     public boolean save(ChunkAccess chunk) {
-        this.poiManager.flush(chunk.getPos());
-        if (!chunk.isUnsaved()) {
-            return false;
-        } else {
-            chunk.setUnsaved(false);
-            ChunkPos chunkcoordintpair = chunk.getPos();
-
-            try {
-                ChunkStatus chunkstatus = chunk.getStatus();
-
-                if (chunkstatus.getChunkType() != ChunkStatus.ChunkType.LEVELCHUNK) {
-                    if (this.isExistingChunkFull(chunkcoordintpair)) {
-                        return false;
-                    }
-
-                    if (chunkstatus == ChunkStatus.EMPTY && chunk.getAllStarts().values().stream().noneMatch(StructureStart::isValid)) {
-                        return false;
-                    }
-                }
-
-                this.level.getProfiler().incrementCounter("chunkSave");
-                CompoundTag nbttagcompound = ChunkSerializer.write(this.level, chunk);
-
-                this.write(chunkcoordintpair, nbttagcompound);
-                this.markPosition(chunkcoordintpair, chunkstatus.getChunkType());
-                return true;
-            } catch (Exception exception) {
-                ChunkMap.LOGGER.error("Failed to save chunk {},{}", new Object[]{chunkcoordintpair.x, chunkcoordintpair.z, exception});
-                return false;
-            }
-        }
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

     private boolean isExistingChunkFull(ChunkPos pos) {
-        byte b0 = this.chunkTypeCache.get(pos.toLong());
-
-        if (b0 != 0) {
-            return b0 == 1;
-        } else {
-            CompoundTag nbttagcompound;
-
-            try {
-                nbttagcompound = (CompoundTag) ((Optional) this.readChunk(pos).join()).orElse((Object) null);
-                if (nbttagcompound == null) {
-                    this.markPositionReplaceable(pos);
-                    return false;
-                }
-            } catch (Exception exception) {
-                ChunkMap.LOGGER.error("Failed to read chunk {}", pos, exception);
-                this.markPositionReplaceable(pos);
-                return false;
-            }
-
-            ChunkStatus.ChunkType chunkstatus_type = ChunkSerializer.getChunkTypeFromTag(nbttagcompound);
-
-            return this.markPosition(pos, chunkstatus_type) == 1;
-        }
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

+    // Paper start - replace player loader system
+    public void setTickViewDistance(int distance) {
+        this.playerChunkManager.setTickDistance(distance);
+    }
+    // Paper end - replace player loader system
     public void setViewDistance(int watchDistance) {
         int j = Mth.clamp(watchDistance + 1, (int) 3, (int) 33);

@@ -1047,33 +581,18 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
         int k = this.viewDistance;

         this.viewDistance = j;
-        this.distanceManager.updatePlayerTickets(this.viewDistance + 1);
-        Iterator objectiterator = io.papermc.paper.chunk.system.ChunkSystem.getUpdatingChunkHolders(this.level).iterator(); // Paper
-
-        while (objectiterator.hasNext()) {
-            ChunkHolder playerchunk = (ChunkHolder) objectiterator.next();
-            ChunkPos chunkcoordintpair = playerchunk.getPos();
-            MutableObject<ClientboundLevelChunkWithLightPacket> mutableobject = new MutableObject();
-
-            this.getPlayers(chunkcoordintpair, false).forEach((entityplayer) -> {
-                SectionPos sectionposition = entityplayer.getLastSectionPos();
-                boolean flag = ChunkMap.isChunkInRange(chunkcoordintpair.x, chunkcoordintpair.z, sectionposition.x(), sectionposition.z(), k);
-                boolean flag1 = ChunkMap.isChunkInRange(chunkcoordintpair.x, chunkcoordintpair.z, sectionposition.x(), sectionposition.z(), this.viewDistance);
-
-                this.updateChunkTracking(entityplayer, chunkcoordintpair, mutableobject, flag, flag1);
-            });
-        }
+        this.playerChunkManager.setLoadDistance(this.viewDistance); // Paper - replace player loader system
         }

     }

-    protected void updateChunkTracking(ServerPlayer player, ChunkPos pos, MutableObject<ClientboundLevelChunkWithLightPacket> packet, boolean oldWithinViewDistance, boolean newWithinViewDistance) {
+    public void updateChunkTracking(ServerPlayer player, ChunkPos pos, MutableObject<ClientboundLevelChunkWithLightPacket> packet, boolean oldWithinViewDistance, boolean newWithinViewDistance) { // Paper - public
         if (player.level == this.level) {
             if (newWithinViewDistance && !oldWithinViewDistance) {
                 ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos.toLong());

                 if (playerchunk != null) {
-                    LevelChunk chunk = playerchunk.getTickingChunk();
+                    LevelChunk chunk = playerchunk.getSendingChunk(); // Paper - replace chunk loader system

                     if (chunk != null) {
                         this.playerLoadedChunk(player, packet, chunk);
@@ -1103,30 +622,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
     }

     void dumpChunks(Writer writer) throws IOException {
-        CsvOutput csvwriter = CsvOutput.builder().addColumn("x").addColumn("z").addColumn("level").addColumn("in_memory").addColumn("status").addColumn("full_status").addColumn("accessible_ready").addColumn("ticking_ready").addColumn("entity_ticking_ready").addColumn("ticket").addColumn("spawning").addColumn("block_entity_count").addColumn("ticking_ticket").addColumn("ticking_level").addColumn("block_ticks").addColumn("fluid_ticks").build(writer);
-        TickingTracker tickingtracker = this.distanceManager.tickingTracker();
-        Iterator<ChunkHolder> objectbidirectionaliterator = io.papermc.paper.chunk.system.ChunkSystem.getVisibleChunkHolders(this.level).iterator(); // Paper
-
-        while (objectbidirectionaliterator.hasNext()) {
-            ChunkHolder playerchunk = objectbidirectionaliterator.next(); // Paper
-            long i = playerchunk.pos.toLong(); // Paper
-            ChunkPos chunkcoordintpair = new ChunkPos(i);
-            // Paper
-            Optional<ChunkAccess> optional = Optional.ofNullable(playerchunk.getLastAvailable());
-            Optional<LevelChunk> optional1 = optional.flatMap((ichunkaccess) -> {
-                return ichunkaccess instanceof LevelChunk ? Optional.of((LevelChunk) ichunkaccess) : Optional.empty();
-            });
-
-            // CraftBukkit - decompile error
-            csvwriter.writeRow(chunkcoordintpair.x, chunkcoordintpair.z, playerchunk.getTicketLevel(), optional.isPresent(), optional.map(ChunkAccess::getStatus).orElse(null), optional1.map(LevelChunk::getFullStatus).orElse(null), ChunkMap.printFuture(playerchunk.getFullChunkFuture()), ChunkMap.printFuture(playerchunk.getTickingChunkFuture()), ChunkMap.printFuture(playerchunk.getEntityTickingChunkFuture()), this.distanceManager.getTicketDebugString(i), this.anyPlayerCloseEnoughForSpawning(chunkcoordintpair), optional1.map((chunk) -> {
-                return chunk.getBlockEntities().size();
-            }).orElse(0), tickingtracker.getTicketDebugString(i), tickingtracker.getLevel(i), optional1.map((chunk) -> {
-                return chunk.getBlockTicks().count();
-            }).orElse(0), optional1.map((chunk) -> {
-                return chunk.getFluidTicks().count();
-            }).orElse(0));
-        }
-
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

     private static String printFuture(CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> future) {
@@ -1145,6 +641,35 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
         }
     }

+    // Paper start - Asynchronous chunk io
+    @Nullable
+    @Override
+    public CompoundTag readSync(ChunkPos chunkcoordintpair) throws IOException {
+        // Paper start - rewrite chunk system
+        if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) {
+            return io.papermc.paper.chunk.system.io.RegionFileIOThread.loadData(
+                this.level, chunkcoordintpair.x, chunkcoordintpair.z, io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA,
+                io.papermc.paper.chunk.system.io.RegionFileIOThread.getIOBlockingPriorityForCurrentThread()
+            );
+        }
+        // Paper end - rewrite chunk system
+        return super.readSync(chunkcoordintpair);
+    }
+
+    @Override
+    public void write(ChunkPos chunkcoordintpair, CompoundTag nbttagcompound) throws IOException {
+        // Paper start - rewrite chunk system
+        if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) {
+            io.papermc.paper.chunk.system.io.RegionFileIOThread.scheduleSave(
+                this.level, chunkcoordintpair.x, chunkcoordintpair.z, nbttagcompound,
+                io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA);
+            return;
+        }
+        // Paper end - rewrite chunk system
+        super.write(chunkcoordintpair, nbttagcompound);
+    }
+    // Paper end
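
Both overrides above follow the same guard: if the caller is not already on the dedicated region-file IO thread, the operation is routed through that thread (reads block at an elevated priority, writes are fire-and-forget); only the IO thread itself falls through to the superclass and touches disk. A self-contained sketch of that routing, with hypothetical names (RegionIo, scheduleSave, writeDirect) rather than Paper's actual API:

    import java.io.IOException;
    import java.util.concurrent.ConcurrentLinkedQueue;

    final class RegionIo {
        private static volatile Thread ioThread; // set when the IO thread starts
        private static final ConcurrentLinkedQueue<Runnable> QUEUE = new ConcurrentLinkedQueue<>();

        static boolean isIoThread() {
            return Thread.currentThread() == ioThread;
        }

        static void scheduleSave(long chunkKey, byte[] data) {
            // queue the write; the IO thread drains QUEUE and calls writeDirect
            QUEUE.add(() -> {
                try {
                    writeDirect(chunkKey, data);
                } catch (IOException e) {
                    e.printStackTrace();
                }
            });
        }

        static void write(long chunkKey, byte[] data) throws IOException {
            if (!isIoThread()) {
                scheduleSave(chunkKey, data); // off-thread caller: hand off and return
                return;
            }
            writeDirect(chunkKey, data); // already on the IO thread: hit disk directly
        }

        private static void writeDirect(long chunkKey, byte[] data) throws IOException {
            // actual region-file write omitted
        }
    }

Routing reads through the same thread matters for consistency: a read must observe writes that may still be sitting in the IO queue, which is why readSync above cannot simply open the file from an arbitrary thread.
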
+
     private CompletableFuture<Optional<CompoundTag>> readChunk(ChunkPos chunkPos) {
         return this.read(chunkPos).thenApplyAsync((optional) -> {
             return optional.map((nbttagcompound) -> this.upgradeChunkTag(nbttagcompound, chunkPos)); // CraftBukkit
@@ -1248,15 +773,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
             this.removePlayerFromDistanceMaps(player); // Paper - distance maps
         }

-        for (int k = i - this.viewDistance - 1; k <= i + this.viewDistance + 1; ++k) {
-            for (int l = j - this.viewDistance - 1; l <= j + this.viewDistance + 1; ++l) {
-                if (ChunkMap.isChunkInRange(k, l, i, j, this.viewDistance)) {
-                    ChunkPos chunkcoordintpair = new ChunkPos(k, l);
-
-                    this.updateChunkTracking(player, chunkcoordintpair, new MutableObject(), !added, added);
-                }
-            }
-        }
+        // Paper - handled by player chunk loader

     }

@@ -1264,7 +781,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
         SectionPos sectionposition = SectionPos.of((EntityAccess) player);

         player.setLastSectionPos(sectionposition);
-        player.connection.send(new ClientboundSetChunkCacheCenterPacket(sectionposition.x(), sectionposition.z()));
+        //player.connection.send(new ClientboundSetChunkCacheCenterPacket(sectionposition.x(), sectionposition.z())); // Paper - handled by player chunk loader
         return sectionposition;
     }

@@ -1319,65 +836,38 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
         int k1;
         int l1;

-        if (Math.abs(i1 - i) <= this.viewDistance * 2 && Math.abs(j1 - j) <= this.viewDistance * 2) {
-            k1 = Math.min(i, i1) - this.viewDistance - 1;
-            l1 = Math.min(j, j1) - this.viewDistance - 1;
-            int i2 = Math.max(i, i1) + this.viewDistance + 1;
-            int j2 = Math.max(j, j1) + this.viewDistance + 1;
-
-            for (int k2 = k1; k2 <= i2; ++k2) {
-                for (int l2 = l1; l2 <= j2; ++l2) {
-                    boolean flag3 = ChunkMap.isChunkInRange(k2, l2, i1, j1, this.viewDistance);
-                    boolean flag4 = ChunkMap.isChunkInRange(k2, l2, i, j, this.viewDistance);
-
-                    this.updateChunkTracking(player, new ChunkPos(k2, l2), new MutableObject(), flag3, flag4);
-                }
-            }
-        } else {
-            boolean flag5;
-            boolean flag6;
-
-            for (k1 = i1 - this.viewDistance - 1; k1 <= i1 + this.viewDistance + 1; ++k1) {
-                for (l1 = j1 - this.viewDistance - 1; l1 <= j1 + this.viewDistance + 1; ++l1) {
-                    if (ChunkMap.isChunkInRange(k1, l1, i1, j1, this.viewDistance)) {
-                        flag5 = true;
-                        flag6 = false;
-                        this.updateChunkTracking(player, new ChunkPos(k1, l1), new MutableObject(), true, false);
-                    }
-                }
-            }
-
-            for (k1 = i - this.viewDistance - 1; k1 <= i + this.viewDistance + 1; ++k1) {
-                for (l1 = j - this.viewDistance - 1; l1 <= j + this.viewDistance + 1; ++l1) {
-                    if (ChunkMap.isChunkInRange(k1, l1, i, j, this.viewDistance)) {
-                        flag5 = false;
-                        flag6 = true;
-                        this.updateChunkTracking(player, new ChunkPos(k1, l1), new MutableObject(), false, true);
-                    }
-                }
-            }
-        }
+        // Paper - replaced by PlayerChunkLoader

         this.updateMaps(player); // Paper - distance maps
+        this.playerChunkManager.updatePlayer(player); // Paper - respond to movement immediately

     }

     @Override
     public List<ServerPlayer> getPlayers(ChunkPos chunkPos, boolean onlyOnWatchDistanceEdge) {
-        Set<ServerPlayer> set = this.playerMap.getPlayers(chunkPos.toLong());
-        Builder<ServerPlayer> builder = ImmutableList.builder();
-        Iterator iterator = set.iterator();
+        // Paper start - per player view distance
+        // there can be potential desync with player's last mapped section and the view distance map, so use the
+        // view distance map here.
+        com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet<ServerPlayer> players = this.playerChunkManager.broadcastMap.getObjectsInRange(chunkPos);
+        if (players == null) {
+            return java.util.Collections.emptyList();
+        }

-        while (iterator.hasNext()) {
-            ServerPlayer entityplayer = (ServerPlayer) iterator.next();
-            SectionPos sectionposition = entityplayer.getLastSectionPos();
+        List<ServerPlayer> ret = new java.util.ArrayList<>(players.size());

-            if (onlyOnWatchDistanceEdge && ChunkMap.isChunkOnRangeBorder(chunkPos.x, chunkPos.z, sectionposition.x(), sectionposition.z(), this.viewDistance) || !onlyOnWatchDistanceEdge && ChunkMap.isChunkInRange(chunkPos.x, chunkPos.z, sectionposition.x(), sectionposition.z(), this.viewDistance)) {
-                builder.add(entityplayer);
+        Object[] backingSet = players.getBackingSet();
+        for (int i = 0, len = backingSet.length; i < len; ++i) {
+            if (!(backingSet[i] instanceof ServerPlayer player)) {
+                continue;
+            }
+            if (!this.playerChunkManager.isChunkSent(player, chunkPos.x, chunkPos.z, onlyOnWatchDistanceEdge)) {
+                continue;
             }
+            ret.add(player);
         }

-        return builder.build();
+        return ret;
+        // Paper end - per player view distance
     }

     public void addEntity(Entity entity) {
@@ -1595,7 +1085,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider

         @Override
         protected boolean isChunkToRemove(long pos) {
-            return ChunkMap.this.toDrop.contains(pos);
+            throw new UnsupportedOperationException(); // Paper - rewrite chunk system
         }

         @Nullable
@@ -1676,7 +1166,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
             org.spigotmc.AsyncCatcher.catchOp("player tracker update"); // Spigot
             if (player != this.entity) {
                 Vec3 vec3d = player.position().subtract(this.entity.position());
-                double d0 = (double) Math.min(this.getEffectiveRange(), (ChunkMap.this.viewDistance - 1) * 16);
+                double d0 = (double) Math.min(this.getEffectiveRange(), io.papermc.paper.chunk.PlayerChunkLoader.getSendViewDistance(player) * 16); // Paper - per player view distance
                 double d1 = vec3d.x * vec3d.x + vec3d.z * vec3d.z;
                 double d2 = d0 * d0;
                 boolean flag = d1 <= d2 && this.entity.broadcastToPlayer(player);
diff --git a/src/main/java/net/minecraft/server/level/DistanceManager.java b/src/main/java/net/minecraft/server/level/DistanceManager.java
index d38ad1b1eee92a6dbd2b79b4fcdb8959cdb4007d..ffa1e457decf8502c3283352bf5be94d419ff165 100644
--- a/src/main/java/net/minecraft/server/level/DistanceManager.java
+++ b/src/main/java/net/minecraft/server/level/DistanceManager.java
@@ -40,6 +40,12 @@ import org.slf4j.Logger;

 public abstract class DistanceManager {

+    // Paper start - rewrite chunk system
+    public io.papermc.paper.chunk.system.scheduling.ChunkHolderManager getChunkHolderManager() {
+        return this.chunkMap.level.chunkTaskScheduler.chunkHolderManager;
+    }
+    // Paper end - rewrite chunk system
+
     static final Logger LOGGER = LogUtils.getLogger();
     private static final int ENTITY_TICKING_RANGE = 2;
     static final int PLAYER_TICKET_LEVEL = 33 + ChunkStatus.getDistance(ChunkStatus.FULL) - 2;
@@ -47,61 +53,20 @@ public abstract class DistanceManager {
     private static final int ENTITY_TICKING_LEVEL_THRESHOLD = 32;
     private static final int BLOCK_TICKING_LEVEL_THRESHOLD = 33;
     final Long2ObjectMap<ObjectSet<ServerPlayer>> playersPerChunk = new Long2ObjectOpenHashMap();
-    public final Long2ObjectOpenHashMap<SortedArraySet<Ticket<?>>> tickets = new Long2ObjectOpenHashMap();
-    private final DistanceManager.ChunkTicketTracker ticketTracker = new DistanceManager.ChunkTicketTracker();
+    // Paper - rewrite chunk system
     private final DistanceManager.FixedPlayerDistanceChunkTracker naturalSpawnChunkCounter = new DistanceManager.FixedPlayerDistanceChunkTracker(8);
-    private final TickingTracker tickingTicketsTracker = new TickingTracker();
-    private final DistanceManager.PlayerTicketTracker playerTicketManager = new DistanceManager.PlayerTicketTracker(33);
-    final Set<ChunkHolder> chunksToUpdateFutures = Sets.newHashSet();
-    final ChunkTaskPriorityQueueSorter ticketThrottler;
-    final ProcessorHandle<ChunkTaskPriorityQueueSorter.Message<Runnable>> ticketThrottlerInput;
-    final ProcessorHandle<ChunkTaskPriorityQueueSorter.Release> ticketThrottlerReleaser;
-    final LongSet ticketsToRelease = new LongOpenHashSet();
-    final Executor mainThreadExecutor;
-    private long ticketTickCounter;
-    private int simulationDistance = 10;
+    //private final TickingTracker tickingTicketsTracker = new TickingTracker(); // Paper - no longer used
+    //private final DistanceManager.PlayerTicketTracker playerTicketManager = new DistanceManager.PlayerTicketTracker(33); // Paper - no longer used
+    // Paper - rewrite chunk system
     private final ChunkMap chunkMap; // Paper

     protected DistanceManager(Executor workerExecutor, Executor mainThreadExecutor, ChunkMap chunkMap) {
-        Objects.requireNonNull(mainThreadExecutor);
-        ProcessorHandle<Runnable> mailbox = ProcessorHandle.of("player ticket throttler", mainThreadExecutor::execute);
-        ChunkTaskPriorityQueueSorter chunktaskqueuesorter = new ChunkTaskPriorityQueueSorter(ImmutableList.of(mailbox), workerExecutor, 4);
-
-        this.ticketThrottler = chunktaskqueuesorter;
-        this.ticketThrottlerInput = chunktaskqueuesorter.getProcessor(mailbox, true);
-        this.ticketThrottlerReleaser = chunktaskqueuesorter.getReleaseProcessor(mailbox);
-        this.mainThreadExecutor = mainThreadExecutor;
+        // Paper - rewrite chunk system
         this.chunkMap = chunkMap; // Paper
     }

     protected void purgeStaleTickets() {
-        ++this.ticketTickCounter;
-        ObjectIterator objectiterator = this.tickets.long2ObjectEntrySet().fastIterator();
-
-        while (objectiterator.hasNext()) {
-            Entry<SortedArraySet<Ticket<?>>> entry = (Entry) objectiterator.next();
-            Iterator<Ticket<?>> iterator = ((SortedArraySet) entry.getValue()).iterator();
-            boolean flag = false;
-
-            while (iterator.hasNext()) {
-                Ticket<?> ticket = (Ticket) iterator.next();
-
-                if (ticket.timedOut(this.ticketTickCounter)) {
-                    iterator.remove();
-                    flag = true;
-                    this.tickingTicketsTracker.removeTicket(entry.getLongKey(), ticket);
-                }
-            }
-
-            if (flag) {
-                this.ticketTracker.update(entry.getLongKey(), DistanceManager.getTicketLevelAt((SortedArraySet) entry.getValue()), false);
-            }
-
-            if (((SortedArraySet) entry.getValue()).isEmpty()) {
-                objectiterator.remove();
-            }
-        }
-
+        this.getChunkHolderManager().tick(); // Paper - rewrite chunk system
     }

     private static int getTicketLevelAt(SortedArraySet<Ticket<?>> tickets) {
@@ -117,108 +82,25 @@ public abstract class DistanceManager {
     protected abstract ChunkHolder updateChunkScheduling(long pos, int level, @Nullable ChunkHolder holder, int k);

     public boolean runAllUpdates(ChunkMap chunkStorage) {
-        this.naturalSpawnChunkCounter.runAllUpdates();
-        this.tickingTicketsTracker.runAllUpdates();
-        this.playerTicketManager.runAllUpdates();
-        int i = Integer.MAX_VALUE - this.ticketTracker.runDistanceUpdates(Integer.MAX_VALUE);
-        boolean flag = i != 0;
-
-        if (flag) {
-            ;
-        }
-
-        if (!this.chunksToUpdateFutures.isEmpty()) {
-            // CraftBukkit start
-            // Iterate pending chunk updates with protection against concurrent modification exceptions
-            java.util.Iterator<ChunkHolder> iter = this.chunksToUpdateFutures.iterator();
-            int expectedSize = this.chunksToUpdateFutures.size();
-            do {
-                ChunkHolder playerchunk = iter.next();
-                iter.remove();
-                expectedSize--;
-
-                playerchunk.updateFutures(chunkStorage, this.mainThreadExecutor);
-
-                // Reset iterator if set was modified using add()
-                if (this.chunksToUpdateFutures.size() != expectedSize) {
-                    expectedSize = this.chunksToUpdateFutures.size();
-                    iter = this.chunksToUpdateFutures.iterator();
-                }
-            } while (iter.hasNext());
-            // CraftBukkit end
-
-            return true;
-        } else {
-            if (!this.ticketsToRelease.isEmpty()) {
-                LongIterator longiterator = this.ticketsToRelease.iterator();
-
-                while (longiterator.hasNext()) {
-                    long j = longiterator.nextLong();
-
-                    if (this.getTickets(j).stream().anyMatch((ticket) -> {
-                        return ticket.getType() == TicketType.PLAYER;
-                    })) {
-                        ChunkHolder playerchunk = chunkStorage.getUpdatingChunkIfPresent(j);
-
-                        if (playerchunk == null) {
-                            throw new IllegalStateException();
-                        }
-
-                        CompletableFuture<Either<LevelChunk, ChunkHolder.ChunkLoadingFailure>> completablefuture = playerchunk.getEntityTickingChunkFuture();
-
-                        completablefuture.thenAccept((either) -> {
-                            this.mainThreadExecutor.execute(() -> {
-                                this.ticketThrottlerReleaser.tell(ChunkTaskPriorityQueueSorter.release(() -> {
-                                }, j, false));
-                            });
-                        });
-                    }
-                }
-
-                this.ticketsToRelease.clear();
-            }
-
-            return flag;
-        }
+        return this.getChunkHolderManager().processTicketUpdates(); // Paper - rewrite chunk system
     }

     boolean addTicket(long i, Ticket<?> ticket) { // CraftBukkit - void -> boolean
-        SortedArraySet<Ticket<?>> arraysetsorted = this.getTickets(i);
-        int j = DistanceManager.getTicketLevelAt(arraysetsorted);
-        Ticket<?> ticket1 = (Ticket) arraysetsorted.addOrGet(ticket);
-
-        ticket1.setCreatedTick(this.ticketTickCounter);
-        if (ticket.getTicketLevel() < j) {
-            this.ticketTracker.update(i, ticket.getTicketLevel(), true);
-        }
-
-        return ticket == ticket1; // CraftBukkit
+        org.spigotmc.AsyncCatcher.catchOp("ChunkMapDistance::addTicket"); // Paper
+        return this.getChunkHolderManager().addTicketAtLevel((TicketType)ticket.getType(), i, ticket.getTicketLevel(), ticket.key); // Paper - rewrite chunk system
     }

     boolean removeTicket(long i, Ticket<?> ticket) { // CraftBukkit - void -> boolean
-        SortedArraySet<Ticket<?>> arraysetsorted = this.getTickets(i);
-
-        boolean removed = false; // CraftBukkit
-        if (arraysetsorted.remove(ticket)) {
-            removed = true; // CraftBukkit
-        }
-
-        if (arraysetsorted.isEmpty()) {
-            this.tickets.remove(i);
-        }
-
-        this.ticketTracker.update(i, DistanceManager.getTicketLevelAt(arraysetsorted), false);
-        return removed; // CraftBukkit
+        org.spigotmc.AsyncCatcher.catchOp("ChunkMapDistance::removeTicket"); // Paper
+        return this.getChunkHolderManager().removeTicketAtLevel((TicketType)ticket.getType(), i, ticket.getTicketLevel(), ticket.key); // Paper - rewrite chunk system
     }

     public <T> void addTicket(TicketType<T> type, ChunkPos pos, int level, T argument) {
-        this.addTicket(pos.toLong(), new Ticket<>(type, level, argument));
+        this.getChunkHolderManager().addTicketAtLevel(type, pos, level, argument); // Paper - rewrite chunk system
     }

     public <T> void removeTicket(TicketType<T> type, ChunkPos pos, int level, T argument) {
-        Ticket<T> ticket = new Ticket<>(type, level, argument);
-
-        this.removeTicket(pos.toLong(), ticket);
+        this.getChunkHolderManager().removeTicketAtLevel(type, pos, level, argument); // Paper - rewrite chunk system
     }

     public <T> void addRegionTicket(TicketType<T> type, ChunkPos pos, int radius, T argument) {
@@ -227,13 +109,7 @@ public abstract class DistanceManager {
     }

     public <T> boolean addRegionTicketAtDistance(TicketType<T> tickettype, ChunkPos chunkcoordintpair, int i, T t0) {
-        // CraftBukkit end
-        Ticket<T> ticket = new Ticket<>(tickettype, 33 - i, t0);
-        long j = chunkcoordintpair.toLong();
-
-        boolean added = this.addTicket(j, ticket); // CraftBukkit
-        this.tickingTicketsTracker.addTicket(j, ticket);
-        return added; // CraftBukkit
+        return this.getChunkHolderManager().addTicketAtLevel(tickettype, chunkcoordintpair, 33 - i, t0); // Paper - rewrite chunk system
     }

     public <T> void removeRegionTicket(TicketType<T> type, ChunkPos pos, int radius, T argument) {
@@ -242,31 +118,21 @@ public abstract class DistanceManager {
     }

     public <T> boolean removeRegionTicketAtDistance(TicketType<T> tickettype, ChunkPos chunkcoordintpair, int i, T t0) {
-        // CraftBukkit end
-        Ticket<T> ticket = new Ticket<>(tickettype, 33 - i, t0);
-        long j = chunkcoordintpair.toLong();
-
-        boolean removed = this.removeTicket(j, ticket); // CraftBukkit
-        this.tickingTicketsTracker.removeTicket(j, ticket);
-        return removed; // CraftBukkit
+        return this.getChunkHolderManager().removeTicketAtLevel(tickettype, chunkcoordintpair, 33 - i, t0); // Paper - rewrite chunk system
     }
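
The 33 - i conversion above is the ticket-level convention: level 33 is the highest level at which a chunk is still fully loaded (a "border" chunk), and each step below 33 extends full loading one chunk further out, so a region ticket of radius i registers at level 33 - i. A tiny illustration of the arithmetic (the constant name here is ours, not vanilla's):

    final class TicketLevels {
        static final int FULL_CHUNK_LEVEL = 33;

        static int levelForRadius(int radius) {
            return FULL_CHUNK_LEVEL - radius;
        }

        public static void main(String[] args) {
            // prints 31, matching the level the FORCED ticket uses in updateChunkForced below
            System.out.println(levelForRadius(2));
        }
    }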

-    private SortedArraySet<Ticket<?>> getTickets(long position) {
-        return (SortedArraySet) this.tickets.computeIfAbsent(position, (j) -> {
-            return SortedArraySet.create(4);
-        });
-    }
+    // Paper - rewrite chunk system

     protected void updateChunkForced(ChunkPos pos, boolean forced) {
-        Ticket<ChunkPos> ticket = new Ticket<>(TicketType.FORCED, 31, pos);
+        Ticket<ChunkPos> ticket = new Ticket<>(TicketType.FORCED, 31, pos, 0L); // Paper - rewrite chunk system
         long i = pos.toLong();

         if (forced) {
             this.addTicket(i, ticket);
-            this.tickingTicketsTracker.addTicket(i, ticket);
+            //this.tickingTicketsTracker.addTicket(i, ticket); // Paper - no longer used
         } else {
             this.removeTicket(i, ticket);
-            this.tickingTicketsTracker.removeTicket(i, ticket);
+            //this.tickingTicketsTracker.removeTicket(i, ticket); // Paper - no longer used
         }

     }

@@ -275,12 +141,10 @@ public abstract class DistanceManager {
         ChunkPos chunkcoordintpair = pos.chunk();
         long i = chunkcoordintpair.toLong();

-        ((ObjectSet) this.playersPerChunk.computeIfAbsent(i, (j) -> {
-            return new ObjectOpenHashSet();
-        })).add(player);
+        // Paper - no longer used
         this.naturalSpawnChunkCounter.update(i, 0, true);
-        this.playerTicketManager.update(i, 0, true);
-        this.tickingTicketsTracker.addTicket(TicketType.PLAYER, chunkcoordintpair, this.getPlayerTicketLevel(), chunkcoordintpair);
+        //this.playerTicketManager.update(i, 0, true); // Paper - no longer used
+        //this.tickingTicketsTracker.addTicket(TicketType.PLAYER, chunkcoordintpair, this.getPlayerTicketLevel(), chunkcoordintpair); // Paper - no longer used
     }

     public void removePlayer(SectionPos pos, ServerPlayer player) {
@@ -293,46 +157,44 @@ public abstract class DistanceManager {
         if (objectset.isEmpty()) {
             this.playersPerChunk.remove(i);
             this.naturalSpawnChunkCounter.update(i, Integer.MAX_VALUE, false);
-            this.playerTicketManager.update(i, Integer.MAX_VALUE, false);
-            this.tickingTicketsTracker.removeTicket(TicketType.PLAYER, chunkcoordintpair, this.getPlayerTicketLevel(), chunkcoordintpair);
+            //this.playerTicketManager.update(i, Integer.MAX_VALUE, false); // Paper - no longer used
+            //this.tickingTicketsTracker.removeTicket(TicketType.PLAYER, chunkcoordintpair, this.getPlayerTicketLevel(), chunkcoordintpair); // Paper - no longer used
         }

     }

-    private int getPlayerTicketLevel() {
-        return Math.max(0, 31 - this.simulationDistance);
-    }
+    // Paper - rewrite chunk system

     public boolean inEntityTickingRange(long chunkPos) {
-        return this.tickingTicketsTracker.getLevel(chunkPos) < 32;
+        // Paper start - replace player chunk loader system
+        ChunkHolder holder = this.chunkMap.getVisibleChunkIfPresent(chunkPos);
+        return holder != null && holder.isEntityTickingReady();
+        // Paper end - replace player chunk loader system
     }

     public boolean inBlockTickingRange(long chunkPos) {
-        return this.tickingTicketsTracker.getLevel(chunkPos) < 33;
+        // Paper start - replace player chunk loader system
+        ChunkHolder holder = this.chunkMap.getVisibleChunkIfPresent(chunkPos);
+        return holder != null && holder.isTickingReady();
+        // Paper end - replace player chunk loader system
    }
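
The two rewritten range checks above preserve vanilla's thresholds — entity ticking at ticket level < 32 and block ticking at level < 33, matching the ENTITY_TICKING_LEVEL_THRESHOLD and BLOCK_TICKING_LEVEL_THRESHOLD constants declared earlier — but read the answer off the chunk holder's state instead of a ticket tracker. The level-to-state mapping they rely on, sketched with hypothetical names:

    enum ChunkReadiness { INACCESSIBLE, BORDER, TICKING, ENTITY_TICKING }

    final class ReadinessFromLevel {
        // level < 32 -> entities tick, level < 33 -> blocks tick,
        // level == 33 -> loaded but border-only, above 33 -> not fully loaded
        static ChunkReadiness fromTicketLevel(int level) {
            if (level < 32) return ChunkReadiness.ENTITY_TICKING;
            if (level < 33) return ChunkReadiness.TICKING;
            if (level == 33) return ChunkReadiness.BORDER;
            return ChunkReadiness.INACCESSIBLE;
        }
    }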

     protected String getTicketDebugString(long pos) {
-        SortedArraySet<Ticket<?>> arraysetsorted = (SortedArraySet) this.tickets.get(pos);
-
-        return arraysetsorted != null && !arraysetsorted.isEmpty() ? ((Ticket) arraysetsorted.first()).toString() : "no_ticket";
+        return this.getChunkHolderManager().getTicketDebugString(pos); // Paper - rewrite chunk system
     }

     protected void updatePlayerTickets(int viewDistance) {
-        this.playerTicketManager.updateViewDistance(viewDistance);
+        this.chunkMap.playerChunkManager.setTargetNoTickViewDistance(viewDistance); // Paper - route to player chunk manager
     }

     // Paper start
     public int getSimulationDistance() {
-        return this.simulationDistance;
+        return this.chunkMap.playerChunkManager.getTargetTickViewDistance(); // Paper - route to player chunk manager
     }
     // Paper end

     public void updateSimulationDistance(int simulationDistance) {
-        if (simulationDistance != this.simulationDistance) {
-            this.simulationDistance = simulationDistance;
-            this.tickingTicketsTracker.replacePlayerTicketsLevel(this.getPlayerTicketLevel());
-        }
-
+        this.chunkMap.playerChunkManager.setTargetTickViewDistance(simulationDistance); // Paper - route to player chunk manager
     }

     public int getNaturalSpawnChunkCount() {
@@ -346,103 +208,28 @@ public abstract class DistanceManager {
     }

     public String getDebugStatus() {
-        return this.ticketThrottler.getDebugStatus();
+        return "No DistanceManager stats available"; // Paper - rewrite chunk system
     }

-    private void dumpTickets(String path) {
-        try {
-            FileOutputStream fileoutputstream = new FileOutputStream(new File(path));
-
-            try {
-                ObjectIterator objectiterator = this.tickets.long2ObjectEntrySet().iterator();
-
-                while (objectiterator.hasNext()) {
-                    Entry<SortedArraySet<Ticket<?>>> entry = (Entry) objectiterator.next();
-                    ChunkPos chunkcoordintpair = new ChunkPos(entry.getLongKey());
-                    Iterator iterator = ((SortedArraySet) entry.getValue()).iterator();
-
-                    while (iterator.hasNext()) {
-                        Ticket<?> ticket = (Ticket) iterator.next();
-
-                        fileoutputstream.write((chunkcoordintpair.x + "\t" + chunkcoordintpair.z + "\t" + ticket.getType() + "\t" + ticket.getTicketLevel() + "\t\n").getBytes(StandardCharsets.UTF_8));
-                    }
-                }
-            } catch (Throwable throwable) {
-                try {
-                    fileoutputstream.close();
-                } catch (Throwable throwable1) {
-                    throwable.addSuppressed(throwable1);
-                }
+    // Paper - rewrite chunk system

-                throw throwable;
-            }
-
-            fileoutputstream.close();
-        } catch (IOException ioexception) {
-            DistanceManager.LOGGER.error("Failed to dump tickets to {}", path, ioexception);
-        }
-
-    }
-
-    @VisibleForTesting
-    TickingTracker tickingTracker() {
-        return this.tickingTicketsTracker;
-    }
+    // Paper - replace player chunk loader

     public void removeTicketsOnClosing() {
-        ImmutableSet<TicketType<?>> immutableset = ImmutableSet.of(TicketType.UNKNOWN, TicketType.POST_TELEPORT, TicketType.LIGHT, TicketType.FUTURE_AWAIT, TicketType.CHUNK_RELIGHT, ca.spottedleaf.starlight.common.light.StarLightInterface.CHUNK_WORK_TICKET); // Paper - add additional tickets to preserve
-        ObjectIterator objectiterator = this.tickets.long2ObjectEntrySet().fastIterator();
-
-        while (objectiterator.hasNext()) {
-            Entry<SortedArraySet<Ticket<?>>> entry = (Entry) objectiterator.next();
-            Iterator<Ticket<?>> iterator = ((SortedArraySet) entry.getValue()).iterator();
-            boolean flag = false;
-
-            while (iterator.hasNext()) {
-                Ticket<?> ticket = (Ticket) iterator.next();
-
-                if (!immutableset.contains(ticket.getType())) {
-                    iterator.remove();
-                    flag = true;
-                    this.tickingTicketsTracker.removeTicket(entry.getLongKey(), ticket);
-                }
-            }
-
-            if (flag) {
-                this.ticketTracker.update(entry.getLongKey(), DistanceManager.getTicketLevelAt((SortedArraySet) entry.getValue()), false);
-            }
-
-            if (((SortedArraySet) entry.getValue()).isEmpty()) {
-                objectiterator.remove();
-            }
-        }
-
+        // Paper - rewrite chunk system - this stupid hack ain't needed anymore
     }

     public boolean hasTickets() {
-        return !this.tickets.isEmpty();
+        return this.getChunkHolderManager().hasTickets(); // Paper - rewrite chunk system
     }

     // CraftBukkit start
     public <T> void removeAllTicketsFor(TicketType<T> ticketType, int ticketLevel, T ticketIdentifier) {
-        Ticket<T> target = new Ticket<>(ticketType, ticketLevel, ticketIdentifier);
-
-        for (java.util.Iterator<Entry<SortedArraySet<Ticket<?>>>> iterator = this.tickets.long2ObjectEntrySet().fastIterator(); iterator.hasNext();) {
-            Entry<SortedArraySet<Ticket<?>>> entry = iterator.next();
-            SortedArraySet<Ticket<?>> tickets = entry.getValue();
-            if (tickets.remove(target)) {
-                // copied from removeTicket
-                this.ticketTracker.update(entry.getLongKey(), DistanceManager.getTicketLevelAt(tickets), false);
-
-                // can't use entry after it's removed
-                if (tickets.isEmpty()) {
-                    iterator.remove();
-                }
-            }
-        }
+        this.getChunkHolderManager().removeAllTicketsFor(ticketType, ticketLevel, ticketIdentifier); // Paper - rewrite chunk system
     }
     // CraftBukkit end

+    /* Paper - rewrite chunk system
     private class ChunkTicketTracker extends ChunkTracker {

         public ChunkTicketTracker() {
@@ -487,6 +274,7 @@ public abstract class DistanceManager {
             return this.runUpdates(distance);
         }
     }
+    */ // Paper - rewrite chunk system

     private class FixedPlayerDistanceChunkTracker extends ChunkTracker {

@@ -566,6 +354,7 @@ public abstract class DistanceManager {
         }
     }

+    /* Paper - rewrite chunk system
     private class PlayerTicketTracker extends DistanceManager.FixedPlayerDistanceChunkTracker {

         private int viewDistance = 0;
@@ -661,4 +450,5 @@ public abstract class DistanceManager {
             return distance <= this.viewDistance - 2;
         }
     }
+    */ // Paper - rewrite chunk system
 }
diff --git a/src/main/java/net/minecraft/server/level/ServerChunkCache.java b/src/main/java/net/minecraft/server/level/ServerChunkCache.java
index 28c8a3ba1caddf0ea334a6ef43cae25f982743e4..80d108ae7faf3fdcb024931e93032215935fe70b 100644
--- a/src/main/java/net/minecraft/server/level/ServerChunkCache.java
+++ b/src/main/java/net/minecraft/server/level/ServerChunkCache.java
@@ -367,7 +367,7 @@ public class ServerChunkCache extends ChunkSource {
     public LevelChunk getChunkAtIfLoadedImmediately(int x, int z) {
         long k = ChunkPos.asLong(x, z);

-        if (Thread.currentThread() == this.mainThread) {
+        if (io.papermc.paper.util.TickThread.isTickThread()) { // Paper - rewrite chunk system
             return this.getChunkAtIfLoadedMainThread(x, z);
         }

@@ -389,11 +389,34 @@ public class ServerChunkCache extends ChunkSource {
         return ret;
     }
     // Paper end
+    // Paper start - async chunk io
+    public CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> getChunkAtAsynchronously(int x, int z, boolean gen, boolean isUrgent) {
+        CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> ret = new CompletableFuture<>();
+
+        ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority priority;
+        if (isUrgent) {
+            priority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.HIGHER;
+        } else {
+            priority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.NORMAL;
+        }
+
+        io.papermc.paper.chunk.system.ChunkSystem.scheduleChunkLoad(this.level, x, z, gen, ChunkStatus.FULL, true, priority, (chunk) -> {
+            if (chunk == null) {
+                ret.complete(ChunkHolder.UNLOADED_CHUNK);
+            } else {
+                ret.complete(Either.left(chunk));
+            }
+        });
+
+        return ret;
+    }
+    // Paper end - async chunk io
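
A usage sketch for the helper above — the returned future completes with Either.left on success and the unloaded/failure value otherwise, mirroring the callback passed to ChunkSystem.scheduleChunkLoad (the coordinates and the `cache` parameter here are just assumed examples):

    static void loadChunkAsync(net.minecraft.server.level.ServerChunkCache cache) {
        cache.getChunkAtAsynchronously(10, -4, true, false).thenAccept((either) -> {
            either.ifLeft((chunk) -> {
                // fully loaded ChunkAccess at FULL status
            }).ifRight((failure) -> {
                // the load failed or the chunk unloaded before completing
            });
        });
    }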

     @Nullable
     @Override
     public ChunkAccess getChunk(int x, int z, ChunkStatus leastStatus, boolean create) {
-        if (Thread.currentThread() != this.mainThread) {
+        final int x1 = x; final int z1 = z; // Paper - conflict on variable change
+        if (!io.papermc.paper.util.TickThread.isTickThread()) { // Paper - rewrite chunk system
             return (ChunkAccess) CompletableFuture.supplyAsync(() -> {
                 return this.getChunk(x, z, leastStatus, create);
             }, this.mainThreadProcessor).join();
@@ -405,23 +428,20 @@ public class ServerChunkCache extends ChunkSource {

             ChunkAccess ichunkaccess;

-            for (int l = 0; l < 4; ++l) {
-                if (k == this.lastChunkPos[l] && leastStatus == this.lastChunkStatus[l]) {
-                    ichunkaccess = this.lastChunk[l];
-                    if (ichunkaccess != null) { // CraftBukkit - the chunk can become accessible in the meantime TODO for non-null chunks it might also make sense to check that the chunk's state hasn't changed in the meantime
-                        return ichunkaccess;
-                    }
-                }
-            }
+            // Paper - rewrite chunk system - there are no correct callbacks to remove items from cache in the new chunk system

             gameprofilerfiller.incrementCounter("getChunkCacheMiss");
-            CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture = this.getChunkFutureMainThread(x, z, leastStatus, create);
+            CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> completablefuture = this.getChunkFutureMainThread(x, z, leastStatus, create, true); // Paper
             ServerChunkCache.MainThreadExecutor chunkproviderserver_b = this.mainThreadProcessor;

             Objects.requireNonNull(completablefuture);
             if (!completablefuture.isDone()) { // Paper
+                // Paper start - async chunk io/loading
+                io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.pushChunkWait(this.level, x1, z1); // Paper - rewrite chunk system
+                // Paper end
                 this.level.timings.syncChunkLoad.startTiming(); // Paper
                 chunkproviderserver_b.managedBlock(completablefuture::isDone);
+                io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.popChunkWait(); // Paper - async chunk debug // Paper - rewrite chunk system
                 this.level.timings.syncChunkLoad.stopTiming(); // Paper
             } // Paper
             ichunkaccess = (ChunkAccess) ((Either) completablefuture.join()).map((ichunkaccess1) -> {
@@ -441,7 +461,7 @@ public class ServerChunkCache extends ChunkSource {
     @Nullable
     @Override
     public LevelChunk getChunkNow(int chunkX, int chunkZ) {
-        if (Thread.currentThread() != this.mainThread) {
+        if (!io.papermc.paper.util.TickThread.isTickThread()) { // Paper - rewrite chunk system
             return null;
         } else {
             this.level.getProfiler().incrementCounter("getChunkNow");
@@ -487,7 +507,7 @@ public class ServerChunkCache extends ChunkSource {
     }

     public CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> getChunkFuture(int chunkX, int chunkZ, ChunkStatus leastStatus, boolean create) {
-        boolean flag1 = Thread.currentThread() == this.mainThread;
+        boolean flag1 = io.papermc.paper.util.TickThread.isTickThread(); // Paper - rewrite chunk system
         CompletableFuture completablefuture;

         if (flag1) {
@@ -508,47 +528,52 @@ public class ServerChunkCache extends ChunkSource {
     }

     private CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> getChunkFutureMainThread(int chunkX, int chunkZ, ChunkStatus leastStatus, boolean create) {
-        ChunkPos chunkcoordintpair = new ChunkPos(chunkX, chunkZ);
-        long k = chunkcoordintpair.toLong();
-        int l = 33 + ChunkStatus.getDistance(leastStatus);
-        ChunkHolder playerchunk = this.getVisibleChunkIfPresent(k);
+        // Paper start - add isUrgent - old sig left in place for dirty nms plugins
+        return getChunkFutureMainThread(chunkX, chunkZ, leastStatus, create, false);
+    }
+    private CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> getChunkFutureMainThread(int chunkX, int chunkZ, ChunkStatus leastStatus, boolean create, boolean isUrgent) {
+        // Paper start - rewrite chunk system
+        io.papermc.paper.util.TickThread.ensureTickThread(this.level, chunkX, chunkZ, "Scheduling chunk load off-main");
+        int minLevel = 33 + ChunkStatus.getDistance(leastStatus);
+        io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder = this.level.chunkTaskScheduler.chunkHolderManager.getChunkHolder(chunkX, chunkZ);

-        // CraftBukkit start - don't add new ticket for currently unloading chunk
-        boolean currentlyUnloading = false;
-        if (playerchunk != null) {
-            ChunkHolder.FullChunkStatus oldChunkState = ChunkHolder.getFullChunkStatus(playerchunk.oldTicketLevel);
-            ChunkHolder.FullChunkStatus currentChunkState = ChunkHolder.getFullChunkStatus(playerchunk.getTicketLevel());
-            currentlyUnloading = (oldChunkState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && !currentChunkState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER));
-        }
-        if (create && !currentlyUnloading) {
-            // CraftBukkit end
-            this.distanceManager.addTicket(TicketType.UNKNOWN, chunkcoordintpair, l, chunkcoordintpair);
-            if (this.chunkAbsent(playerchunk, l)) {
-                ProfilerFiller gameprofilerfiller = this.level.getProfiler();
-
-                gameprofilerfiller.push("chunkLoad");
-                this.runDistanceManagerUpdates();
-                playerchunk = this.getVisibleChunkIfPresent(k);
-                gameprofilerfiller.pop();
-                if (this.chunkAbsent(playerchunk, l)) {
-                    throw (IllegalStateException) Util.pauseInIde(new IllegalStateException("No chunk holder after ticket has been added"));
-                }
-            }
+        boolean needsFullScheduling = leastStatus == ChunkStatus.FULL && (chunkHolder == null || !chunkHolder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER));
+
+        if ((chunkHolder == null || chunkHolder.getTicketLevel() > minLevel || needsFullScheduling) && !create) {
+            return ChunkHolder.UNLOADED_CHUNK_FUTURE;
         }

-        return this.chunkAbsent(playerchunk, l) ? ChunkHolder.UNLOADED_CHUNK_FUTURE : playerchunk.getOrScheduleFuture(leastStatus, this.chunkMap);
-    }
+        io.papermc.paper.chunk.system.scheduling.NewChunkHolder.ChunkCompletion chunkCompletion = chunkHolder == null ? null : chunkHolder.getLastChunkCompletion();
+        if (needsFullScheduling || chunkCompletion == null || !chunkCompletion.genStatus().isOrAfter(leastStatus)) {
+            // schedule
+            CompletableFuture<Either<ChunkAccess, ChunkHolder.ChunkLoadingFailure>> ret = new CompletableFuture<>();
+            Consumer<ChunkAccess> complete = (ChunkAccess chunk) -> {
+                if (chunk == null) {
+                    ret.complete(Either.right(ChunkHolder.ChunkLoadingFailure.UNLOADED));
+                } else {
+                    ret.complete(Either.left(chunk));
+                }
+            };

-    private boolean chunkAbsent(@Nullable ChunkHolder holder, int maxLevel) {
-        return holder == null || holder.oldTicketLevel > maxLevel; // CraftBukkit using oldTicketLevel for isLoaded checks
+            this.level.chunkTaskScheduler.scheduleChunkLoad(
+                chunkX, chunkZ, leastStatus, true,
+                isUrgent ? ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.BLOCKING : ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.NORMAL,
+                complete
+            );
+
+            return ret;
+        } else {
+            // can return now
+            return CompletableFuture.completedFuture(Either.left(chunkCompletion.chunk()));
+        }
+        // Paper end - rewrite chunk system
     }
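
The rewritten method above reduces to three outcomes: fail fast with UNLOADED_CHUNK_FUTURE when nothing suitable is loaded and create is false; return an already-completed future when the holder has reached the requested status; otherwise schedule a load, at BLOCKING priority when isUrgent is set (a synchronous caller is about to block the main thread on the result) and NORMAL otherwise. The same flow condensed, with hypothetical Holder/Priority/schedule stand-ins:

    import java.util.concurrent.CompletableFuture;
    import java.util.function.Consumer;

    final class ChunkFutureFlow {
        enum Priority { NORMAL, BLOCKING }

        interface Holder {
            boolean alreadyAtStatus();
            Object chunk();
        }

        static CompletableFuture<Object> chunkFuture(Holder holder, boolean create, boolean isUrgent) {
            if (holder == null && !create) {
                return CompletableFuture.completedFuture(null); // "unloaded", nothing to do
            }
            if (holder != null && holder.alreadyAtStatus()) {
                return CompletableFuture.completedFuture(holder.chunk()); // no scheduling needed
            }
            CompletableFuture<Object> ret = new CompletableFuture<>();
            schedule(isUrgent ? Priority.BLOCKING : Priority.NORMAL, ret::complete);
            return ret;
        }

        static void schedule(Priority priority, Consumer<Object> onComplete) {
            // hand off to the chunk task scheduler (omitted)
        }
    }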
|
|
|
|
+ // Paper - rewrite chunk system
|
|
+
|
|
@Override
|
|
public boolean hasChunk(int x, int z) {
|
|
- ChunkHolder playerchunk = this.getVisibleChunkIfPresent((new ChunkPos(x, z)).toLong());
|
|
- int k = 33 + ChunkStatus.getDistance(ChunkStatus.FULL);
|
|
-
|
|
- return !this.chunkAbsent(playerchunk, k);
|
|
+ return this.getChunkAtIfLoadedImmediately(x, z) != null; // Paper - rewrite chunk system
|
|
}
|
|
|
|
@Override
|
|
@@ -559,22 +584,13 @@ public class ServerChunkCache extends ChunkSource {
|
|
if (playerchunk == null) {
|
|
return null;
|
|
} else {
|
|
- int l = ServerChunkCache.CHUNK_STATUSES.size() - 1;
|
|
-
|
|
- while (true) {
|
|
- ChunkStatus chunkstatus = (ChunkStatus) ServerChunkCache.CHUNK_STATUSES.get(l);
|
|
- Optional<ChunkAccess> optional = ((Either) playerchunk.getFutureIfPresentUnchecked(chunkstatus).getNow(ChunkHolder.UNLOADED_CHUNK)).left();
|
|
-
|
|
- if (optional.isPresent()) {
|
|
- return (BlockGetter) optional.get();
|
|
- }
|
|
-
|
|
- if (chunkstatus == ChunkStatus.LIGHT.getParent()) {
|
|
- return null;
|
|
- }
|
|
-
|
|
- --l;
|
|
+ // Paper start - rewrite chunk system
|
|
+ ChunkStatus status = playerchunk.getChunkHolderStatus();
|
|
+ if (status != null && !status.isOrAfter(ChunkStatus.LIGHT.getParent())) {
|
|
+ return null;
|
|
}
|
|
+ return playerchunk.getAvailableChunkNow();
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
}
|
|
|
|
@@ -588,15 +604,7 @@ public class ServerChunkCache extends ChunkSource {
|
|
}
|
|
|
|
boolean runDistanceManagerUpdates() {
|
|
- boolean flag = this.distanceManager.runAllUpdates(this.chunkMap);
|
|
- boolean flag1 = this.chunkMap.promoteChunkMap();
|
|
-
|
|
- if (!flag && !flag1) {
|
|
- return false;
|
|
- } else {
|
|
- this.clearCache();
|
|
- return true;
|
|
- }
|
|
+ return this.level.chunkTaskScheduler.chunkHolderManager.processTicketUpdates(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
// Paper start
|
|
@@ -606,17 +614,10 @@ public class ServerChunkCache extends ChunkSource {
|
|
// Paper end
|
|
|
|
public boolean isPositionTicking(long pos) {
|
|
- ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos);
|
|
-
|
|
- if (playerchunk == null) {
|
|
- return false;
|
|
- } else if (!this.level.shouldTickBlocksAt(pos)) {
|
|
- return false;
|
|
- } else {
|
|
- Either<LevelChunk, ChunkHolder.ChunkLoadingFailure> either = (Either) playerchunk.getTickingChunkFuture().getNow(null); // CraftBukkit - decompile error
|
|
-
|
|
- return either != null && either.left().isPresent();
|
|
- }
|
|
+ // Paper start - replace player chunk loader system
|
|
+ ChunkHolder holder = this.chunkMap.getVisibleChunkIfPresent(pos);
|
|
+ return holder != null && holder.isTickingReady();
|
|
+ // Paper end - replace player chunk loader system
|
|
}
|
|
|
|
public void save(boolean flush) {
|
|
@@ -632,17 +633,13 @@ public class ServerChunkCache extends ChunkSource {
|
|
this.close(true);
|
|
}
|
|
|
|
- public void close(boolean save) throws IOException {
|
|
- if (save) {
|
|
- this.save(true);
|
|
- }
|
|
- // CraftBukkit end
|
|
- this.lightEngine.close();
|
|
- this.chunkMap.close();
|
|
+ public void close(boolean save) { // Paper - rewrite chunk system
|
|
+ this.level.chunkTaskScheduler.chunkHolderManager.close(save, true); // Paper - rewrite chunk system
|
|
}
|
|
|
|
// CraftBukkit start - modelled on below
|
|
public void purgeUnload() {
|
|
+ if (true) return; // Paper - tickets will be removed later, this behavior isn't really well accounted for by the chunk system
|
|
this.level.getProfiler().push("purge");
|
|
this.distanceManager.purgeStaleTickets();
|
|
this.runDistanceManagerUpdates();
|
|
@@ -663,6 +660,7 @@ public class ServerChunkCache extends ChunkSource {
|
|
this.level.getProfiler().popPush("chunks");
|
|
if (tickChunks) {
|
|
this.level.timings.chunks.startTiming(); // Paper - timings
|
|
+ this.chunkMap.playerChunkManager.tick(); // Paper - this is mostly to account for view distance changes
|
|
this.tickChunks();
|
|
this.level.timings.chunks.stopTiming(); // Paper - timings
|
|
}
|
|
@@ -759,7 +757,12 @@ public class ServerChunkCache extends ChunkSource {
|
|
ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos);
|
|
|
|
if (playerchunk != null) {
|
|
- ((Either) playerchunk.getFullChunkFuture().getNow(ChunkHolder.UNLOADED_LEVEL_CHUNK)).left().ifPresent(chunkConsumer);
|
|
+ // Paper start - rewrite chunk system
|
|
+ LevelChunk chunk = playerchunk.getFullChunk();
|
|
+ if (chunk != null) {
|
|
+ chunkConsumer.accept(chunk);
|
|
+ }
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
|
|
}
|
|
@@ -925,17 +928,11 @@ public class ServerChunkCache extends ChunkSource {
|
|
@Override
|
|
// CraftBukkit start - process pending Chunk loadCallback() and unloadCallback() after each run task
|
|
public boolean pollTask() {
|
|
- try {
|
|
+ ServerChunkCache.this.chunkMap.playerChunkManager.tickMidTick();
|
|
if (ServerChunkCache.this.runDistanceManagerUpdates()) {
|
|
return true;
|
|
- } else {
|
|
- ServerChunkCache.this.lightEngine.tryScheduleUpdate();
|
|
- return super.pollTask();
|
|
}
|
|
- } finally {
|
|
- chunkMap.callbackExecutor.run();
|
|
- }
|
|
- // CraftBukkit end
|
|
+ return super.pollTask() | ServerChunkCache.this.level.chunkTaskScheduler.executeMainThreadTask(); // Paper - rewrite chunk system
|
|
}
|
|
}
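Aside: the branch above bridges the scheduler's callback-style completion into the CompletableFuture<Either<ChunkAccess, ChunkLoadingFailure>> shape callers expect. A minimal sketch of that bridging pattern, with a String standing in for the chunk and an inline stub standing in for the real scheduler (hypothetical names, not the patch's API):

    import java.util.concurrent.CompletableFuture;
    import java.util.function.Consumer;

    final class CallbackBridgeSketch {
        // stand-in for the scheduler's callback-style entry point; assumed shape only
        static void scheduleLoad(int x, int z, Consumer<String> onComplete) {
            onComplete.accept("chunk(" + x + "," + z + ")"); // pretend the load finished
        }

        // bridge: callback -> future; a null result completes with a failure marker
        static CompletableFuture<String> loadFuture(int x, int z) {
            CompletableFuture<String> ret = new CompletableFuture<>();
            scheduleLoad(x, z, chunk -> {
                if (chunk == null) {
                    ret.complete("UNLOADED"); // mirrors Either.right(ChunkLoadingFailure.UNLOADED)
                } else {
                    ret.complete(chunk);      // mirrors Either.left(chunk)
                }
            });
            return ret;
        }

        public static void main(String[] args) {
            System.out.println(loadFuture(0, 0).join());
        }
    }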
diff --git a/src/main/java/net/minecraft/server/level/ServerLevel.java b/src/main/java/net/minecraft/server/level/ServerLevel.java
|
|
index 6f81aa6b0a835bf10e0d5f2b32fe9fb2dbb60400..3fce997cc390136a16c941f0461b20d2bc046948 100644
|
|
--- a/src/main/java/net/minecraft/server/level/ServerLevel.java
|
|
+++ b/src/main/java/net/minecraft/server/level/ServerLevel.java
|
|
@@ -193,7 +193,7 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
private final MinecraftServer server;
|
|
public final PrimaryLevelData serverLevelData; // CraftBukkit - type
|
|
final EntityTickList entityTickList;
|
|
- public final PersistentEntitySectionManager<Entity> entityManager;
|
|
+ //public final PersistentEntitySectionManager<Entity> entityManager; // Paper - rewrite chunk system
|
|
private final GameEventDispatcher gameEventDispatcher;
|
|
public boolean noSave;
|
|
private final SleepStatus sleepStatus;
|
|
@@ -318,7 +318,108 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
}
|
|
}
|
|
}
|
|
- // Paper end
|
|
+
|
|
+ // Paper start - rewrite chunk system
|
|
+ public final io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler chunkTaskScheduler;
|
|
+ public final io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController chunkDataControllerNew
|
|
+ = new io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA) {
|
|
+
|
|
+ @Override
|
|
+ public net.minecraft.world.level.chunk.storage.RegionFileStorage getCache() {
|
|
+ return ServerLevel.this.getChunkSource().chunkMap.regionFileCache;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void writeData(int chunkX, int chunkZ, net.minecraft.nbt.CompoundTag compound) throws IOException {
|
|
+ ServerLevel.this.getChunkSource().chunkMap.write(new ChunkPos(chunkX, chunkZ), compound);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public net.minecraft.nbt.CompoundTag readData(int chunkX, int chunkZ) throws IOException {
|
|
+ return ServerLevel.this.getChunkSource().chunkMap.readSync(new ChunkPos(chunkX, chunkZ));
|
|
+ }
|
|
+ };
|
|
+ public final io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController poiDataControllerNew
|
|
+ = new io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA) {
|
|
+
|
|
+ @Override
|
|
+ public net.minecraft.world.level.chunk.storage.RegionFileStorage getCache() {
|
|
+ return ServerLevel.this.getChunkSource().chunkMap.getPoiManager();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void writeData(int chunkX, int chunkZ, net.minecraft.nbt.CompoundTag compound) throws IOException {
|
|
+ ServerLevel.this.getChunkSource().chunkMap.getPoiManager().write(new ChunkPos(chunkX, chunkZ), compound);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public net.minecraft.nbt.CompoundTag readData(int chunkX, int chunkZ) throws IOException {
|
|
+ return ServerLevel.this.getChunkSource().chunkMap.getPoiManager().read(new ChunkPos(chunkX, chunkZ));
|
|
+ }
|
|
+ };
|
|
+ public final io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController entityDataControllerNew
|
|
+ = new io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.ENTITY_DATA) {
|
|
+
|
|
+ @Override
|
|
+ public net.minecraft.world.level.chunk.storage.RegionFileStorage getCache() {
|
|
+ return ServerLevel.this.entityStorage;
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void writeData(int chunkX, int chunkZ, net.minecraft.nbt.CompoundTag compound) throws IOException {
|
|
+ ServerLevel.this.writeEntityChunk(chunkX, chunkZ, compound);
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public net.minecraft.nbt.CompoundTag readData(int chunkX, int chunkZ) throws IOException {
|
|
+ return ServerLevel.this.readEntityChunk(chunkX, chunkZ);
|
|
+ }
|
|
+ };
|
|
+ private final EntityRegionFileStorage entityStorage;
|
|
+
|
|
+ private static final class EntityRegionFileStorage extends net.minecraft.world.level.chunk.storage.RegionFileStorage {
|
|
+
|
|
+ public EntityRegionFileStorage(Path directory, boolean dsync) {
|
|
+ super(directory, dsync);
|
|
+ }
|
|
+
|
|
+ protected void write(ChunkPos pos, net.minecraft.nbt.CompoundTag nbt) throws IOException {
|
|
+ ChunkPos nbtPos = nbt == null ? null : EntityStorage.readChunkPos(nbt);
|
|
+ if (nbtPos != null && !pos.equals(nbtPos)) {
|
|
+ throw new IllegalArgumentException(
|
|
+ "Entity chunk coordinate and serialized data do not have matching coordinates, trying to serialize coordinate " + pos.toString()
|
|
+ + " but compound says coordinate is " + nbtPos + " for world: " + this
|
|
+ );
|
|
+ }
|
|
+ super.write(pos, nbt);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ private void writeEntityChunk(int chunkX, int chunkZ, net.minecraft.nbt.CompoundTag compound) throws IOException {
|
|
+ if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) {
|
|
+ io.papermc.paper.chunk.system.io.RegionFileIOThread.scheduleSave(
|
|
+ this, chunkX, chunkZ, compound,
|
|
+ io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.ENTITY_DATA);
|
|
+ return;
|
|
+ }
|
|
+ this.entityStorage.write(new ChunkPos(chunkX, chunkZ), compound);
|
|
+ }
|
|
+
|
|
+ private net.minecraft.nbt.CompoundTag readEntityChunk(int chunkX, int chunkZ) throws IOException {
|
|
+ if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) {
|
|
+ return io.papermc.paper.chunk.system.io.RegionFileIOThread.loadData(
|
|
+ this, chunkX, chunkZ, io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.ENTITY_DATA,
|
|
+ io.papermc.paper.chunk.system.io.RegionFileIOThread.getIOBlockingPriorityForCurrentThread()
|
|
+ );
|
|
+ }
|
|
+ return this.entityStorage.read(new ChunkPos(chunkX, chunkZ));
|
|
+ }
|
|
+
|
|
+ private final io.papermc.paper.chunk.system.entity.EntityLookup entityLookup;
|
|
+ public final io.papermc.paper.chunk.system.entity.EntityLookup getEntityLookup() {
|
|
+ return this.entityLookup;
|
|
+ }
|
|
+ // Paper end - rewrite chunk system
|
|
|
|
// Add env and gen to constructor, IWorldDataServer -> WorldDataServer
|
|
public ServerLevel(MinecraftServer minecraftserver, Executor executor, LevelStorageSource.LevelStorageAccess convertable_conversionsession, PrimaryLevelData iworlddataserver, ResourceKey<Level> resourcekey, LevelStem worlddimension, ChunkProgressListener worldloadlistener, boolean flag, long i, List<CustomSpawner> list, boolean flag1, org.bukkit.World.Environment env, org.bukkit.generator.ChunkGenerator gen, org.bukkit.generator.BiomeProvider biomeProvider) {
|
|
@@ -360,16 +461,16 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
// CraftBukkit end
|
|
boolean flag2 = minecraftserver.forceSynchronousWrites();
|
|
DataFixer datafixer = minecraftserver.getFixerUpper();
|
|
- EntityPersistentStorage<Entity> entitypersistentstorage = new EntityStorage(this, convertable_conversionsession.getDimensionPath(resourcekey).resolve("entities"), datafixer, flag2, minecraftserver);
|
|
+ this.entityStorage = new EntityRegionFileStorage(convertable_conversionsession.getDimensionPath(resourcekey).resolve("entities"), flag2); // Paper - rewrite chunk system //EntityPersistentStorage<Entity> entitypersistentstorage = new EntityStorage(this, convertable_conversionsession.getDimensionPath(resourcekey).resolve("entities"), datafixer, flag2, minecraftserver);
|
|
|
|
- this.entityManager = new PersistentEntitySectionManager<>(Entity.class, new ServerLevel.EntityCallbacks(), entitypersistentstorage);
|
|
+ // this.entityManager = new PersistentEntitySectionManager<>(Entity.class, new ServerLevel.EntityCallbacks(), entitypersistentstorage, this.entitySliceManager); // Paper - rewrite chunk system
|
|
StructureTemplateManager structuretemplatemanager = minecraftserver.getStructureManager();
|
|
int j = this.spigotConfig.viewDistance; // Spigot
|
|
int k = this.spigotConfig.simulationDistance; // Spigot
|
|
- PersistentEntitySectionManager persistententitysectionmanager = this.entityManager;
|
|
+ //PersistentEntitySectionManager persistententitysectionmanager = this.entityManager; // Paper - rewrite chunk system
|
|
|
|
- Objects.requireNonNull(this.entityManager);
|
|
- this.chunkSource = new ServerChunkCache(this, convertable_conversionsession, datafixer, structuretemplatemanager, executor, chunkgenerator, j, k, flag2, worldloadlistener, persistententitysectionmanager::updateChunkStatus, () -> {
|
|
+ //Objects.requireNonNull(this.entityManager); // Paper - rewrite chunk system
|
|
+ this.chunkSource = new ServerChunkCache(this, convertable_conversionsession, datafixer, structuretemplatemanager, executor, chunkgenerator, j, k, flag2, worldloadlistener, null, () -> { // Paper - rewrite chunk system
|
|
return minecraftserver.overworld().getDataStorage();
|
|
});
|
|
this.chunkSource.getGeneratorState().ensureStructuresGenerated();
|
|
@@ -399,6 +500,9 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
this.sleepStatus = new SleepStatus();
|
|
this.gameEventDispatcher = new GameEventDispatcher(this);
|
|
this.getCraftServer().addWorld(this.getWorld()); // CraftBukkit
|
|
+
|
|
+ this.chunkTaskScheduler = new io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler(this, io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.workerThreads); // Paper - rewrite chunk system
|
|
+ this.entityLookup = new io.papermc.paper.chunk.system.entity.EntityLookup(this, new EntityCallbacks()); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public void setWeatherParameters(int clearDuration, int rainDuration, boolean raining, boolean thundering) {
|
|
@@ -502,7 +606,7 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
gameprofilerfiller.push("checkDespawn");
|
|
entity.checkDespawn();
|
|
gameprofilerfiller.pop();
|
|
- if (this.chunkSource.chunkMap.getDistanceManager().inEntityTickingRange(entity.chunkPosition().toLong())) {
|
|
+ if (true || this.chunkSource.chunkMap.getDistanceManager().inEntityTickingRange(entity.chunkPosition().toLong())) { // Paper - now always true if in the ticking list
|
|
Entity entity1 = entity.getVehicle();
|
|
|
|
if (entity1 != null) {
|
|
@@ -527,13 +631,16 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
}
|
|
|
|
gameprofilerfiller.push("entityManagement");
|
|
- this.entityManager.tick();
|
|
+ //this.entityManager.tick(); // Paper - rewrite chunk system
|
|
gameprofilerfiller.pop();
|
|
}
|
|
|
|
@Override
|
|
public boolean shouldTickBlocksAt(long chunkPos) {
|
|
- return this.chunkSource.chunkMap.getDistanceManager().inBlockTickingRange(chunkPos);
|
|
+ // Paper start - replace player chunk loader system
|
|
+ ChunkHolder holder = this.chunkSource.chunkMap.getVisibleChunkIfPresent(chunkPos);
|
|
+ return holder != null && holder.isTickingReady();
|
|
+ // Paper end - replace player chunk loader system
|
|
}
|
|
|
|
protected void tickTime() {
|
|
@@ -995,6 +1102,11 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
}
|
|
|
|
public void save(@Nullable ProgressListener progressListener, boolean flush, boolean savingDisabled) {
|
|
+ // Paper start - rewrite chunk system - add close param
|
|
+ this.save(progressListener, flush, savingDisabled, false);
|
|
+ }
|
|
+ public void save(@Nullable ProgressListener progressListener, boolean flush, boolean savingDisabled, boolean close) {
|
|
+ // Paper end - rewrite chunk system - add close param
|
|
ServerChunkCache chunkproviderserver = this.getChunkSource();
|
|
|
|
if (!savingDisabled) {
|
|
@@ -1010,16 +1122,13 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
}
|
|
|
|
timings.worldSaveChunks.startTiming(); // Paper
|
|
- chunkproviderserver.save(flush);
|
|
+ if (!close) chunkproviderserver.save(flush); // Paper - rewrite chunk system
|
|
+ if (close) chunkproviderserver.close(true); // Paper - rewrite chunk system
|
|
timings.worldSaveChunks.stopTiming(); // Paper
|
|
}// Paper
|
|
- if (flush) {
|
|
- this.entityManager.saveAll();
|
|
- } else {
|
|
- this.entityManager.autoSave();
|
|
- }
|
|
+ // Paper - rewrite chunk system - entity saving moved into ChunkHolder
|
|
|
|
- }
|
|
+ } else if (close) { chunkproviderserver.close(false); } // Paper - rewrite chunk system
|
|
|
|
// CraftBukkit start - moved from MinecraftServer.saveChunks
|
|
ServerLevel worldserver1 = this;
|
|
@@ -1155,7 +1264,7 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
this.removePlayerImmediately((ServerPlayer) entity, Entity.RemovalReason.DISCARDED);
|
|
}
|
|
|
|
- this.entityManager.addNewEntity(player);
|
|
+ this.entityLookup.addNewEntity(player); // Paper - rewrite chunk system
|
|
}
|
|
|
|
// CraftBukkit start
|
|
@@ -1171,7 +1280,7 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
}
|
|
// CraftBukkit end
|
|
|
|
- return this.entityManager.addNewEntity(entity);
|
|
+ return this.entityLookup.addNewEntity(entity); // Paper - rewrite chunk system
|
|
}
|
|
}
|
|
|
|
@@ -1183,10 +1292,10 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
public boolean tryAddFreshEntityWithPassengers(Entity entity, org.bukkit.event.entity.CreatureSpawnEvent.SpawnReason reason) {
|
|
// CraftBukkit end
|
|
Stream<UUID> stream = entity.getSelfAndPassengers().map(Entity::getUUID); // CraftBukkit - decompile error
|
|
- PersistentEntitySectionManager persistententitysectionmanager = this.entityManager;
|
|
+ //PersistentEntitySectionManager persistententitysectionmanager = this.entityManager; // Paper - rewrite chunk system
|
|
|
|
- Objects.requireNonNull(this.entityManager);
|
|
- if (stream.anyMatch(persistententitysectionmanager::isLoaded)) {
|
|
+ //Objects.requireNonNull(this.entityManager); // Paper - rewrite chunk system
|
|
+ if (stream.anyMatch(this.entityLookup::hasEntity)) { // Paper - rewrite chunk system
|
|
return false;
|
|
} else {
|
|
this.addFreshEntityWithPassengers(entity, reason); // CraftBukkit
|
|
@@ -1706,7 +1815,7 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
}
|
|
}
|
|
|
|
- bufferedwriter.write(String.format(Locale.ROOT, "entities: %s\n", this.entityManager.gatherStats()));
|
|
+ bufferedwriter.write(String.format(Locale.ROOT, "entities: %s\n", this.entityLookup.getDebugInfo())); // Paper - rewrite chunk system
|
|
bufferedwriter.write(String.format(Locale.ROOT, "block_entity_tickers: %d\n", this.blockEntityTickers.size()));
|
|
bufferedwriter.write(String.format(Locale.ROOT, "block_ticks: %d\n", this.getBlockTicks().count()));
|
|
bufferedwriter.write(String.format(Locale.ROOT, "fluid_ticks: %d\n", this.getFluidTicks().count()));
|
|
@@ -1755,7 +1864,7 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
BufferedWriter bufferedwriter2 = Files.newBufferedWriter(path1);
|
|
|
|
try {
|
|
- playerchunkmap.dumpChunks(bufferedwriter2);
|
|
+ //playerchunkmap.dumpChunks(bufferedwriter2); // Paper - rewrite chunk system
|
|
} catch (Throwable throwable4) {
|
|
if (bufferedwriter2 != null) {
|
|
try {
|
|
@@ -1776,7 +1885,7 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
BufferedWriter bufferedwriter3 = Files.newBufferedWriter(path2);
|
|
|
|
try {
|
|
- this.entityManager.dumpSections(bufferedwriter3);
|
|
+ //this.entityManager.dumpSections(bufferedwriter3); // Paper - rewrite chunk system
|
|
} catch (Throwable throwable6) {
|
|
if (bufferedwriter3 != null) {
|
|
try {
|
|
@@ -1918,7 +2027,7 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
|
|
@VisibleForTesting
|
|
public String getWatchdogStats() {
|
|
- return String.format(Locale.ROOT, "players: %s, entities: %s [%s], block_entities: %d [%s], block_ticks: %d, fluid_ticks: %d, chunk_source: %s", this.players.size(), this.entityManager.gatherStats(), ServerLevel.getTypeCount(this.entityManager.getEntityGetter().getAll(), (entity) -> {
|
|
+ return String.format(Locale.ROOT, "players: %s, entities: %s [%s], block_entities: %d [%s], block_ticks: %d, fluid_ticks: %d, chunk_source: %s", this.players.size(), this.entityLookup.getDebugInfo(), ServerLevel.getTypeCount(this.entityLookup.getAll(), (entity) -> { // Paper - rewrite chunk system
|
|
return BuiltInRegistries.ENTITY_TYPE.getKey(entity.getType()).toString();
|
|
}), this.blockEntityTickers.size(), ServerLevel.getTypeCount(this.blockEntityTickers, TickingBlockEntity::getType), this.getBlockTicks().count(), this.getFluidTicks().count(), this.gatherChunkSourceStats());
|
|
}
|
|
@@ -1978,15 +2087,15 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
@Override
|
|
public LevelEntityGetter<Entity> getEntities() {
|
|
org.spigotmc.AsyncCatcher.catchOp("Chunk getEntities call"); // Spigot
|
|
- return this.entityManager.getEntityGetter();
|
|
+ return this.entityLookup; // Paper - rewrite chunk system
|
|
}
|
|
|
|
public void addLegacyChunkEntities(Stream<Entity> entities) {
|
|
- this.entityManager.addLegacyChunkEntities(entities);
|
|
+ this.entityLookup.addLegacyChunkEntities(entities.toList()); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public void addWorldGenChunkEntities(Stream<Entity> entities) {
|
|
- this.entityManager.addWorldGenChunkEntities(entities);
|
|
+ this.entityLookup.addWorldGenChunkEntities(entities.toList()); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public void startTickingChunk(LevelChunk chunk) {
|
|
@@ -2002,34 +2111,49 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
|
@Override
|
|
public void close() throws IOException {
|
|
super.close();
|
|
- this.entityManager.close();
|
|
+ //this.entityManager.close(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
@Override
|
|
public String gatherChunkSourceStats() {
|
|
String s = this.chunkSource.gatherStats();
|
|
|
|
- return "Chunks[S] W: " + s + " E: " + this.entityManager.gatherStats();
|
|
+ return "Chunks[S] W: " + s + " E: " + this.entityLookup.getDebugInfo(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public boolean areEntitiesLoaded(long chunkPos) {
|
|
- return this.entityManager.areEntitiesLoaded(chunkPos);
|
|
+ // Paper start - rewrite chunk system
|
|
+ return this.getChunkIfLoadedImmediately(ChunkPos.getX(chunkPos), ChunkPos.getZ(chunkPos)) != null;
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
|
|
private boolean isPositionTickingWithEntitiesLoaded(long chunkPos) {
|
|
- return this.areEntitiesLoaded(chunkPos) && this.chunkSource.isPositionTicking(chunkPos);
|
|
+ // Paper start - optimize is ticking ready type functions
|
|
+ io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder = this.chunkTaskScheduler.chunkHolderManager.getChunkHolder(chunkPos);
|
|
+ // isTicking implies the chunk is loaded, and a loaded chunk now implies its entities are loaded
|
|
+ return chunkHolder != null && chunkHolder.isTickingReady();
|
|
+ // Paper end
|
|
}
|
|
|
|
public boolean isPositionEntityTicking(BlockPos pos) {
|
|
- return this.entityManager.canPositionTick(pos) && this.chunkSource.chunkMap.getDistanceManager().inEntityTickingRange(ChunkPos.asLong(pos));
|
|
+ // Paper start - rewrite chunk system
|
|
+ io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder = this.chunkTaskScheduler.chunkHolderManager.getChunkHolder(io.papermc.paper.util.CoordinateUtils.getChunkKey(pos));
|
|
+ return chunkHolder != null && chunkHolder.isEntityTickingReady();
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
|
|
public boolean isNaturalSpawningAllowed(BlockPos pos) {
|
|
- return this.entityManager.canPositionTick(pos);
|
|
+ // Paper start - rewrite chunk system
|
|
+ io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder = this.chunkTaskScheduler.chunkHolderManager.getChunkHolder(io.papermc.paper.util.CoordinateUtils.getChunkKey(pos));
|
|
+ return chunkHolder != null && chunkHolder.isEntityTickingReady();
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
|
|
public boolean isNaturalSpawningAllowed(ChunkPos pos) {
|
|
- return this.entityManager.canPositionTick(pos);
|
|
+ // Paper start - rewrite chunk system
|
|
+ io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder = this.chunkTaskScheduler.chunkHolderManager.getChunkHolder(io.papermc.paper.util.CoordinateUtils.getChunkKey(pos));
|
|
+ return chunkHolder != null && chunkHolder.isEntityTickingReady();
|
|
+ // Paper end - rewrite chunk system
|
|
}
|
|
|
|
@Override
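Aside: writeEntityChunk/readEntityChunk above follow an ownership guard: callers not already on the dedicated region-file I/O thread re-dispatch the work to it, so only the owning thread ever touches the storage. A minimal sketch of that guard using a plain single-thread executor (hypothetical names; the real RegionFileIOThread also handles priorities and pending-write lookups):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    final class IoThreadGuardSketch {
        private static final ExecutorService IO = Executors.newSingleThreadExecutor(r -> {
            Thread t = new Thread(r, "region-io");
            t.setDaemon(true);
            return t;
        });

        static boolean isIoThread() {
            return "region-io".equals(Thread.currentThread().getName());
        }

        static void write(String data) {
            if (!isIoThread()) {
                IO.execute(() -> write(data)); // re-dispatch onto the owning thread
                return;
            }
            System.out.println("writing on " + Thread.currentThread().getName() + ": " + data);
        }

        public static void main(String[] args) throws InterruptedException {
            write("hello");
            IO.shutdown();
            IO.awaitTermination(1, TimeUnit.SECONDS);
        }
    }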
diff --git a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
|
|
index 275b7f7dd36a2073a3eb9f89f4c832839e5aa9af..660693c6dc0ef86f4013df980b6d0c11c03e46cd 100644
|
|
--- a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
|
|
+++ b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
|
|
@@ -36,15 +36,14 @@ import net.minecraft.world.level.chunk.ChunkStatus;
|
|
|
|
public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCloseable {
|
|
private static final Logger LOGGER = LogUtils.getLogger();
|
|
- private final ProcessorMailbox<Runnable> taskMailbox;
|
|
- private final ObjectList<Pair<ThreadedLevelLightEngine.TaskType, Runnable>> lightTasks = new ObjectArrayList<>();
|
|
+ // Paper - rewrite chunk system
|
|
private final ChunkMap chunkMap;
|
|
- private final ProcessorHandle<ChunkTaskPriorityQueueSorter.Message<Runnable>> sorterMailbox;
|
|
+ // Paper - rewrite chunk system
|
|
private volatile int taskPerBatch = 5;
|
|
- private final AtomicBoolean scheduled = new AtomicBoolean();
|
|
+ // Paper - rewrite chunk system
|
|
|
|
// Paper start - replace light engine impl
|
|
- protected final ca.spottedleaf.starlight.common.light.StarLightInterface theLightEngine;
|
|
+ public final ca.spottedleaf.starlight.common.light.StarLightInterface theLightEngine;
|
|
public final boolean hasBlockLight;
|
|
public final boolean hasSkyLight;
|
|
// Paper end - replace light engine impl
|
|
@@ -52,8 +51,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
|
|
public ThreadedLevelLightEngine(LightChunkGetter chunkProvider, ChunkMap chunkStorage, boolean hasBlockLight, ProcessorMailbox<Runnable> processor, ProcessorHandle<ChunkTaskPriorityQueueSorter.Message<Runnable>> executor) {
|
|
super(chunkProvider, false, false); // Paper - destroy vanilla light engine state
|
|
this.chunkMap = chunkStorage;
|
|
- this.sorterMailbox = executor;
|
|
- this.taskMailbox = processor;
|
|
+ // Paper - rewrite chunk system
|
|
// Paper start - replace light engine impl
|
|
this.hasBlockLight = true;
|
|
this.hasSkyLight = hasBlockLight; // Nice variable name.
|
|
@@ -97,7 +95,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
|
|
++totalChunks;
|
|
}
|
|
|
|
- this.taskMailbox.tell(() -> {
|
|
+ this.chunkMap.level.chunkTaskScheduler.lightExecutor.queueRunnable(() -> { // Paper - rewrite chunk system
|
|
this.theLightEngine.relightChunks(chunks, (ChunkPos chunkPos) -> {
|
|
chunkLightCallback.accept(chunkPos);
|
|
((java.util.concurrent.Executor)((ServerLevel)this.theLightEngine.getWorld()).getChunkSource().mainThreadProcessor).execute(() -> {
|
|
@@ -269,17 +267,11 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
|
|
}
|
|
|
|
private void addTask(int x, int z, ThreadedLevelLightEngine.TaskType stage, Runnable task) {
|
|
- this.addTask(x, z, this.chunkMap.getChunkQueueLevel(ChunkPos.asLong(x, z)), stage, task);
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
private void addTask(int x, int z, IntSupplier completedLevelSupplier, ThreadedLevelLightEngine.TaskType stage, Runnable task) {
|
|
- this.sorterMailbox.tell(ChunkTaskPriorityQueueSorter.message(() -> {
|
|
- this.lightTasks.add(Pair.of(stage, task));
|
|
- if (this.lightTasks.size() >= this.taskPerBatch) {
|
|
- this.runUpdate();
|
|
- }
|
|
-
|
|
- }, ChunkPos.asLong(x, z), completedLevelSupplier));
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
@Override
|
|
@@ -337,74 +329,15 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
|
|
}
|
|
});
|
|
}
|
|
- // Paper end - replace light engine impl
|
|
- ChunkPos chunkPos = chunk.getPos();
|
|
- chunk.setLightCorrect(false);
|
|
- this.addTask(chunkPos.x, chunkPos.z, ThreadedLevelLightEngine.TaskType.PRE_UPDATE, Util.name(() -> {
|
|
- LevelChunkSection[] levelChunkSections = chunk.getSections();
|
|
-
|
|
- for(int i = 0; i < chunk.getSectionsCount(); ++i) {
|
|
- LevelChunkSection levelChunkSection = levelChunkSections[i];
|
|
- if (!levelChunkSection.hasOnlyAir()) {
|
|
- int j = this.levelHeightAccessor.getSectionYFromSectionIndex(i);
|
|
- super.updateSectionStatus(SectionPos.of(chunkPos, j), false);
|
|
- }
|
|
- }
|
|
-
|
|
- super.enableLightSources(chunkPos, true);
|
|
- if (!excludeBlocks) {
|
|
- chunk.getLights().forEach((pos) -> {
|
|
- super.onBlockEmissionIncrease(pos, chunk.getLightEmission(pos));
|
|
- });
|
|
- }
|
|
-
|
|
- }, () -> {
|
|
- return "lightChunk " + chunkPos + " " + excludeBlocks;
|
|
- }));
|
|
- return CompletableFuture.supplyAsync(() -> {
|
|
- chunk.setLightCorrect(true);
|
|
- super.retainData(chunkPos, false);
|
|
- this.chunkMap.releaseLightTicket(chunkPos);
|
|
- return chunk;
|
|
- }, (runnable) -> {
|
|
- this.addTask(chunkPos.x, chunkPos.z, ThreadedLevelLightEngine.TaskType.POST_UPDATE, runnable);
|
|
- });
|
|
+ throw new InternalError(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public void tryScheduleUpdate() {
|
|
- if (this.hasLightWork() && this.scheduled.compareAndSet(false, true)) { // Paper // Paper - rewrite light engine
|
|
- this.taskMailbox.tell(() -> {
|
|
- this.runUpdate();
|
|
- this.scheduled.set(false);
|
|
- });
|
|
- }
|
|
-
|
|
+ // Paper - rewrite chunk system
|
|
}
|
|
|
|
private void runUpdate() {
|
|
- int i = Math.min(this.lightTasks.size(), this.taskPerBatch);
|
|
- ObjectListIterator<Pair<ThreadedLevelLightEngine.TaskType, Runnable>> objectListIterator = this.lightTasks.iterator();
|
|
-
|
|
- int j;
|
|
- for(j = 0; objectListIterator.hasNext() && j < i; ++j) {
|
|
- Pair<ThreadedLevelLightEngine.TaskType, Runnable> pair = objectListIterator.next();
|
|
- if (pair.getFirst() == ThreadedLevelLightEngine.TaskType.PRE_UPDATE) {
|
|
- pair.getSecond().run();
|
|
- }
|
|
- }
|
|
-
|
|
- objectListIterator.back(j);
|
|
- this.theLightEngine.propagateChanges(); // Paper - rewrite light engine
|
|
-
|
|
- for(int var5 = 0; objectListIterator.hasNext() && var5 < i; ++var5) {
|
|
- Pair<ThreadedLevelLightEngine.TaskType, Runnable> pair2 = objectListIterator.next();
|
|
- if (pair2.getFirst() == ThreadedLevelLightEngine.TaskType.POST_UPDATE) {
|
|
- pair2.getSecond().run();
|
|
- }
|
|
-
|
|
- objectListIterator.remove();
|
|
- }
|
|
-
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
public void setTaskPerBatch(int taskBatchSize) {
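Aside: with the mailbox and batch queue stubbed out above, all light work funnels into the chunk scheduler's lightExecutor at an explicit priority. A toy priority-bucketed executor conveying the assumed semantics (not the actual PrioritisedExecutor API):

    import java.util.ArrayDeque;
    import java.util.EnumMap;
    import java.util.Map;

    final class PriorityExecutorSketch {
        enum Priority { BLOCKING, NORMAL }

        private final Map<Priority, ArrayDeque<Runnable>> queues = new EnumMap<>(Priority.class);

        PriorityExecutorSketch() {
            for (Priority p : Priority.values()) queues.put(p, new ArrayDeque<>());
        }

        void queueRunnable(Runnable task, Priority priority) {
            queues.get(priority).add(task);
        }

        // run one task, preferring higher priorities; returns false when idle
        boolean executeTask() {
            for (Priority p : Priority.values()) {
                Runnable task = queues.get(p).poll();
                if (task != null) { task.run(); return true; }
            }
            return false;
        }

        public static void main(String[] args) {
            PriorityExecutorSketch ex = new PriorityExecutorSketch();
            ex.queueRunnable(() -> System.out.println("normal"), Priority.NORMAL);
            ex.queueRunnable(() -> System.out.println("blocking"), Priority.BLOCKING);
            while (ex.executeTask()) {}
        }
    }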
diff --git a/src/main/java/net/minecraft/server/level/Ticket.java b/src/main/java/net/minecraft/server/level/Ticket.java
|
|
index b346fa94b23d81da7da073f71dd12e672e0f079c..768a2667f950a635a562fa8a0c75b31a3ae9190e 100644
|
|
--- a/src/main/java/net/minecraft/server/level/Ticket.java
|
|
+++ b/src/main/java/net/minecraft/server/level/Ticket.java
|
|
@@ -6,9 +6,12 @@ public final class Ticket<T> implements Comparable<Ticket<?>> {
|
|
private final TicketType<T> type;
|
|
private final int ticketLevel;
|
|
public final T key;
|
|
- private long createdTick;
|
|
+ // Paper start - rewrite chunk system
|
|
+ public final long removalTick;
|
|
|
|
- protected Ticket(TicketType<T> type, int level, T argument) {
|
|
+ public Ticket(TicketType<T> type, int level, T argument, long removalTick) {
|
|
+ this.removalTick = removalTick;
|
|
+ // Paper end - rewrite chunk system
|
|
this.type = type;
|
|
this.ticketLevel = level;
|
|
this.key = argument;
|
|
@@ -44,7 +47,7 @@ public final class Ticket<T> implements Comparable<Ticket<?>> {
|
|
|
|
@Override
|
|
public String toString() {
|
|
- return "Ticket[" + this.type + " " + this.ticketLevel + " (" + this.key + ")] at " + this.createdTick;
|
|
+ return "Ticket[" + this.type + " " + this.ticketLevel + " (" + this.key + ")] to die on " + this.removalTick; // Paper - rewrite chunk system
|
|
}
|
|
|
|
public TicketType<T> getType() {
|
|
@@ -56,11 +59,10 @@ public final class Ticket<T> implements Comparable<Ticket<?>> {
|
|
}
|
|
|
|
protected void setCreatedTick(long tickCreated) {
|
|
- this.createdTick = tickCreated;
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
|
|
protected boolean timedOut(long currentTick) {
|
|
- long l = this.type.timeout();
|
|
- return l != 0L && currentTick - this.createdTick > l;
|
|
+ throw new UnsupportedOperationException(); // Paper - rewrite chunk system
|
|
}
|
|
}
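Aside: the Ticket change trades a stored creation tick plus per-check timeout arithmetic for a precomputed absolute removal tick, so an expiry check collapses to one comparison. Roughly (a simplified sketch with hypothetical types; the exact comparison semantics are assumed):

    final class TicketExpirySketch {
        // vanilla-style: derive expiry from creation time on every check
        record CreatedTicket(long createdTick, long timeout) {
            boolean timedOut(long now) { return timeout != 0L && now - createdTick > timeout; }
        }

        // rewrite-style: precompute the absolute tick the ticket dies on
        record RemovalTicket(long removalTick) {
            boolean timedOut(long now) { return now > removalTick; } // comparison semantics assumed
        }

        public static void main(String[] args) {
            System.out.println(new CreatedTicket(100, 20).timedOut(121)); // true
            System.out.println(new RemovalTicket(120).timedOut(121));     // true, one comparison
        }
    }

The absolute tick also fits the rewrite's model of tickets that "die on" a specific tick, as the toString change above reflects.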
diff --git a/src/main/java/net/minecraft/server/level/TicketType.java b/src/main/java/net/minecraft/server/level/TicketType.java
|
|
index 6051e5f272838ef23276a90e21c2fc821ca155d1..97d1ff2af23bac14e67bca5896843325aaa5bfc1 100644
|
|
--- a/src/main/java/net/minecraft/server/level/TicketType.java
|
|
+++ b/src/main/java/net/minecraft/server/level/TicketType.java
|
|
@@ -8,6 +8,7 @@ import net.minecraft.world.level.ChunkPos;
|
|
|
|
public class TicketType<T> {
|
|
public static final TicketType<Long> FUTURE_AWAIT = create("future_await", Long::compareTo); // Paper
|
|
+ public static final TicketType<Long> ASYNC_LOAD = create("async_load", Long::compareTo); // Paper
|
|
|
|
private final String name;
|
|
private final Comparator<T> comparator;
|
|
@@ -27,6 +28,13 @@ public class TicketType<T> {
|
|
public static final TicketType<Unit> PLUGIN = TicketType.create("plugin", (a, b) -> 0); // CraftBukkit
|
|
public static final TicketType<org.bukkit.plugin.Plugin> PLUGIN_TICKET = TicketType.create("plugin_ticket", (plugin1, plugin2) -> plugin1.getClass().getName().compareTo(plugin2.getClass().getName())); // CraftBukkit
|
|
public static final TicketType<Long> CHUNK_RELIGHT = create("light_update", Long::compareTo); // Paper - ensure chunks stay loaded for lighting
|
|
+ // Paper start - rewrite chunk system
|
|
+ public static final TicketType<Long> CHUNK_LOAD = create("chunk_load", Long::compareTo);
|
|
+ public static final TicketType<Long> STATUS_UPGRADE = create("status_upgrade", Long::compareTo);
|
|
+ public static final TicketType<Long> ENTITY_LOAD = create("entity_load", Long::compareTo);
|
|
+ public static final TicketType<Long> POI_LOAD = create("poi_load", Long::compareTo);
|
|
+ public static final TicketType<Unit> UNLOAD_COOLDOWN = create("unload_cooldown", (u1, u2) -> 0, 5 * 20);
|
|
+ // Paper end - rewrite chunk system
|
|
|
|
public static <T> TicketType<T> create(String name, Comparator<T> argumentComparator) {
|
|
return new TicketType<>(name, argumentComparator, 0L);
|
|
diff --git a/src/main/java/net/minecraft/server/level/WorldGenRegion.java b/src/main/java/net/minecraft/server/level/WorldGenRegion.java
|
|
index e96a0ca47e4701ba187555bd92c968345bc85677..73b96f804079288e9c5fcc11da54e61e89a6782a 100644
|
|
--- a/src/main/java/net/minecraft/server/level/WorldGenRegion.java
|
|
+++ b/src/main/java/net/minecraft/server/level/WorldGenRegion.java
|
|
@@ -504,4 +504,21 @@ public class WorldGenRegion implements WorldGenLevel {
|
|
public long nextSubTickCount() {
|
|
return this.subTickCount.getAndIncrement();
|
|
}
|
|
+
|
|
+ // Paper start
|
|
+ // No-op, this class doesn't provide entity access
|
|
+ @Override
|
|
+ public List<Entity> getHardCollidingEntities(Entity except, AABB box, Predicate<? super Entity> predicate) {
|
|
+ return Collections.emptyList();
|
|
+ }
|
|
+
|
|
+ @Override
|
|
+ public void getEntities(Entity except, AABB box, Predicate<? super Entity> predicate, List<Entity> into) {}
|
|
+
|
|
+ @Override
|
|
+ public void getHardCollidingEntities(Entity except, AABB box, Predicate<? super Entity> predicate, List<Entity> into) {}
|
|
+
|
|
+ @Override
|
|
+ public <T> void getEntitiesByClass(Class<? extends T> clazz, Entity except, AABB box, List<? super T> into, Predicate<? super T> predicate) {}
|
|
+ // Paper end
|
|
}
|
|
diff --git a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java
|
|
index e4435962a60cf9c6d833183bd244a2758ff42808..b85545f997447875e737e4d22a8a8dbcf1f8e2c8 100644
|
|
--- a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java
|
|
+++ b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java
|
|
@@ -776,6 +776,13 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic
|
|
this.disconnect(Component.translatable("disconnect.spam"));
|
|
return;
|
|
}
|
|
+ // Paper start
|
|
+ String str = packet.getCommand(); int index = -1;
|
|
+ if (str.length() > 64 && ((index = str.indexOf(' ')) == -1 || index >= 64)) {
|
|
+ server.scheduleOnMain(() -> this.disconnect(Component.translatable("disconnect.spam", new Object[0]))); // Paper
|
|
+ return;
|
|
+ }
|
|
+ // Paper end
|
|
// CraftBukkit end
|
|
StringReader stringreader = new StringReader(packet.getCommand());
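Aside: the added spam guard rejects any command longer than 64 characters when it contains no space or its first token (text before the first space) is itself 64+ characters. The predicate in isolation (hypothetical harness around the patch's actual condition):

    final class CommandLengthCheckSketch {
        // true when the command should be rejected: longer than 64 chars overall,
        // and either no space at all or the first space only occurs at index >= 64
        static boolean firstTokenTooLong(String str) {
            int index = -1;
            return str.length() > 64 && ((index = str.indexOf(' ')) == -1 || index >= 64);
        }

        public static void main(String[] args) {
            System.out.println(firstTokenTooLong("tp a b c"));                 // false
            System.out.println(firstTokenTooLong("x".repeat(80)));             // true
            System.out.println(firstTokenTooLong("x".repeat(80) + " arg"));    // true
            System.out.println(firstTokenTooLong("short " + "y".repeat(80))); // false
        }
    }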
diff --git a/src/main/java/net/minecraft/server/players/PlayerList.java b/src/main/java/net/minecraft/server/players/PlayerList.java
|
|
index 257b13703166bf953c73c83db8982b412ca96565..da36a7342c5ef34af9ea4330e6cee2880d8d4dc8 100644
|
|
--- a/src/main/java/net/minecraft/server/players/PlayerList.java
|
|
+++ b/src/main/java/net/minecraft/server/players/PlayerList.java
|
|
@@ -251,7 +251,7 @@ public abstract class PlayerList {
|
|
boolean flag1 = gamerules.getBoolean(GameRules.RULE_REDUCEDDEBUGINFO);
|
|
|
|
// Spigot - view distance
|
|
- playerconnection.send(new ClientboundLoginPacket(player.getId(), worlddata.isHardcore(), player.gameMode.getGameModeForPlayer(), player.gameMode.getPreviousGameModeForPlayer(), this.server.levelKeys(), this.synchronizedRegistries, worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), this.getMaxPlayers(), worldserver1.spigotConfig.viewDistance, worldserver1.spigotConfig.simulationDistance, flag1, !flag, worldserver1.isDebug(), worldserver1.isFlat(), player.getLastDeathLocation()));
|
|
+ playerconnection.send(new ClientboundLoginPacket(player.getId(), worlddata.isHardcore(), player.gameMode.getGameModeForPlayer(), player.gameMode.getPreviousGameModeForPlayer(), this.server.levelKeys(), this.synchronizedRegistries, worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), this.getMaxPlayers(), worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance(), worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance(), flag1, !flag, worldserver1.isDebug(), worldserver1.isFlat(), player.getLastDeathLocation())); // Paper - replace old player chunk management
|
|
player.getBukkitEntity().sendSupportedChannels(); // CraftBukkit
|
|
playerconnection.send(new ClientboundUpdateEnabledFeaturesPacket(FeatureFlags.REGISTRY.toNames(worldserver1.enabledFeatures())));
|
|
playerconnection.send(new ClientboundCustomPayloadPacket(ClientboundCustomPayloadPacket.BRAND, (new FriendlyByteBuf(Unpooled.buffer())).writeUtf(this.getServer().getServerModName())));
|
|
@@ -783,8 +783,8 @@ public abstract class PlayerList {
|
|
// CraftBukkit start
|
|
LevelData worlddata = worldserver1.getLevelData();
|
|
entityplayer1.connection.send(new ClientboundRespawnPacket(worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), entityplayer1.gameMode.getGameModeForPlayer(), entityplayer1.gameMode.getPreviousGameModeForPlayer(), worldserver1.isDebug(), worldserver1.isFlat(), (byte) i, entityplayer1.getLastDeathLocation()));
|
|
- entityplayer1.connection.send(new ClientboundSetChunkCacheRadiusPacket(worldserver1.spigotConfig.viewDistance)); // Spigot
|
|
- entityplayer1.connection.send(new ClientboundSetSimulationDistancePacket(worldserver1.spigotConfig.simulationDistance)); // Spigot
|
|
+ entityplayer1.connection.send(new ClientboundSetChunkCacheRadiusPacket(worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance())); // Spigot // Paper - replace old player chunk management
|
|
+ entityplayer1.connection.send(new ClientboundSetSimulationDistancePacket(worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance())); // Spigot // Paper - replace old player chunk management
|
|
entityplayer1.spawnIn(worldserver1);
|
|
entityplayer1.unsetRemoved();
|
|
entityplayer1.connection.teleport(new Location(worldserver1.getWorld(), entityplayer1.getX(), entityplayer1.getY(), entityplayer1.getZ(), entityplayer1.getYRot(), entityplayer1.getXRot()));
|
|
@@ -1281,7 +1281,7 @@ public abstract class PlayerList {
|
|
|
|
public void setViewDistance(int viewDistance) {
|
|
this.viewDistance = viewDistance;
|
|
- this.broadcastAll(new ClientboundSetChunkCacheRadiusPacket(viewDistance));
|
|
+ //this.broadcastAll(new ClientboundSetChunkCacheRadiusPacket(viewDistance)); // Paper - move into setViewDistance
|
|
Iterator iterator = this.server.getAllLevels().iterator();
|
|
|
|
while (iterator.hasNext()) {
|
|
@@ -1296,7 +1296,7 @@ public abstract class PlayerList {
|
|
|
|
public void setSimulationDistance(int simulationDistance) {
|
|
this.simulationDistance = simulationDistance;
|
|
- this.broadcastAll(new ClientboundSetSimulationDistancePacket(simulationDistance));
|
|
+ //this.broadcastAll(new ClientboundSetSimulationDistancePacket(simulationDistance)); // Paper - handled by playerchunkloader
|
|
Iterator iterator = this.server.getAllLevels().iterator();
|
|
|
|
while (iterator.hasNext()) {
|
|
diff --git a/src/main/java/net/minecraft/util/SortedArraySet.java b/src/main/java/net/minecraft/util/SortedArraySet.java
|
|
index ca788f0dcec4a117b410fe8348969e056b138b1e..4f5f2c25e12ee6d977bc98d9118650cfe91e6c0e 100644
|
|
--- a/src/main/java/net/minecraft/util/SortedArraySet.java
|
|
+++ b/src/main/java/net/minecraft/util/SortedArraySet.java
|
|
@@ -22,6 +22,41 @@ public class SortedArraySet<T> extends AbstractSet<T> {
|
|
this.contents = (T[])castRawArray(new Object[initialCapacity]);
|
|
}
|
|
}
|
|
+ // Paper start - optimise removeIf
|
|
+ @Override
|
|
+ public boolean removeIf(java.util.function.Predicate<? super T> filter) {
|
|
+ // previous impl used an iterator, which could be O(n^2) and created garbage
|
|
+ int i = 0, len = this.size;
|
|
+ T[] backingArray = this.contents;
|
|
+
|
|
+ for (;;) {
|
|
+ if (i >= len) {
|
|
+ return false;
|
|
+ }
|
|
+ if (!filter.test(backingArray[i])) {
|
|
+ ++i;
|
|
+ continue;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ // we only want to write back to backingArray if we really need to
|
|
+
|
|
+ int lastIndex = i; // this is where new elements are shifted to
|
|
+
|
|
+ for (; i < len; ++i) {
|
|
+ T curr = backingArray[i];
|
|
+ if (!filter.test(curr)) { // if test throws we're screwed
|
|
+ backingArray[lastIndex++] = curr;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // cleanup end
|
|
+ Arrays.fill(backingArray, lastIndex, len, null);
|
|
+ this.size = lastIndex;
|
|
+ return true;
|
|
+ }
|
|
+ // Paper end - optimise removeIf
|
|
|
|
public static <T extends Comparable<T>> SortedArraySet<T> create() {
|
|
return create(10);
|
|
@@ -110,6 +145,31 @@ public class SortedArraySet<T> extends AbstractSet<T> {
|
|
}
|
|
}
|
|
|
|
+ // Paper start - rewrite chunk system
|
|
+ public T replace(T object) {
|
|
+ int i = this.findIndex(object);
|
|
+ if (i >= 0) {
|
|
+ T old = this.contents[i];
|
|
+ this.contents[i] = object;
|
|
+ return old;
|
|
+ } else {
|
|
+ this.addInternal(object, getInsertionPosition(i));
|
|
+ return object;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ public T removeAndGet(T object) {
|
|
+ int i = this.findIndex(object);
|
|
+ if (i >= 0) {
|
|
+ final T ret = this.contents[i];
|
|
+ this.removeInternal(i);
|
|
+ return ret;
|
|
+ } else {
|
|
+ return null;
|
|
+ }
|
|
+ }
|
|
+ // Paper end - rewrite chunk system
|
|
+
|
|
@Override
|
|
public boolean remove(Object object) {
|
|
int i = this.findIndex((T)object);
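Aside: the removeIf override above is a stable in-place compaction; the patch version first scans to the first removed element so an untouched prefix is never rewritten. The core algorithm on a plain array (a simplified sketch without that fast path; hypothetical helper, not the class's API):

    import java.util.Arrays;
    import java.util.function.Predicate;

    final class CompactionSketch {
        // returns the new logical size; elements past it are nulled for GC
        static <T> int removeIf(T[] arr, int size, Predicate<? super T> filter) {
            int write = 0;
            for (int read = 0; read < size; ++read) {
                T curr = arr[read];
                if (!filter.test(curr)) {
                    arr[write++] = curr; // keep, shifting left over removed slots
                }
            }
            Arrays.fill(arr, write, size, null);
            return write;
        }

        public static void main(String[] args) {
            Integer[] a = {1, 2, 3, 4, 5};
            int n = removeIf(a, a.length, x -> x % 2 == 0);
            System.out.println(n + " " + Arrays.toString(a)); // 3 [1, 3, 5, null, null]
        }
    }

Shifting kept elements in place preserves order and avoids the iterator path, which performs an arraycopy per removal.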
diff --git a/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java b/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java
|
|
index 7de38549f08b9639f1e76256f12e10e8fa658c16..759b125cc1251b9b4f1f443c9f70c482ef5b32f8 100644
|
|
--- a/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java
|
|
+++ b/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java
|
|
@@ -186,7 +186,11 @@ public class WorldUpgrader {
|
|
}
|
|
|
|
WorldUpgrader.LOGGER.error("Error upgrading chunk {}", chunkcoordintpair, throwable);
|
|
+ // Paper start
|
|
+ } catch (IOException e) {
|
|
+ WorldUpgrader.LOGGER.error("Error upgrading chunk {}", chunkcoordintpair, e);
|
|
}
|
|
+ // Paper end
|
|
|
|
if (flag1) {
|
|
++this.converted;
|
|
diff --git a/src/main/java/net/minecraft/world/entity/Entity.java b/src/main/java/net/minecraft/world/entity/Entity.java
|
|
index 23b22543c3d164e3fdf2f262f3e0124636b32fce..0cd735648314872e98e310627247b9ad425ab64d 100644
|
|
--- a/src/main/java/net/minecraft/world/entity/Entity.java
|
|
+++ b/src/main/java/net/minecraft/world/entity/Entity.java
|
|
@@ -319,6 +319,58 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource {
|
|
}
|
|
// Paper end
|
|
|
|
+ // Paper start
|
|
+ /**
|
|
+ * Overriding this field will cause memory leaks.
|
|
+ */
|
|
+ private final boolean hardCollides;
|
|
+
|
|
+ private static final java.util.Map<Class<? extends Entity>, Boolean> cachedOverrides = java.util.Collections.synchronizedMap(new java.util.WeakHashMap<>());
|
|
+ {
|
|
+ /* // Goodbye, broken on reobf...
|
|
+ Boolean hardCollides = cachedOverrides.get(this.getClass());
|
|
+ if (hardCollides == null) {
|
|
+ try {
|
|
+ java.lang.reflect.Method getHardCollisionBoxEntityMethod = Entity.class.getMethod("canCollideWith", Entity.class);
|
|
+ java.lang.reflect.Method hasHardCollisionBoxMethod = Entity.class.getMethod("canBeCollidedWith");
|
|
+ if (!this.getClass().getMethod(hasHardCollisionBoxMethod.getName(), hasHardCollisionBoxMethod.getParameterTypes()).equals(hasHardCollisionBoxMethod)
|
|
+ || !this.getClass().getMethod(getHardCollisionBoxEntityMethod.getName(), getHardCollisionBoxEntityMethod.getParameterTypes()).equals(getHardCollisionBoxEntityMethod)) {
|
|
+ hardCollides = Boolean.TRUE;
|
|
+ } else {
|
|
+ hardCollides = Boolean.FALSE;
|
|
+ }
|
|
+ cachedOverrides.put(this.getClass(), hardCollides);
|
|
+ }
|
|
+ catch (ThreadDeath thr) { throw thr; }
|
|
+ catch (Throwable thr) {
|
|
+ // shouldn't happen, just explode
|
|
+ throw new RuntimeException(thr);
|
|
+ }
|
|
+ } */
|
|
+ this.hardCollides = this instanceof Boat
|
|
+ || this instanceof net.minecraft.world.entity.monster.Shulker
|
|
+ || this instanceof net.minecraft.world.entity.vehicle.AbstractMinecart
|
|
+ || this.shouldHardCollide();
|
|
+ }
|
|
+
|
|
+ // plugins can override
|
|
+ protected boolean shouldHardCollide() {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ public final boolean hardCollides() {
|
|
+ return this.hardCollides;
|
|
+ }
|
|
+
|
|
+ public net.minecraft.server.level.ChunkHolder.FullChunkStatus chunkStatus;
|
|
+
|
|
+ public int sectionX = Integer.MIN_VALUE;
|
|
+ public int sectionY = Integer.MIN_VALUE;
|
|
+ public int sectionZ = Integer.MIN_VALUE;
|
|
+
|
|
+ public boolean updatingSectionStatus = false;
|
|
+ // Paper end
|
|
+
|
|
public Entity(EntityType<?> type, Level world) {
|
|
this.id = Entity.ENTITY_COUNTER.incrementAndGet();
|
|
this.passengers = ImmutableList.of();
|
|
@@ -2121,11 +2173,11 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource {
|
|
return InteractionResult.PASS;
|
|
}
|
|
|
|
- public boolean canCollideWith(Entity other) {
|
|
+ public boolean canCollideWith(Entity other) { // Paper - diff on change, hard colliding entities override this - TODO CHECK ON UPDATE - AbstractMinecart/Boat override
|
|
return other.canBeCollidedWith() && !this.isPassengerOfSameVehicle(other);
|
|
}
|
|
|
|
- public boolean canBeCollidedWith() {
|
|
+ public boolean canBeCollidedWith() { // Paper - diff on change, hard colliding entities override this TODO CHECK ON UPDATE - Boat/Shulker override
|
|
return false;
|
|
}
|
|
|
|
@@ -3389,6 +3441,16 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource {
|
|
};
|
|
}
|
|
|
|
+ // Paper start - rewrite chunk system
|
|
+ public boolean hasAnyPlayerPassengers() {
|
|
+ // copied from below
|
|
+ if (this.passengers.isEmpty()) { return false; }
|
|
+ return this.getIndirectPassengersStream().anyMatch((entity) -> {
|
|
+ return entity instanceof Player;
|
|
+ });
|
|
+ }
|
|
+ // Paper end - rewrite chunk system
|
|
+
|
|
public boolean hasExactlyOnePlayerPassenger() {
|
|
return this.getIndirectPassengersStream().filter((entity) -> {
|
|
return entity instanceof Player;
|
|
@@ -3697,6 +3759,12 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource {
|
|
}
|
|
|
|
public final void setPosRaw(double x, double y, double z) {
|
|
+ // Paper start - rewrite chunk system
|
|
+ if (this.updatingSectionStatus) {
|
|
+ LOGGER.error("Refusing to update position for entity " + this + " to position " + new Vec3(x, y, z) + " since it is processing a section status update", new Throwable());
|
|
+ return;
|
|
+ }
|
|
+ // Paper end - rewrite chunk system
|
|
if (this.position.x != x || this.position.y != y || this.position.z != z) {
|
|
this.position = new Vec3(x, y, z);
|
|
int i = Mth.floor(x);
|
|
@@ -3792,6 +3860,13 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource {
|
|
|
|
@Override
|
|
public final void setRemoved(Entity.RemovalReason reason) {
|
|
+ // Paper start - rewrite chunk system
|
|
+ io.papermc.paper.util.TickThread.ensureTickThread(this, "Cannot remove entity off-main");
|
|
+ if (this.updatingSectionStatus) {
|
|
+ LOGGER.warn("Entity " + this + " is currently prevented from being added/removed to world since it is processing section status updates", new Throwable());
|
|
+ return;
|
|
+ }
|
|
+ // Paper end - rewrite chunk system
|
|
if (this.removalReason == null) {
|
|
this.removalReason = reason;
|
|
}
|
|
@@ -3800,7 +3875,7 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource {
|
|
this.stopRiding();
|
|
}
|
|
|
|
- this.getPassengers().forEach(Entity::stopRiding);
|
|
+ if (reason != RemovalReason.UNLOADED_TO_CHUNK) this.getPassengers().forEach(Entity::stopRiding); // Paper - chunk system - don't adjust passenger state when unloading, it's just not safe (and messes with our logic in entity chunk unload)
|
|
this.levelCallback.onRemove(reason);
|
|
}
|
|
|
|
@@ -3815,7 +3890,7 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource {
|
|
|
|
@Override
|
|
public boolean shouldBeSaved() {
|
|
- return this.removalReason != null && !this.removalReason.shouldSave() ? false : (this.isPassenger() ? false : !this.isVehicle() || !this.hasExactlyOnePlayerPassenger());
|
|
+ return this.removalReason != null && !this.removalReason.shouldSave() ? false : (this.isPassenger() ? false : !this.isVehicle() || !this.hasAnyPlayerPassengers()); // Paper - rewrite chunk system - it should check if the entity has ANY player passengers
|
|
}
@Override
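Aside: shouldBeSaved now asks whether any indirect passenger is a player, because the old exactly-one check let a vehicle carrying two players through to saving. The recursive passenger walk in isolation (hypothetical Rider type):

    import java.util.List;
    import java.util.stream.Stream;

    final class PassengerCheckSketch {
        record Rider(boolean isPlayer, List<Rider> passengers) {}

        // flatten the passenger tree so riders of riders are also visited
        static Stream<Rider> indirectPassengers(Rider vehicle) {
            return vehicle.passengers().stream()
                .flatMap(p -> Stream.concat(Stream.of(p), indirectPassengers(p)));
        }

        static boolean hasAnyPlayerPassengers(Rider vehicle) {
            return !vehicle.passengers().isEmpty()
                && indirectPassengers(vehicle).anyMatch(Rider::isPlayer);
        }

        public static void main(String[] args) {
            Rider player = new Rider(true, List.of());
            Rider mob = new Rider(false, List.of());
            Rider boat = new Rider(false, List.of(mob, new Rider(false, List.of(player))));
            System.out.println(hasAnyPlayerPassengers(boat)); // true: player is two levels deep
        }
    }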
diff --git a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java
|
|
index 4569f21643812e73897c9812be684a371df60f74..440e4f35e38d38e5407a6fecf09ab9511a44a670 100644
|
|
--- a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java
|
|
+++ b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java
|
|
@@ -38,12 +38,28 @@ import net.minecraft.world.level.chunk.storage.SectionStorage;
|
|
public class PoiManager extends SectionStorage<PoiSection> {
|
|
public static final int MAX_VILLAGE_DISTANCE = 6;
|
|
public static final int VILLAGE_SECTION_SIZE = 1;
|
|
- private final PoiManager.DistanceTracker distanceTracker;
|
|
- private final LongSet loadedChunks = new LongOpenHashSet();
|
|
+ // Paper start - rewrite chunk system
|
|
+ // the vanilla tracker needs to be replaced because it does not support level removes
|
|
+ public final net.minecraft.server.level.ServerLevel world;
|
|
+ private final io.papermc.paper.util.misc.Delayed26WayDistancePropagator3D villageDistanceTracker = new io.papermc.paper.util.misc.Delayed26WayDistancePropagator3D();
|
|
+ static final int POI_DATA_SOURCE = 7;
|
|
+ public static int convertBetweenLevels(final int level) {
|
|
+ return POI_DATA_SOURCE - level;
|
|
+ }
|
|
+
|
|
+ protected void updateDistanceTracking(long section) {
|
|
+ if (this.isVillageCenter(section)) {
|
|
+ this.villageDistanceTracker.setSource(section, POI_DATA_SOURCE);
|
|
+ } else {
|
|
+ this.villageDistanceTracker.removeSource(section);
|
|
+ }
|
|
+ }
|
|
+ // Paper end - rewrite chunk system
+

     public PoiManager(Path path, DataFixer dataFixer, boolean dsync, RegistryAccess registryManager, LevelHeightAccessor world) {
         super(path, PoiSection::codec, PoiSection::new, dataFixer, DataFixTypes.POI_CHUNK, dsync, registryManager, world);
-        this.distanceTracker = new PoiManager.DistanceTracker();
+        this.world = (net.minecraft.server.level.ServerLevel)world; // Paper - rewrite chunk system
     }

     public void add(BlockPos pos, Holder<PoiType> type) {
@@ -180,8 +196,8 @@ public class PoiManager extends SectionStorage<PoiSection> {
     }

     public int sectionsToVillage(SectionPos pos) {
-        this.distanceTracker.runAllUpdates();
-        return this.distanceTracker.getLevel(pos.asLong());
+        this.villageDistanceTracker.propagateUpdates(); // Paper - replace distance tracking util
+        return convertBetweenLevels(this.villageDistanceTracker.getLevel(io.papermc.paper.util.CoordinateUtils.getChunkSectionKey(pos))); // Paper - replace distance tracking util
     }

     boolean isVillageCenter(long pos) {
@@ -195,21 +211,106 @@ public class PoiManager extends SectionStorage<PoiSection> {

     @Override
     public void tick(BooleanSupplier shouldKeepTicking) {
-        super.tick(shouldKeepTicking);
-        this.distanceTracker.runAllUpdates();
+        this.villageDistanceTracker.propagateUpdates(); // Paper - rewrite chunk system
     }

     @Override
-    protected void setDirty(long pos) {
-        super.setDirty(pos);
-        this.distanceTracker.update(pos, this.distanceTracker.getLevelFromSource(pos), false);
+    public void setDirty(long pos) {
+        // Paper start - rewrite chunk system
+        int chunkX = io.papermc.paper.util.CoordinateUtils.getChunkSectionX(pos);
+        int chunkZ = io.papermc.paper.util.CoordinateUtils.getChunkSectionZ(pos);
+        io.papermc.paper.chunk.system.scheduling.ChunkHolderManager manager = this.world.chunkTaskScheduler.chunkHolderManager;
+        io.papermc.paper.chunk.system.poi.PoiChunk chunk = manager.getPoiChunkIfLoaded(chunkX, chunkZ, false);
+        if (chunk != null) {
+            chunk.setDirty(true);
+        }
+        this.updateDistanceTracking(pos);
+        // Paper end - rewrite chunk system
     }

     @Override
     protected void onSectionLoad(long pos) {
-        this.distanceTracker.update(pos, this.distanceTracker.getLevelFromSource(pos), false);
+        this.updateDistanceTracking(pos); // Paper - move to new distance tracking util
     }

+    @Override
+    public Optional<PoiSection> get(long pos) {
+        int chunkX = io.papermc.paper.util.CoordinateUtils.getChunkSectionX(pos);
+        int chunkY = io.papermc.paper.util.CoordinateUtils.getChunkSectionY(pos);
+        int chunkZ = io.papermc.paper.util.CoordinateUtils.getChunkSectionZ(pos);
+
+        io.papermc.paper.util.TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Accessing poi chunk off-main");
+
+        io.papermc.paper.chunk.system.scheduling.ChunkHolderManager manager = this.world.chunkTaskScheduler.chunkHolderManager;
+        io.papermc.paper.chunk.system.poi.PoiChunk ret = manager.getPoiChunkIfLoaded(chunkX, chunkZ, true);
+
+        return ret == null ? Optional.empty() : ret.getSectionForVanilla(chunkY);
+    }
+
+    @Override
+    public Optional<PoiSection> getOrLoad(long pos) {
+        int chunkX = io.papermc.paper.util.CoordinateUtils.getChunkSectionX(pos);
+        int chunkY = io.papermc.paper.util.CoordinateUtils.getChunkSectionY(pos);
+        int chunkZ = io.papermc.paper.util.CoordinateUtils.getChunkSectionZ(pos);
+
+        io.papermc.paper.util.TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Accessing poi chunk off-main");
+
+        io.papermc.paper.chunk.system.scheduling.ChunkHolderManager manager = this.world.chunkTaskScheduler.chunkHolderManager;
+
+        if (chunkY >= io.papermc.paper.util.WorldUtil.getMinSection(this.world) &&
+            chunkY <= io.papermc.paper.util.WorldUtil.getMaxSection(this.world)) {
+            io.papermc.paper.chunk.system.poi.PoiChunk ret = manager.getPoiChunkIfLoaded(chunkX, chunkZ, true);
+            if (ret != null) {
+                return ret.getSectionForVanilla(chunkY);
+            } else {
+                return manager.loadPoiChunk(chunkX, chunkZ).getSectionForVanilla(chunkY);
+            }
+        }
+        // retain vanilla behavior: do not load section if out of bounds!
+        return Optional.empty();
+    }
+
+    @Override
+    protected PoiSection getOrCreate(long pos) {
+        int chunkX = io.papermc.paper.util.CoordinateUtils.getChunkSectionX(pos);
+        int chunkY = io.papermc.paper.util.CoordinateUtils.getChunkSectionY(pos);
+        int chunkZ = io.papermc.paper.util.CoordinateUtils.getChunkSectionZ(pos);
+
+        io.papermc.paper.util.TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Accessing poi chunk off-main");
+
+        io.papermc.paper.chunk.system.scheduling.ChunkHolderManager manager = this.world.chunkTaskScheduler.chunkHolderManager;
+
+        io.papermc.paper.chunk.system.poi.PoiChunk ret = manager.getPoiChunkIfLoaded(chunkX, chunkZ, true);
+        if (ret != null) {
+            return ret.getOrCreateSection(chunkY);
+        } else {
+            return manager.loadPoiChunk(chunkX, chunkZ).getOrCreateSection(chunkY);
+        }
+    }
+
+    public void onUnload(long coordinate) { // Paper - rewrite chunk system
+        int chunkX = io.papermc.paper.util.MCUtil.getCoordinateX(coordinate);
+        int chunkZ = io.papermc.paper.util.MCUtil.getCoordinateZ(coordinate);
+        io.papermc.paper.util.TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Unloading poi chunk off-main");
+        for (int section = this.levelHeightAccessor.getMinSection(); section < this.levelHeightAccessor.getMaxSection(); ++section) {
+            long sectionPos = SectionPos.asLong(chunkX, section, chunkZ);
+            this.updateDistanceTracking(sectionPos);
+        }
+    }
+
+    public void loadInPoiChunk(io.papermc.paper.chunk.system.poi.PoiChunk poiChunk) {
+        int chunkX = poiChunk.chunkX;
+        int chunkZ = poiChunk.chunkZ;
+        io.papermc.paper.util.TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Loading poi chunk off-main");
+        for (int sectionY = this.levelHeightAccessor.getMinSection(); sectionY < this.levelHeightAccessor.getMaxSection(); ++sectionY) {
+            PoiSection section = poiChunk.getSection(sectionY);
+            if (section != null && !section.isEmpty()) {
+                this.onSectionLoad(SectionPos.asLong(chunkX, sectionY, chunkZ));
+            }
+        }
+    }
+    // Paper end - rewrite chunk system
+
     public void checkConsistencyWithBlocks(ChunkPos chunkPos, LevelChunkSection chunkSection) {
         SectionPos sectionPos = SectionPos.of(chunkPos, SectionPos.blockToSectionCoord(chunkSection.bottomBlockY()));
         Util.ifElse(this.getOrLoad(sectionPos.asLong()), (poiSet) -> {
@@ -249,7 +350,7 @@ public class PoiManager extends SectionStorage<PoiSection> {
         }).map((pair) -> {
             return pair.getFirst().chunk();
         }).filter((chunkPos) -> {
-            return this.loadedChunks.add(chunkPos.toLong());
+            return true; // Paper - rewrite chunk system
         }).forEach((chunkPos) -> {
             world.getChunk(chunkPos.x, chunkPos.z, ChunkStatus.EMPTY);
         });
@@ -265,7 +366,7 @@ public class PoiManager extends SectionStorage<PoiSection> {

         @Override
         protected int getLevelFromSource(long id) {
-            return PoiManager.this.isVillageCenter(id) ? 0 : 7;
+            return PoiManager.this.isVillageCenter(id) ? 0 : 7; // Paper - rewrite chunk system - diff on change, this specifies the source level to use for distance tracking
         }

         @Override
@@ -288,6 +389,35 @@ public class PoiManager extends SectionStorage<PoiSection> {
         }
     }

+    // Paper start - Asynchronous chunk io
+    @javax.annotation.Nullable
+    @Override
+    public net.minecraft.nbt.CompoundTag read(ChunkPos chunkcoordintpair) throws java.io.IOException {
+        // Paper start - rewrite chunk system
+        if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) {
+            return io.papermc.paper.chunk.system.io.RegionFileIOThread.loadData(
+                this.world, chunkcoordintpair.x, chunkcoordintpair.z, io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA,
+                io.papermc.paper.chunk.system.io.RegionFileIOThread.getIOBlockingPriorityForCurrentThread()
+            );
+        }
+        // Paper end - rewrite chunk system
+        return super.read(chunkcoordintpair);
+    }
+
+    @Override
+    public void write(ChunkPos chunkcoordintpair, net.minecraft.nbt.CompoundTag nbttagcompound) throws java.io.IOException {
+        // Paper start - rewrite chunk system
+        if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) {
+            io.papermc.paper.chunk.system.io.RegionFileIOThread.scheduleSave(
+                this.world, chunkcoordintpair.x, chunkcoordintpair.z, nbttagcompound,
+                io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA);
+            return;
+        }
+        // Paper end - rewrite chunk system
+        super.write(chunkcoordintpair, nbttagcompound);
+    }
+    // Paper end
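Editor's note: both overrides follow the same routing rule, namely, if the caller is not already on the region-file I/O thread, forward the operation to it (blocking for reads, fire-and-forget for saves); if it is, run the superclass I/O directly so the I/O thread never blocks on itself. A minimal standalone sketch of that rule with hypothetical names (Paper's real logic lives in RegionFileIOThread, not here):

// sketch: re-route work to an owning I/O thread unless already on it
final class IoRouterSketch {
    private static final ThreadLocal<Boolean> IS_IO_THREAD = ThreadLocal.withInitial(() -> Boolean.FALSE);
    private static final java.util.concurrent.ExecutorService IO_THREAD =
        java.util.concurrent.Executors.newSingleThreadExecutor(workerLoop -> {
            // mark the worker thread so re-entrant calls can detect themselves
            Thread thread = new Thread(() -> { IS_IO_THREAD.set(Boolean.TRUE); workerLoop.run(); });
            thread.setName("region-io");
            return thread;
        });

    static byte[] read(java.util.function.Supplier<byte[]> rawRead) throws Exception {
        if (!IS_IO_THREAD.get()) {
            // not on the I/O thread: submit and block until the owning thread has read
            return IO_THREAD.submit(rawRead::get).get();
        }
        // already on the I/O thread: read directly to avoid self-deadlock
        return rawRead.get();
    }
}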
+
     public static enum Occupancy {
         HAS_SPACE(PoiRecord::hasSpace),
         IS_OCCUPIED(PoiRecord::isOccupied),
diff --git a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiSection.java b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiSection.java
index 795a02941d7cecb58ec45b5e79c8d510ff21163a..3fc17817906876e83f040f908b8b1ba6cfa37b8b 100644
--- a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiSection.java
+++ b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiSection.java
@@ -29,6 +29,7 @@ public class PoiSection {
     private final Map<Holder<PoiType>, Set<PoiRecord>> byType = Maps.newHashMap();
     private final Runnable setDirty;
     private boolean isValid;
+    public final Optional<PoiSection> noAllocateOptional = Optional.of(this); // Paper - rewrite chunk system

     public static Codec<PoiSection> codec(Runnable updateListener) {
         return RecordCodecBuilder.<PoiSection>create((instance) -> {
@@ -46,6 +47,12 @@ public class PoiSection {
         this(updateListener, true, ImmutableList.of());
     }

+    // Paper start - isEmpty
+    public boolean isEmpty() {
+        return this.isValid && this.records.isEmpty() && this.byType.isEmpty();
+    }
+    // Paper end
+
     private PoiSection(Runnable updateListener, boolean valid, List<PoiRecord> pois) {
         this.setDirty = updateListener;
         this.isValid = valid;
diff --git a/src/main/java/net/minecraft/world/level/EntityGetter.java b/src/main/java/net/minecraft/world/level/EntityGetter.java
index 127c4ebedb94631ceac92dbdcd465e904217d715..be6e3e21ad62da01e5e2dd78e300cbc8efdbeb42 100644
--- a/src/main/java/net/minecraft/world/level/EntityGetter.java
+++ b/src/main/java/net/minecraft/world/level/EntityGetter.java
@@ -18,6 +18,18 @@ import net.minecraft.world.phys.shapes.Shapes;
 import net.minecraft.world.phys.shapes.VoxelShape;

 public interface EntityGetter {
+
+    // Paper start
+    List<Entity> getHardCollidingEntities(Entity except, AABB box, Predicate<? super Entity> predicate);
+
+    void getEntities(Entity except, AABB box, Predicate<? super Entity> predicate, List<Entity> into);
+
+    void getHardCollidingEntities(Entity except, AABB box, Predicate<? super Entity> predicate, List<Entity> into);
+
+    <T> void getEntitiesByClass(Class<? extends T> clazz, Entity except, final AABB box, List<? super T> into,
+                                Predicate<? super T> predicate);
+    // Paper end
+
     List<Entity> getEntities(@Nullable Entity except, AABB box, Predicate<? super Entity> predicate);

     <T extends Entity> List<T> getEntities(EntityTypeTest<Entity, T> filter, AABB box, Predicate<? super T> predicate);
diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java
index 0640df782cff1aec27e1915f726c89275edeec69..bcbbb266302cbd1ac3b0b42fac58d09737357dd3 100644
--- a/src/main/java/net/minecraft/world/level/Level.java
+++ b/src/main/java/net/minecraft/world/level/Level.java
@@ -453,6 +453,11 @@ public abstract class Level implements LevelAccessor, AutoCloseable {

         if ((i & 2) != 0 && (!this.isClientSide || (i & 4) == 0) && (this.isClientSide || chunk == null || (chunk.getFullStatus() != null && chunk.getFullStatus().isOrAfter(ChunkHolder.FullChunkStatus.TICKING)))) { // allow chunk to be null here as chunk.isReady() is false when we send our notification during block placement
             this.sendBlockUpdated(blockposition, iblockdata1, iblockdata, i);
+            // Paper start - per player view distance - allow block updates for non-ticking chunks in player view distance
+            // if copied from above
+        } else if ((i & 2) != 0 && (!this.isClientSide || (i & 4) == 0) && (this.isClientSide || chunk == null || ((ServerLevel)this).getChunkSource().chunkMap.playerChunkManager.broadcastMap.getObjectsInRange(io.papermc.paper.util.MCUtil.getCoordinateKey(blockposition)) != null)) { // Paper - replace old player chunk management
+            ((ServerLevel)this).getChunkSource().blockChanged(blockposition);
+            // Paper end - per player view distance
         }

         if ((i & 1) != 0) {
@@ -805,7 +810,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable {
             return this.capturedTileEntities.get(blockposition);
         }
         // CraftBukkit end
-        return this.isOutsideBuildHeight(blockposition) ? null : (!this.isClientSide && Thread.currentThread() != this.thread ? null : this.getChunkAt(blockposition).getBlockEntity(blockposition, LevelChunk.EntityCreationType.IMMEDIATE));
+        return this.isOutsideBuildHeight(blockposition) ? null : (!this.isClientSide && !io.papermc.paper.util.TickThread.isTickThread() ? null : this.getChunkAt(blockposition).getBlockEntity(blockposition, LevelChunk.EntityCreationType.IMMEDIATE)); // Paper - rewrite chunk system
     }

     public void setBlockEntity(BlockEntity blockEntity) {
@@ -896,26 +901,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable {
     public List<Entity> getEntities(@Nullable Entity except, AABB box, Predicate<? super Entity> predicate) {
         this.getProfiler().incrementCounter("getEntities");
         List<Entity> list = Lists.newArrayList();
-
-        this.getEntities().get(box, (entity1) -> {
-            if (entity1 != except && predicate.test(entity1)) {
-                list.add(entity1);
-            }
-
-            if (entity1 instanceof EnderDragon) {
-                EnderDragonPart[] aentitycomplexpart = ((EnderDragon) entity1).getSubEntities();
-                int i = aentitycomplexpart.length;
-
-                for (int j = 0; j < i; ++j) {
-                    EnderDragonPart entitycomplexpart = aentitycomplexpart[j];
-
-                    if (entity1 != except && predicate.test(entitycomplexpart)) {
-                        list.add(entitycomplexpart);
-                    }
-                }
-            }
-
-        });
+        ((ServerLevel)this).getEntityLookup().getEntities(except, box, list, predicate); // Paper - optimise this call
         return list;
     }

@@ -933,34 +919,23 @@ public abstract class Level implements LevelAccessor, AutoCloseable {

     public <T extends Entity> void getEntities(EntityTypeTest<Entity, T> filter, AABB box, Predicate<? super T> predicate, List<? super T> result, int limit) {
         this.getProfiler().incrementCounter("getEntities");
-        this.getEntities().get(filter, box, (entity) -> {
-            if (predicate.test(entity)) {
-                result.add(entity);
-                if (result.size() >= limit) {
-                    return AbortableIterationConsumer.Continuation.ABORT;
-                }
-            }
-
-            if (entity instanceof EnderDragon) {
-                EnderDragon entityenderdragon = (EnderDragon) entity;
-                EnderDragonPart[] aentitycomplexpart = entityenderdragon.getSubEntities();
-                int j = aentitycomplexpart.length;
-
-                for (int k = 0; k < j; ++k) {
-                    EnderDragonPart entitycomplexpart = aentitycomplexpart[k];
-                    T t0 = filter.tryCast(entitycomplexpart); // CraftBukkit - decompile error
-
-                    if (t0 != null && predicate.test(t0)) {
-                        result.add(t0);
-                        if (result.size() >= limit) {
-                            return AbortableIterationConsumer.Continuation.ABORT;
-                        }
-                    }
-                }
+        // Paper start - optimise this call
+        //TODO use limit
+        if (filter instanceof net.minecraft.world.entity.EntityType entityTypeTest) {
+            ((ServerLevel) this).getEntityLookup().getEntities(entityTypeTest, box, result, predicate);
+        } else {
+            Predicate<? super T> test = (obj) -> {
+                return filter.tryCast(obj) != null;
+            };
+            predicate = predicate == null ? test : test.and((Predicate) predicate);
+            Class base;
+            if (filter == null || (base = filter.getBaseClass()) == null || base == Entity.class) {
+                ((ServerLevel) this).getEntityLookup().getEntities((Entity) null, box, (List) result, (Predicate)predicate);
+            } else {
+                ((ServerLevel) this).getEntityLookup().getEntities(base, null, box, (List) result, (Predicate)predicate); // Paper - optimise this call
             }
-
-            return AbortableIterationConsumer.Continuation.CONTINUE;
-        });
+        }
+        // Paper end - optimise this call
     }

     @Nullable
@@ -1293,4 +1268,45 @@ public abstract class Level implements LevelAccessor, AutoCloseable {

         private ExplosionInteraction() {}
     }
+    // Paper start
+    //protected final io.papermc.paper.world.EntitySliceManager entitySliceManager; // Paper - rewrite chunk system
+
+    public org.bukkit.entity.Entity[] getChunkEntities(int chunkX, int chunkZ) {
+        io.papermc.paper.world.ChunkEntitySlices slices = ((ServerLevel)this).getEntityLookup().getChunk(chunkX, chunkZ);
+        if (slices == null) {
+            return new org.bukkit.entity.Entity[0];
+        }
+        return slices.getChunkEntities();
+    }
+
+    @Override
+    public List<Entity> getHardCollidingEntities(Entity except, AABB box, Predicate<? super Entity> predicate) {
+        List<Entity> ret = new java.util.ArrayList<>();
+        ((ServerLevel)this).getEntityLookup().getHardCollidingEntities(except, box, ret, predicate);
+        return ret;
+    }
+
+    @Override
+    public void getEntities(Entity except, AABB box, Predicate<? super Entity> predicate, List<Entity> into) {
+        ((ServerLevel)this).getEntityLookup().getEntities(except, box, into, predicate);
+    }
+
+    @Override
+    public void getHardCollidingEntities(Entity except, AABB box, Predicate<? super Entity> predicate, List<Entity> into) {
+        ((ServerLevel)this).getEntityLookup().getHardCollidingEntities(except, box, into, predicate);
+    }
+
+    @Override
+    public <T> void getEntitiesByClass(Class<? extends T> clazz, Entity except, final AABB box, List<? super T> into,
+                                       Predicate<? super T> predicate) {
+        ((ServerLevel)this).getEntityLookup().getEntities((Class)clazz, except, box, (List)into, (Predicate)predicate);
+    }
+
+    @Override
+    public <T extends Entity> List<T> getEntitiesOfClass(Class<T> entityClass, AABB box, Predicate<? super T> predicate) {
+        List<T> ret = new java.util.ArrayList<>();
+        ((ServerLevel)this).getEntityLookup().getEntities(entityClass, null, box, ret, predicate);
+        return ret;
+    }
+    // Paper end
 }
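Editor's note: every query method above funnels into a single entity lookup and appends matches into a caller-supplied list, so hot paths can reuse a buffer instead of allocating per query. A reduced standalone model of that calling convention (hypothetical types, not Paper's EntityLookup):

// sketch: results are written into the caller's list rather than returned
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

final class LookupSketch<E> {
    private final List<E> backing = new ArrayList<>();

    void add(E entity) {
        this.backing.add(entity);
    }

    // appends matches to 'into'; the same list can be cleared and reused each tick
    void query(Predicate<? super E> predicate, List<? super E> into) {
        for (E entity : this.backing) {
            if (predicate.test(entity)) {
                into.add(entity);
            }
        }
    }
}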
diff --git a/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java b/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java
index ab358cfc606ad24c8f3a49da4294ecc6a1d22630..2d89c415fe4e76bff51374d50f1a7efbaace134e 100644
--- a/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java
+++ b/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java
@@ -114,7 +114,7 @@ public abstract class ChunkGenerator {
         return CompletableFuture.supplyAsync(Util.wrapThreadWithTaskName("init_biomes", () -> {
             chunk.fillBiomesFromNoise(this.biomeSource, noiseConfig.sampler());
             return chunk;
-        }), Util.backgroundExecutor());
+        }), executor); // Paper - run with supplied executor
     }

     public abstract void applyCarvers(WorldGenRegion chunkRegion, long seed, RandomState noiseConfig, BiomeManager biomeAccess, StructureManager structureAccessor, ChunkAccess chunk, GenerationStep.Carving carverStep);
diff --git a/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java b/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java
index e2278ed457a7342d0d1b1a5fc1b5bdef6358816b..71df7c590e31932f2b8fc26a2afaaa54f52674ac 100644
--- a/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java
+++ b/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java
@@ -30,6 +30,30 @@ import net.minecraft.world.level.levelgen.structure.templatesystem.StructureTemp

 public class ChunkStatus {

+    // Paper start - rewrite chunk system
+    public boolean isParallelCapable; // Paper
+    public int writeRadius = -1;
+    public int loadRange = 0;
+
+    protected static final java.util.List<ChunkStatus> statuses = new java.util.ArrayList<>();
+
+    private ChunkStatus nextStatus;
+
+    public final ChunkStatus getNextStatus() {
+        return this.nextStatus;
+    }
+
+    public final boolean isEmptyLoadStatus() {
+        return this.loadingTask == PASSTHROUGH_LOAD_TASK;
+    }
+
+    public final boolean isEmptyGenStatus() {
+        return this == ChunkStatus.EMPTY || this == ChunkStatus.HEIGHTMAPS || this == ChunkStatus.LIQUID_CARVERS;
+    }
+
+    public final java.util.concurrent.atomic.AtomicBoolean warnedAboutNoImmediateComplete = new java.util.concurrent.atomic.AtomicBoolean();
+    // Paper end - rewrite chunk system
+
     public static final int MAX_STRUCTURE_DISTANCE = 8;
     private static final EnumSet<Heightmap.Types> PRE_FEATURES = EnumSet.of(Heightmap.Types.OCEAN_FLOOR_WG, Heightmap.Types.WORLD_SURFACE_WG);
     public static final EnumSet<Heightmap.Types> POST_FEATURES = EnumSet.of(Heightmap.Types.OCEAN_FLOOR, Heightmap.Types.WORLD_SURFACE, Heightmap.Types.MOTION_BLOCKING, Heightmap.Types.MOTION_BLOCKING_NO_LEAVES);
@@ -151,10 +175,8 @@ public class ChunkStatus {
             protochunk.setStatus(chunkstatus);
         }

-        return lightenginethreaded.retainData(ichunkaccess).thenApply(Either::left);
-    }, (chunkstatus, worldserver, structuretemplatemanager, lightenginethreaded, function, ichunkaccess) -> {
-        return lightenginethreaded.retainData(ichunkaccess).thenApply(Either::left);
-    });
+        return CompletableFuture.completedFuture(Either.left(ichunkaccess)); // Paper - rewrite chunk system
+    }); // Paper - rewrite chunk system
     public static final ChunkStatus LIGHT = ChunkStatus.register("light", ChunkStatus.FEATURES, 1, ChunkStatus.POST_FEATURES, ChunkStatus.ChunkType.PROTOCHUNK, (chunkstatus, executor, worldserver, chunkgenerator, structuretemplatemanager, lightenginethreaded, function, list, ichunkaccess, flag) -> {
         return ChunkStatus.lightChunk(chunkstatus, lightenginethreaded, ichunkaccess);
     }, (chunkstatus, worldserver, structuretemplatemanager, lightenginethreaded, function, ichunkaccess) -> {
@@ -256,6 +278,13 @@ public class ChunkStatus {
         this.chunkType = chunkType;
         this.heightmapsAfter = heightMapTypes;
         this.index = previous == null ? 0 : previous.getIndex() + 1;
+        // Paper start
+        this.nextStatus = this;
+        if (statuses.size() > 0) {
+            statuses.get(statuses.size() - 1).nextStatus = this;
+        }
+        statuses.add(this);
+        // Paper end
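Editor's note: the constructor hook above threads every ChunkStatus into a forward-linked chain as a side effect of registration. Each new status becomes the previous one's nextStatus, and the newest one points at itself, so walking getNextStatus() always terminates at the tail without null checks. A standalone demonstration of the same registration trick:

// sketch: self-terminating linked chain built at registration time
import java.util.ArrayList;
import java.util.List;

final class StatusChainDemo {
    private static final List<StatusChainDemo> STATUSES = new ArrayList<>();
    private final String name;
    private StatusChainDemo next;

    StatusChainDemo(String name) {
        this.name = name;
        this.next = this; // the tail points at itself
        if (!STATUSES.isEmpty()) {
            STATUSES.get(STATUSES.size() - 1).next = this;
        }
        STATUSES.add(this);
    }

    public static void main(String[] args) {
        StatusChainDemo empty = new StatusChainDemo("empty");
        new StatusChainDemo("features");
        new StatusChainDemo("full");
        // walks empty -> features -> full, then stops at the self-linked tail
        for (StatusChainDemo s = empty; ; s = s.next) {
            System.out.println(s.name);
            if (s.next == s) break;
        }
    }
}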
     }

     public int getIndex() {
diff --git a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
index be181106a223f6bb824ddf58690674617c6dacb5..8c2b562e46a35369389da453012b7af570145380 100644
--- a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
+++ b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java
@@ -183,6 +183,43 @@ public class LevelChunk extends ChunkAccess {

     protected void onNeighbourChange(final long bitsetBefore, final long bitsetAfter) {

+        // Paper start - no-tick view distance
+        ServerChunkCache chunkProviderServer = ((ServerLevel)this.level).getChunkSource();
+        net.minecraft.server.level.ChunkMap chunkMap = chunkProviderServer.chunkMap;
+        // this code handles the addition of ticking tickets - the distance map handles the removal
+        if (!areNeighboursLoaded(bitsetBefore, 2) && areNeighboursLoaded(bitsetAfter, 2)) {
+            if (chunkMap.playerChunkManager.tickMap.getObjectsInRange(this.coordinateKey) != null) { // Paper - replace old player chunk loading system
+                // now we're ready for entity ticking
+                chunkProviderServer.mainThreadProcessor.execute(() -> {
+                    // double check that this condition still holds.
+                    if (LevelChunk.this.areNeighboursLoaded(2) && chunkMap.playerChunkManager.tickMap.getObjectsInRange(LevelChunk.this.coordinateKey) != null) { // Paper - replace old player chunk loading system
+                        chunkMap.playerChunkManager.onChunkPlayerTickReady(this.chunkPos.x, this.chunkPos.z); // Paper - replace old player chunk
+                        chunkProviderServer.addTicketAtLevel(net.minecraft.server.level.TicketType.PLAYER, LevelChunk.this.chunkPos, 31, LevelChunk.this.chunkPos); // 31 -> entity ticking, TODO check on update
+                    }
+                });
+            }
+        }
+
+        // this code handles the chunk sending
+        if (!areNeighboursLoaded(bitsetBefore, 1) && areNeighboursLoaded(bitsetAfter, 1)) {
+            // Paper start - replace old player chunk loading system
+            if (chunkMap.playerChunkManager.isChunkNearPlayers(this.chunkPos.x, this.chunkPos.z)) {
+                // the post processing is expensive, so we don't want to run it unless we're actually near
+                // a player.
+                chunkProviderServer.mainThreadProcessor.execute(() -> {
+                    if (!LevelChunk.this.areNeighboursLoaded(1)) {
+                        return;
+                    }
+                    LevelChunk.this.postProcessGeneration();
+                    if (!LevelChunk.this.areNeighboursLoaded(1)) {
+                        return;
+                    }
+                    chunkMap.playerChunkManager.onChunkSendReady(this.chunkPos.x, this.chunkPos.z);
+                });
+            }
+            // Paper end - replace old player chunk loading system
+        }
+        // Paper end - no-tick view distance
     }

     public final boolean isAnyNeighborsLoaded() {
@@ -660,9 +697,26 @@ public class LevelChunk extends ChunkAccess {

     }

-    // CraftBukkit start
-    public void loadCallback() {
-        // Paper start - neighbour cache
+    // Paper start - new load callbacks
+    private io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder;
+    public io.papermc.paper.chunk.system.scheduling.NewChunkHolder getChunkHolder() {
+        return this.chunkHolder;
+    }
+
+    public void setChunkHolder(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) {
+        if (chunkHolder == null) {
+            throw new NullPointerException("Chunkholder cannot be null");
+        }
+        if (this.chunkHolder != null) {
+            throw new IllegalStateException("Already have chunkholder: " + this.chunkHolder + ", cannot replace with " + chunkHolder);
+        }
+        this.chunkHolder = chunkHolder;
+        this.playerChunk = chunkHolder.vanillaChunkHolder;
+    }
+
+    /* Note: We skip the light neighbour chunk loading done for the vanilla full chunk */
+    /* Starlight does not need these chunks for lighting purposes because of edge checks */
+    public void pushChunkIntoLoadedMap() {
         int chunkX = this.chunkPos.x;
         int chunkZ = this.chunkPos.z;
         ServerChunkCache chunkProvider = this.level.getChunkSource();
@@ -677,10 +731,56 @@ public class LevelChunk extends ChunkAccess {
             }
         }
         this.setNeighbourLoaded(0, 0, this);
+        this.level.getChunkSource().addLoadedChunk(this);
+    }
+
+    public void onChunkLoad(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) {
+        // figure out how this should interface with:
+        // the entity chunk load event // -> moved to the FULL status
+        // the chunk load event // -> stays here
+        // any entity add to world events // -> in FULL status
+        this.loadCallback();
+        io.papermc.paper.chunk.system.ChunkSystem.onChunkBorder(this, chunkHolder.vanillaChunkHolder);
+    }
+
+    public void onChunkUnload(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) {
+        // figure out how this should interface with:
+        // the entity chunk load event // -> moved to chunk unload to disk (not written yet)
+        // the chunk load event // -> stays here
+        // any entity add to world events // -> goes into the unload logic, it will completely explode
+        // etc later
+        this.unloadCallback();
+        io.papermc.paper.chunk.system.ChunkSystem.onChunkNotBorder(this, chunkHolder.vanillaChunkHolder);
+    }
+
+    public void onChunkTicking(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) {
+        this.postProcessGeneration();
+        this.level.startTickingChunk(this);
+        io.papermc.paper.chunk.system.ChunkSystem.onChunkTicking(this, chunkHolder.vanillaChunkHolder);
+    }
+
+    public void onChunkNotTicking(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) {
+        io.papermc.paper.chunk.system.ChunkSystem.onChunkNotTicking(this, chunkHolder.vanillaChunkHolder);
+    }
+
+    public void onChunkEntityTicking(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) {
+        io.papermc.paper.chunk.system.ChunkSystem.onChunkEntityTicking(this, chunkHolder.vanillaChunkHolder);
+    }
+
+    public void onChunkNotEntityTicking(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) {
+        io.papermc.paper.chunk.system.ChunkSystem.onChunkNotEntityTicking(this, chunkHolder.vanillaChunkHolder);
+    }
+    // Paper end - new load callbacks
+
+    // CraftBukkit start
+    public void loadCallback() {
+        if (this.loadedTicketLevel) { LOGGER.error("Double calling chunk load!", new Throwable()); } // Paper
+        // Paper - rewrite chunk system - move into separate callback
         this.loadedTicketLevel = true;
-        // Paper end - neighbour cache
+        // Paper - rewrite chunk system - move into separate callback
         org.bukkit.Server server = this.level.getCraftServer();
-        this.level.getChunkSource().addLoadedChunk(this); // Paper
+        // Paper - rewrite chunk system - move into separate callback
+        ((ServerLevel)this.level).getChunkSource().chunkMap.playerChunkManager.onChunkLoad(this.chunkPos.x, this.chunkPos.z); // Paper - rewrite player chunk management
         if (server != null) {
             /*
              * If it's a new world, the first few chunks are generated inside
@@ -688,6 +788,7 @@ public class LevelChunk extends ChunkAccess {
              * no way of creating a CraftWorld/CraftServer at that point.
              */
             server.getPluginManager().callEvent(new org.bukkit.event.world.ChunkLoadEvent(this.bukkitChunk, this.needsDecoration));
+            this.chunkHolder.getEntityChunk().callEntitiesLoadEvent(); // Paper - rewrite chunk system

             if (this.needsDecoration) {
                 try (co.aikar.timings.Timing ignored = this.level.timings.chunkLoadPopulate.startTiming()) { // Paper
@@ -716,8 +817,10 @@ public class LevelChunk extends ChunkAccess {
     }

     public void unloadCallback() {
+        if (!this.loadedTicketLevel) { LOGGER.error("Double calling chunk unload!", new Throwable()); } // Paper
         org.bukkit.Server server = this.level.getCraftServer();
-        org.bukkit.event.world.ChunkUnloadEvent unloadEvent = new org.bukkit.event.world.ChunkUnloadEvent(this.bukkitChunk, this.isUnsaved());
+        this.chunkHolder.getEntityChunk().callEntitiesUnloadEvent(); // Paper - rewrite chunk system
+        org.bukkit.event.world.ChunkUnloadEvent unloadEvent = new org.bukkit.event.world.ChunkUnloadEvent(this.bukkitChunk, true); // Paper - rewrite chunk system - force save to true so that mustNotSave is correctly set below
         server.getPluginManager().callEvent(unloadEvent);
         // note: saving can be prevented, but not forced if no saving is actually required
         this.mustNotSave = !unloadEvent.isSaveChunk();
@@ -739,9 +842,26 @@ public class LevelChunk extends ChunkAccess {
         // Paper end
     }

+    // Paper start - add dirty system to tick lists
+    @Override
+    public void setUnsaved(boolean needsSaving) {
+        if (!needsSaving) {
+            this.blockTicks.clearDirty();
+            this.fluidTicks.clearDirty();
+        }
+        super.setUnsaved(needsSaving);
+    }
+    // Paper end - add dirty system to tick lists
+
     @Override
     public boolean isUnsaved() {
-        return super.isUnsaved() && !this.mustNotSave;
+        // Paper start - add dirty system to tick lists
+        long gameTime = this.level.getLevelData().getGameTime();
+        if (this.blockTicks.isDirty(gameTime) || this.fluidTicks.isDirty(gameTime)) {
+            return true;
+        }
+        // Paper end - add dirty system to tick lists
+        return super.isUnsaved(); // Paper - rewrite chunk system - do NOT clobber the dirty flag
     }
     // CraftBukkit end

@@ -804,7 +924,10 @@ public class LevelChunk extends ChunkAccess {
         });
     }

+    public boolean isPostProcessingDone; // Paper - replace chunk loader system
+
     public void postProcessGeneration() {
+        try { // Paper - replace chunk loader system
         ChunkPos chunkcoordintpair = this.getPos();

         for (int i = 0; i < this.postProcessing.length; ++i) {
@@ -842,6 +965,11 @@ public class LevelChunk extends ChunkAccess {

         this.pendingBlockEntities.clear();
         this.upgradeData.upgrade(this);
+        } finally { // Paper start - replace chunk loader system
+            this.isPostProcessingDone = true;
+            this.level.getChunkSource().chunkMap.playerChunkManager.onChunkPostProcessing(this.chunkPos.x, this.chunkPos.z);
+        }
+        // Paper end - replace chunk loader system
     }

     @Nullable
@@ -891,7 +1019,7 @@ public class LevelChunk extends ChunkAccess {

     }

     public ChunkHolder.FullChunkStatus getFullStatus() {
-        return this.fullStatus == null ? ChunkHolder.FullChunkStatus.BORDER : (ChunkHolder.FullChunkStatus) this.fullStatus.get();
+        return this.chunkHolder == null ? ChunkHolder.FullChunkStatus.INACCESSIBLE : this.chunkHolder.getChunkStatus(); // Paper - rewrite chunk system
     }

     public void setFullStatus(Supplier<ChunkHolder.FullChunkStatus> levelTypeProvider) {
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
index a3c14a470ac7055abd00ac6f4f1c02065a485d3b..b8cf01fbfc3293bf78b1094a90da3594fa2067b4 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
@@ -94,7 +94,31 @@ public class ChunkSerializer {

     public ChunkSerializer() {}

+    // Paper start
+    public static final class InProgressChunkHolder {
+
+        public final ProtoChunk protoChunk;
+        public final java.util.ArrayDeque<Runnable> tasks;
+
+        public CompoundTag poiData;
+
+        public InProgressChunkHolder(final ProtoChunk protoChunk, final java.util.ArrayDeque<Runnable> tasks) {
+            this.protoChunk = protoChunk;
+            this.tasks = tasks;
+        }
+    }
+    // Paper end
+
     public static ProtoChunk read(ServerLevel world, PoiManager poiStorage, ChunkPos chunkPos, CompoundTag nbt) {
+        // Paper start - add variant for async calls
+        InProgressChunkHolder holder = loadChunk(world, poiStorage, chunkPos, nbt, true);
+        holder.tasks.forEach(Runnable::run);
+        return holder.protoChunk;
+    }
+
+    public static InProgressChunkHolder loadChunk(ServerLevel world, PoiManager poiStorage, ChunkPos chunkPos, CompoundTag nbt, boolean distinguish) {
+        java.util.ArrayDeque<Runnable> tasksToExecuteOnMain = new java.util.ArrayDeque<>();
+        // Paper end
         ChunkPos chunkcoordintpair1 = new ChunkPos(nbt.getInt("xPos"), nbt.getInt("zPos"));

         if (!Objects.equals(chunkPos, chunkcoordintpair1)) {
@@ -158,7 +182,9 @@ public class ChunkSerializer {
                     LevelChunkSection chunksection = new LevelChunkSection(b0, datapaletteblock, (PalettedContainer) object); // CraftBukkit - read/write

                     achunksection[k] = chunksection;
+                    tasksToExecuteOnMain.add(() -> { // Paper - delay this task since we're executing off-main
                     poiStorage.checkConsistencyWithBlocks(chunkPos, chunksection);
+                    }); // Paper - delay this task since we're executing off-main
                 }

                 boolean flag3 = nbttagcompound1.contains("BlockLight", 7);
@@ -319,7 +345,7 @@ public class ChunkSerializer {
         }

         if (chunkstatus_type == ChunkStatus.ChunkType.LEVELCHUNK) {
-            return new ImposterProtoChunk((LevelChunk) object1, false);
+            return new InProgressChunkHolder(new ImposterProtoChunk((LevelChunk) object1, false), tasksToExecuteOnMain); // Paper - Async chunk loading
         } else {
             ProtoChunk protochunk1 = (ProtoChunk) object1;

@@ -362,9 +388,41 @@ public class ChunkSerializer {
                 protochunk1.setCarvingMask(worldgenstage_features, new CarvingMask(nbttagcompound4.getLongArray(s1), ((ChunkAccess) object1).getMinBuildHeight()));
             }

-            return protochunk1;
+            return new InProgressChunkHolder(protochunk1, tasksToExecuteOnMain); // Paper - Async chunk loading
+        }
+    }
+
+    // Paper start - async chunk save for unload
+    public record AsyncSaveData(
+        Tag blockTickList, // non-null if we had to go to the server's tick list
+        Tag fluidTickList, // non-null if we had to go to the server's tick list
+        ListTag blockEntities,
+        long worldTime
+    ) {}
+
+    // must be called sync
+    public static AsyncSaveData getAsyncSaveData(ServerLevel world, ChunkAccess chunk) {
+        org.spigotmc.AsyncCatcher.catchOp("preparation of chunk data for async save");
+
+        final CompoundTag tickLists = new CompoundTag();
+        ChunkSerializer.saveTicks(world, tickLists, chunk.getTicksForSerialization());
+
+        ListTag blockEntitiesSerialized = new ListTag();
+        for (final BlockPos blockPos : chunk.getBlockEntitiesPos()) {
+            final CompoundTag blockEntityNbt = chunk.getBlockEntityNbtForSaving(blockPos);
+            if (blockEntityNbt != null) {
+                blockEntitiesSerialized.add(blockEntityNbt);
+            }
         }
+
+        return new AsyncSaveData(
+            tickLists.get(BLOCK_TICKS_TAG),
+            tickLists.get(FLUID_TICKS_TAG),
+            blockEntitiesSerialized,
+            world.getGameTime()
+        );
     }
+    // Paper end
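Editor's note: getAsyncSaveData() captures everything that only the main thread may touch (tick lists, block-entity NBT, the game time) into an immutable snapshot; saveChunk() can then run on a worker against that snapshot instead of against live state. The general shape, reduced to a standalone sketch with hypothetical types:

// sketch: capture on the owning thread, serialize anywhere
record Snapshot(java.util.List<String> blockEntities, long worldTime) {}

final class AsyncSaveSketch {
    // main thread: copy out everything other threads may not read safely
    static Snapshot capture(java.util.List<String> liveBlockEntities, long gameTime) {
        return new Snapshot(java.util.List.copyOf(liveBlockEntities), gameTime);
    }

    // worker thread: serialize from the snapshot only, never from live state
    static String serialize(Snapshot snapshot) {
        return "LastUpdate=" + snapshot.worldTime() + ";blockEntities=" + snapshot.blockEntities();
    }
}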

     private static void logErrors(ChunkPos chunkPos, int y, String message) {
         ChunkSerializer.LOGGER.error("Recoverable errors when loading section [" + chunkPos.x + ", " + y + ", " + chunkPos.z + "]: " + message);
@@ -381,6 +439,11 @@ public class ChunkSerializer {
     // CraftBukkit end

     public static CompoundTag write(ServerLevel world, ChunkAccess chunk) {
+        // Paper start
+        return saveChunk(world, chunk, null);
+    }
+    public static CompoundTag saveChunk(ServerLevel world, ChunkAccess chunk, @org.checkerframework.checker.nullness.qual.Nullable AsyncSaveData asyncsavedata) {
+        // Paper end
         // Paper start - rewrite light impl
         final int minSection = io.papermc.paper.util.WorldUtil.getMinLightSection(world);
         final int maxSection = io.papermc.paper.util.WorldUtil.getMaxLightSection(world);
@@ -394,7 +457,7 @@ public class ChunkSerializer {
         nbttagcompound.putInt("xPos", chunkcoordintpair.x);
         nbttagcompound.putInt("yPos", chunk.getMinSection());
         nbttagcompound.putInt("zPos", chunkcoordintpair.z);
-        nbttagcompound.putLong("LastUpdate", world.getGameTime());
+        nbttagcompound.putLong("LastUpdate", asyncsavedata != null ? asyncsavedata.worldTime : world.getGameTime()); // Paper - async chunk unloading
         nbttagcompound.putLong("InhabitedTime", chunk.getInhabitedTime());
         nbttagcompound.putString("Status", chunk.getStatus().getName());
         BlendingData blendingdata = chunk.getBlendingData();
@@ -494,8 +557,17 @@ public class ChunkSerializer {
             nbttagcompound.putBoolean("isLightOn", false); // Paper - set to false but still store, this allows us to detect --eraseCache (as eraseCache _removes_)
         }

-        ListTag nbttaglist1 = new ListTag();
-        Iterator iterator = chunk.getBlockEntitiesPos().iterator();
+        // Paper start
+        ListTag nbttaglist1;
+        Iterator<BlockPos> iterator;
+        if (asyncsavedata != null) {
+            nbttaglist1 = asyncsavedata.blockEntities;
+            iterator = java.util.Collections.emptyIterator();
+        } else {
+            nbttaglist1 = new ListTag();
+            iterator = chunk.getBlockEntitiesPos().iterator();
+        }
+        // Paper end

         CompoundTag nbttagcompound2;

@@ -532,7 +604,14 @@ public class ChunkSerializer {
             nbttagcompound.put("CarvingMasks", nbttagcompound2);
         }

+        // Paper start
+        if (asyncsavedata != null) {
+            nbttagcompound.put(BLOCK_TICKS_TAG, asyncsavedata.blockTickList);
+            nbttagcompound.put(FLUID_TICKS_TAG, asyncsavedata.fluidTickList);
+        } else {
         ChunkSerializer.saveTicks(world, nbttagcompound, chunk.getTicksForSerialization());
+        }
+        // Paper end
         nbttagcompound.put("PostProcessing", ChunkSerializer.packOffsets(chunk.getPostProcessing()));
         CompoundTag nbttagcompound3 = new CompoundTag();
         Iterator iterator1 = chunk.getHeightmaps().iterator();
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
index aa6d4996e1b9fce21ebf184eefb9d7a92d2a340b..c3305cb49741523724ff7b3c9254a0df2cf3d6c6 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
@@ -28,26 +28,33 @@ import net.minecraft.world.level.storage.DimensionDataStorage;
 public class ChunkStorage implements AutoCloseable {

     public static final int LAST_MONOLYTH_STRUCTURE_DATA_VERSION = 1493;
-    private final IOWorker worker;
+    // Paper - nuke IO worker
     protected final DataFixer fixerUpper;
     @Nullable
     private volatile LegacyStructureDataHandler legacyStructureHandler;
+    // Paper start - async chunk loading
+    private final Object persistentDataLock = new Object(); // Paper
+    public final RegionFileStorage regionFileCache;
+    // Paper end - async chunk loading

     public ChunkStorage(Path directory, DataFixer dataFixer, boolean dsync) {
         this.fixerUpper = dataFixer;
-        this.worker = new IOWorker(directory, dsync, "chunk");
+        // Paper start - async chunk io
+        // remove IO worker
+        this.regionFileCache = new RegionFileStorage(directory, dsync); // Paper - nuke IOWorker
+        // Paper end - async chunk io
     }

     public boolean isOldChunkAround(ChunkPos chunkPos, int checkRadius) {
-        return this.worker.isOldChunkAround(chunkPos, checkRadius);
+        return true; // Paper - (for now, old unoptimised behavior) TODO implement later? the chunk status that blender uses SHOULD already have this radius loaded, no need to go back for it...
     }

     // CraftBukkit start
     private boolean check(ServerChunkCache cps, int x, int z) {
         ChunkPos pos = new ChunkPos(x, z);
         if (cps != null) {
-            com.google.common.base.Preconditions.checkState(org.bukkit.Bukkit.isPrimaryThread(), "primary thread");
-            if (cps.hasChunk(x, z)) {
+            //com.google.common.base.Preconditions.checkState(org.bukkit.Bukkit.isPrimaryThread(), "primary thread"); // Paper - this function is now MT-Safe
+            if (cps.getChunkAtIfCachedImmediately(x, z) != null) { // Paper - isLoaded is a ticket level check, not a chunk loaded check!
                 return true;
             }
         }
@@ -75,6 +82,7 @@ public class ChunkStorage implements AutoCloseable {

     public CompoundTag upgradeChunkTag(ResourceKey<LevelStem> resourcekey, Supplier<DimensionDataStorage> supplier, CompoundTag nbttagcompound, Optional<ResourceKey<Codec<? extends ChunkGenerator>>> optional, ChunkPos pos, @Nullable LevelAccessor generatoraccess) {
         // CraftBukkit end
+        nbttagcompound = nbttagcompound.copy(); // Paper - defensive copy, another thread might modify this
         int i = ChunkStorage.getVersion(nbttagcompound);

         // CraftBukkit start
@@ -92,9 +100,11 @@ public class ChunkStorage implements AutoCloseable {
             if (i < 1493) {
                 ca.spottedleaf.dataconverter.minecraft.MCDataConverter.convertTag(ca.spottedleaf.dataconverter.minecraft.datatypes.MCTypeRegistry.CHUNK, nbttagcompound, i, 1493); // Paper - replace chunk converter
                 if (nbttagcompound.getCompound("Level").getBoolean("hasLegacyStructureData")) {
+                    synchronized (this.persistentDataLock) { // Paper - Async chunk loading
                     LegacyStructureDataHandler persistentstructurelegacy = this.getLegacyStructureHandler(resourcekey, supplier);

                     nbttagcompound = persistentstructurelegacy.updateFromLegacy(nbttagcompound);
+                    } // Paper - Async chunk loading
                 }
             }

@@ -127,7 +137,7 @@ public class ChunkStorage implements AutoCloseable {
         LegacyStructureDataHandler persistentstructurelegacy = this.legacyStructureHandler;

         if (persistentstructurelegacy == null) {
-            synchronized (this) {
+            synchronized (this.persistentDataLock) { // Paper - async chunk loading
                 persistentstructurelegacy = this.legacyStructureHandler;
                 if (persistentstructurelegacy == null) {
                     this.legacyStructureHandler = persistentstructurelegacy = LegacyStructureDataHandler.getLegacyStructureHandler(worldKey, (DimensionDataStorage) stateManagerGetter.get());
@@ -153,26 +163,49 @@
     }

     public CompletableFuture<Optional<CompoundTag>> read(ChunkPos chunkPos) {
-        return this.worker.loadAsync(chunkPos);
+        // Paper start - async chunk io
+        try {
+            return CompletableFuture.completedFuture(Optional.ofNullable(this.readSync(chunkPos)));
+        } catch (Throwable thr) {
+            return CompletableFuture.failedFuture(thr);
+        }
+    }
+    @Nullable
+    public CompoundTag readSync(ChunkPos chunkPos) throws IOException {
+        return this.regionFileCache.read(chunkPos);
     }
+    // Paper end - async chunk io

-    public void write(ChunkPos chunkPos, CompoundTag nbt) {
-        this.worker.store(chunkPos, nbt);
+    // Paper start - async chunk io
+    public void write(ChunkPos chunkPos, CompoundTag nbt) throws IOException {
+        this.regionFileCache.write(chunkPos, nbt);
+        // Paper end - Async chunk loading
         if (this.legacyStructureHandler != null) {
+            synchronized (this.persistentDataLock) { // Paper - Async chunk loading
             this.legacyStructureHandler.removeIndex(chunkPos.toLong());
+            } // Paper - Async chunk loading
         }

     }

     public void flushWorker() {
-        this.worker.synchronize(true).join();
+        io.papermc.paper.chunk.system.io.RegionFileIOThread.flush(); // Paper - rewrite chunk system
     }

     public void close() throws IOException {
-        this.worker.close();
+        this.regionFileCache.close(); // Paper - nuke IO worker
     }

     public ChunkScanAccess chunkScanner() {
-        return this.worker;
+        // Paper start - nuke IO worker
+        return ((chunkPos, streamTagVisitor) -> {
+            try {
+                this.regionFileCache.scanChunk(chunkPos, streamTagVisitor);
+                return java.util.concurrent.CompletableFuture.completedFuture(null);
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        });
+        // Paper end
     }
 }
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java
index 0ede151943109e81f66875340261d77f67f63c95..0b92db95416b878f41b83b5c74d1c0a1031ff6af 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java
@@ -31,43 +31,31 @@ public class EntityStorage implements EntityPersistentStorage<Entity> {
     private static final String ENTITIES_TAG = "Entities";
     private static final String POSITION_TAG = "Position";
     public final ServerLevel level;
-    private final IOWorker worker;
+    // Paper - rewrite chunk system
     private final LongSet emptyChunks = new LongOpenHashSet();
-    public final ProcessorMailbox<Runnable> entityDeserializerQueue;
+    // Paper - rewrite chunk system
     protected final DataFixer fixerUpper;

     public EntityStorage(ServerLevel world, Path path, DataFixer dataFixer, boolean dsync, Executor executor) {
         this.level = world;
         this.fixerUpper = dataFixer;
-        this.entityDeserializerQueue = ProcessorMailbox.create(executor, "entity-deserializer");
-        this.worker = new IOWorker(path, dsync, "entities");
+        // Paper - rewrite chunk system
     }

     @Override
     public CompletableFuture<ChunkEntities<Entity>> loadEntities(ChunkPos pos) {
-        return this.emptyChunks.contains(pos.toLong()) ? CompletableFuture.completedFuture(emptyChunk(pos)) : this.worker.loadAsync(pos).thenApplyAsync((nbt) -> {
-            if (nbt.isEmpty()) {
-                this.emptyChunks.add(pos.toLong());
-                return emptyChunk(pos);
-            } else {
-                try {
-                    ChunkPos chunkPos2 = readChunkPos(nbt.get());
-                    if (!Objects.equals(pos, chunkPos2)) {
-                        LOGGER.error("Chunk file at {} is in the wrong location. (Expected {}, got {})", pos, pos, chunkPos2);
-                    }
-                } catch (Exception var6) {
-                    LOGGER.warn("Failed to parse chunk {} position info", pos, var6);
-                }
-
-                CompoundTag compoundTag = this.upgradeChunkTag(nbt.get());
-                ListTag listTag = compoundTag.getList("Entities", 10);
-                List<Entity> list = EntityType.loadEntitiesRecursive(listTag, this.level).collect(ImmutableList.toImmutableList());
-                return new ChunkEntities<>(pos, list);
-            }
-        }, this.entityDeserializerQueue::tell);
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system - copy out read logic into readEntities
+    }
+
+    // Paper start - rewrite chunk system
+    public static List<Entity> readEntities(ServerLevel level, CompoundTag compoundTag) {
+        ListTag listTag = compoundTag.getList("Entities", 10);
+        List<Entity> list = EntityType.loadEntitiesRecursive(listTag, level).collect(ImmutableList.toImmutableList());
+        return list;
     }
+    // Paper end - rewrite chunk system

-    private static ChunkPos readChunkPos(CompoundTag chunkNbt) {
+    public static ChunkPos readChunkPos(CompoundTag chunkNbt) { // Paper - public
         int[] is = chunkNbt.getIntArray("Position");
         return new ChunkPos(is[0], is[1]);
     }
@@ -82,40 +70,68 @@ public class EntityStorage implements EntityPersistentStorage<Entity> {

     @Override
     public void storeEntities(ChunkEntities<Entity> dataList) {
+        // Paper start - rewrite chunk system
+        if (true) {
+            throw new UnsupportedOperationException();
+        }
+        // Paper end - rewrite chunk system
         ChunkPos chunkPos = dataList.getPos();
         if (dataList.isEmpty()) {
             if (this.emptyChunks.add(chunkPos.toLong())) {
-                this.worker.store(chunkPos, (CompoundTag)null);
+                // Paper - rewrite chunk system
             }

         } else {
-            ListTag listTag = new ListTag();
-            dataList.getEntities().forEach((entity) -> {
-                CompoundTag compoundTag = new CompoundTag();
-                if (entity.save(compoundTag)) {
-                    listTag.add(compoundTag);
-                }
-
-            });
-            CompoundTag compoundTag = new CompoundTag();
-            compoundTag.putInt("DataVersion", SharedConstants.getCurrentVersion().getWorldVersion());
-            compoundTag.put("Entities", listTag);
-            writeChunkPos(compoundTag, chunkPos);
-            this.worker.store(chunkPos, compoundTag).exceptionally((ex) -> {
-                LOGGER.error("Failed to store chunk {}", chunkPos, ex);
-                return null;
-            });
+            // Paper - move into saveEntityChunk0
             this.emptyChunks.remove(chunkPos.toLong());
         }
     }

+    // Paper start - rewrite chunk system
+    public static void copyEntities(final CompoundTag from, final CompoundTag into) {
+        if (from == null) {
+            return;
+        }
+        final ListTag entitiesFrom = from.getList("Entities", net.minecraft.nbt.Tag.TAG_COMPOUND);
+        if (entitiesFrom == null || entitiesFrom.isEmpty()) {
+            return;
+        }
+
+        final ListTag entitiesInto = into.getList("Entities", net.minecraft.nbt.Tag.TAG_COMPOUND);
+        into.put("Entities", entitiesInto); // this is in case into doesn't have any entities
+        entitiesInto.addAll(0, entitiesFrom.copy()); // need to copy, this is coming from the save thread
+    }
+
+    public static CompoundTag saveEntityChunk(List<Entity> entities, ChunkPos chunkPos, ServerLevel level) {
+        return saveEntityChunk0(entities, chunkPos, level, false);
+    }
+    private static CompoundTag saveEntityChunk0(List<Entity> entities, ChunkPos chunkPos, ServerLevel level, boolean force) {
+        if (!force && entities.isEmpty()) {
+            return null;
+        }
+        ListTag listTag = new ListTag();
+        entities.forEach((entity) -> { // diff here: use entities parameter
+            CompoundTag compoundTag = new CompoundTag();
+            if (entity.save(compoundTag)) {
+                listTag.add(compoundTag);
+            }
+
+        });
+        CompoundTag compoundTag = new CompoundTag();
+        compoundTag.putInt("DataVersion", SharedConstants.getCurrentVersion().getWorldVersion());
+        compoundTag.put("Entities", listTag);
+        writeChunkPos(compoundTag, chunkPos);
+
+        return !force && listTag.isEmpty() ? null : compoundTag;
+    }
+    // Paper end - rewrite chunk system
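Editor's note: saveEntityChunk0's force flag distinguishes "nothing worth writing, skip the save" (a null result) from "write an explicitly empty payload to overwrite stale data on disk". A reduced standalone model of that contract:

// sketch: null means "skip the write", forced saves may be intentionally empty
import java.util.List;

final class SaveContractSketch {
    static String save(List<String> entities, boolean force) {
        if (!force && entities.isEmpty()) {
            return null; // nothing new worth writing
        }
        // a forced save may produce an empty payload on purpose
        return "Entities=" + entities;
    }

    public static void main(String[] args) {
        System.out.println(save(List.of(), false)); // null -> caller skips the write
        System.out.println(save(List.of(), true));  // "Entities=[]" -> clobbers stale data
    }
}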
+
     @Override
     public void flush(boolean sync) {
-        this.worker.synchronize(sync).join();
-        this.entityDeserializerQueue.runAll();
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }

-    private CompoundTag upgradeChunkTag(CompoundTag chunkNbt) {
+    public static CompoundTag upgradeChunkTag(CompoundTag chunkNbt) { // Paper - public and static
         int i = getVersion(chunkNbt);
         return ca.spottedleaf.dataconverter.minecraft.MCDataConverter.convertTag(ca.spottedleaf.dataconverter.minecraft.datatypes.MCTypeRegistry.ENTITY_CHUNK, chunkNbt, i, SharedConstants.getCurrentVersion().getWorldVersion()); // Paper - route to new converter system
     }
@@ -126,6 +142,6 @@ public class EntityStorage implements EntityPersistentStorage<Entity> {

     @Override
     public void close() throws IOException {
-        this.worker.close();
+        throw new UnsupportedOperationException(); // Paper - rewrite chunk system
     }
 }
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
index d9daf07132c46548964a75588b69d7a74680e917..e68205fe7169c7c5b7c6fdada2ee97d86107ca97 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
@@ -44,6 +44,7 @@ public class RegionFile implements AutoCloseable {
     private final IntBuffer timestamps;
     @VisibleForTesting
     protected final RegionBitmap usedSectors;
+    public final java.util.concurrent.locks.ReentrantLock fileLock = new java.util.concurrent.locks.ReentrantLock(true); // Paper

     public RegionFile(Path file, Path directory, boolean dsync) throws IOException {
         this(file, directory, RegionFileVersion.VERSION_DEFLATE, dsync);
@@ -228,7 +229,7 @@ public class RegionFile implements AutoCloseable {
         return (byteCount + 4096 - 1) / 4096;
     }

-    public boolean doesChunkExist(ChunkPos pos) {
+    public synchronized boolean doesChunkExist(ChunkPos pos) { // Paper - synchronized
         int i = this.getOffset(pos);

         if (i == 0) {
@@ -393,6 +394,11 @@
     }

     public void close() throws IOException {
+        // Paper start - Prevent regionfiles from being closed during use
+        this.fileLock.lock();
+        synchronized (this) {
+        try {
+        // Paper end
         try {
             this.padToFullSector();
         } finally {
@@ -402,6 +408,10 @@
                 this.file.close();
             }
         }
+        } finally { // Paper start - Prevent regionfiles from being closed during use
+            this.fileLock.unlock();
+        }
+        } // Paper end
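Editor's note: the fair ReentrantLock added above is what makes the guarded close() safe. Every read and write holds fileLock for the duration of its I/O, and close() must win that same lock before tearing the file down, so a handle can never be closed out from under an in-flight operation. A standalone sketch of the pattern:

// sketch: a close() that queues behind in-flight I/O on a fair lock
import java.util.concurrent.locks.ReentrantLock;

final class GuardedFile implements AutoCloseable {
    final ReentrantLock fileLock = new ReentrantLock(true); // fair: closers wait behind readers
    private boolean closed;

    byte[] read() {
        this.fileLock.lock();
        try {
            if (this.closed) throw new IllegalStateException("use after close");
            return new byte[0]; // stand-in for the actual file read
        } finally {
            this.fileLock.unlock();
        }
    }

    @Override
    public void close() {
        this.fileLock.lock(); // blocks until no read/write is in flight
        try {
            this.closed = true;
        } finally {
            this.fileLock.unlock();
        }
    }
}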

     }

diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
index b9b50c56e79297bb824a92355f437a5d4d7e6760..18ef7025f7f4dc2a4aff85ca65ff5a2d35a1ef06 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
@@ -24,16 +24,37 @@ public class RegionFileStorage implements AutoCloseable {
     private final Path folder;
     private final boolean sync;

-    RegionFileStorage(Path directory, boolean dsync) {
+    protected RegionFileStorage(Path directory, boolean dsync) { // Paper - protected constructor
         this.folder = directory;
         this.sync = dsync;
     }

-    private RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly) throws IOException { // CraftBukkit
+    // Paper start
+    public synchronized RegionFile getRegionFileIfLoaded(ChunkPos chunkcoordintpair) {
+        return this.regionCache.getAndMoveToFirst(ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ()));
+    }
+
+    public synchronized boolean chunkExists(ChunkPos pos) throws IOException {
+        RegionFile regionfile = getRegionFile(pos, true);
+
+        return regionfile != null ? regionfile.hasChunk(pos) : false;
+    }
+
+    public synchronized RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly) throws IOException { // CraftBukkit
+        return this.getRegionFile(chunkcoordintpair, existingOnly, false);
+    }
+    public synchronized RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly, boolean lock) throws IOException {
+        // Paper end
         long i = ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ());
         RegionFile regionfile = (RegionFile) this.regionCache.getAndMoveToFirst(i);

         if (regionfile != null) {
+            // Paper start
+            if (lock) {
+                // must be in this synchronized block
+                regionfile.fileLock.lock();
+            }
+            // Paper end
             return regionfile;
         } else {
             if (this.regionCache.size() >= 256) {
@@ -48,6 +69,12 @@
             RegionFile regionfile1 = new RegionFile(path1, this.folder, this.sync);

             this.regionCache.putAndMoveToFirst(i, regionfile1);
+            // Paper start
+            if (lock) {
+                // must be in this synchronized block
+                regionfile1.fileLock.lock();
+            }
+            // Paper end
             return regionfile1;
         }
     }
@@ -55,11 +82,12 @@
     @Nullable
     public CompoundTag read(ChunkPos pos) throws IOException {
         // CraftBukkit start - SPIGOT-5680: There's no good reason to preemptively create files on read, save that for writing
-        RegionFile regionfile = this.getRegionFile(pos, true);
+        RegionFile regionfile = this.getRegionFile(pos, true, true); // Paper
         if (regionfile == null) {
             return null;
         }
         // CraftBukkit end
+        try { // Paper
         DataInputStream datainputstream = regionfile.getChunkDataInputStream(pos);

         CompoundTag nbttagcompound;
@@ -96,6 +124,9 @@
         }

         return nbttagcompound;
+        } finally { // Paper start
+            regionfile.fileLock.unlock();
+        } // Paper end
     }

     public void scanChunk(ChunkPos chunkPos, StreamTagVisitor scanner) throws IOException {
@@ -130,7 +161,12 @@
     }

     protected void write(ChunkPos pos, @Nullable CompoundTag nbt) throws IOException {
-        RegionFile regionfile = this.getRegionFile(pos, false); // CraftBukkit
+        RegionFile regionfile = this.getRegionFile(pos, nbt == null, true); // CraftBukkit // Paper // Paper start - rewrite chunk system
+        if (nbt == null && regionfile == null) {
+            return;
+        }
+        // Paper end - rewrite chunk system
+        try { // Paper

         if (nbt == null) {
             regionfile.clear(pos);
@@ -156,9 +192,12 @@
             }
         }

+        } finally { // Paper start
+            regionfile.fileLock.unlock();
+        } // Paper end
|
|
}
|
|
|
|
- public void close() throws IOException {
|
|
+ public synchronized void close() throws IOException { // Paper -> synchronized
|
|
ExceptionCollector<IOException> exceptionsuppressor = new ExceptionCollector<>();
|
|
ObjectIterator objectiterator = this.regionCache.values().iterator();
|
|
|
|
@@ -175,7 +214,7 @@ public class RegionFileStorage implements AutoCloseable {
|
|
exceptionsuppressor.throwIfPresent();
|
|
}
|
|
|
|
- public void flush() throws IOException {
|
|
+ public synchronized void flush() throws IOException { // Paper - synchronize
|
|
ObjectIterator objectiterator = this.regionCache.values().iterator();
|
|
|
|
while (objectiterator.hasNext()) {
|
|
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java
index 5631345dee84001be1053fbf92def683be7a97f5..c396076bfb1d41cc0f8248d6f3aa4fc3f7d1c998 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java
@@ -34,27 +34,28 @@ import net.minecraft.world.level.ChunkPos;
 import net.minecraft.world.level.LevelHeightAccessor;
 import org.slf4j.Logger;

-public class SectionStorage<R> implements AutoCloseable {
+public class SectionStorage<R> extends RegionFileStorage implements AutoCloseable { // Paper - nuke IOWorker
     private static final Logger LOGGER = LogUtils.getLogger();
     private static final String SECTIONS_TAG = "Sections";
-    private final IOWorker worker;
+    // Paper - remove mojang I/O thread
     private final Long2ObjectMap<Optional<R>> storage = new Long2ObjectOpenHashMap<>();
     private final LongLinkedOpenHashSet dirty = new LongLinkedOpenHashSet();
     private final Function<Runnable, Codec<R>> codec;
     private final Function<Runnable, R> factory;
     private final DataFixer fixerUpper;
     private final DataFixTypes type;
-    private final RegistryAccess registryAccess;
+    public final RegistryAccess registryAccess; // Paper - rewrite chunk system
     protected final LevelHeightAccessor levelHeightAccessor;

     public SectionStorage(Path path, Function<Runnable, Codec<R>> codecFactory, Function<Runnable, R> factory, DataFixer dataFixer, DataFixTypes dataFixTypes, boolean dsync, RegistryAccess dynamicRegistryManager, LevelHeightAccessor world) {
+        super(path, dsync); // Paper - remove mojang I/O thread
         this.codec = codecFactory;
         this.factory = factory;
         this.fixerUpper = dataFixer;
         this.type = dataFixTypes;
         this.registryAccess = dynamicRegistryManager;
         this.levelHeightAccessor = world;
-        this.worker = new IOWorker(path, dsync, path.getFileName().toString());
+        // Paper - remove mojang I/O thread
     }

     protected void tick(BooleanSupplier shouldKeepTicking) {
@@ -116,23 +117,21 @@ public class SectionStorage<R> implements AutoCloseable {
     }

     private void readColumn(ChunkPos pos) {
-        Optional<CompoundTag> optional = this.tryRead(pos).join();
-        RegistryOps<Tag> registryOps = RegistryOps.create(NbtOps.INSTANCE, this.registryAccess);
-        this.readColumn(pos, registryOps, optional.orElse((CompoundTag)null));
+        throw new IllegalStateException("Only chunk system can load in state, offending class:" + this.getClass().getName()); // Paper - rewrite chunk system
     }

     private CompletableFuture<Optional<CompoundTag>> tryRead(ChunkPos pos) {
-        return this.worker.loadAsync(pos).exceptionally((throwable) -> {
-            if (throwable instanceof IOException iOException) {
-                LOGGER.error("Error reading chunk {} data from disk", pos, iOException);
-                return Optional.empty();
-            } else {
-                throw new CompletionException(throwable);
-            }
-        });
+        // Paper start - rewrite chunk system
+        try {
+            return CompletableFuture.completedFuture(Optional.ofNullable(this.read(pos)));
+        } catch (Throwable thr) {
+            return CompletableFuture.failedFuture(thr);
+        }
+        // Paper end - rewrite chunk system
     }

     private <T> void readColumn(ChunkPos pos, DynamicOps<T> ops, @Nullable T data) {
+        if (true) throw new IllegalStateException("Only chunk system can load in state, offending class:" + this.getClass().getName()); // Paper - rewrite chunk system
         if (data == null) {
             for(int i = this.levelHeightAccessor.getMinSection(); i < this.levelHeightAccessor.getMaxSection(); ++i) {
                 this.storage.put(getKey(pos, i), Optional.empty());
@@ -177,7 +176,7 @@ public class SectionStorage<R> implements AutoCloseable {
         Dynamic<Tag> dynamic = this.writeColumn(pos, registryOps);
         Tag tag = dynamic.getValue();
         if (tag instanceof CompoundTag) {
-            this.worker.store(pos, (CompoundTag)tag);
+            try { this.write(pos, (CompoundTag)tag); } catch (IOException ioexception) { SectionStorage.LOGGER.error("Error writing data to disk", ioexception); } // Paper - nuke IOWorker
         } else {
             LOGGER.error("Expected compound tag, got {}", (Object)tag);
         }
@@ -222,7 +221,7 @@ public class SectionStorage<R> implements AutoCloseable {
     }

     private static int getVersion(Dynamic<?> dynamic) {
-        return dynamic.get("DataVersion").asInt(1945);
+        return dynamic.get("DataVersion").asInt(1945); // Paper - diff on change, constant used in ChunkLoadTask
     }

     public void flush(ChunkPos pos) {
@@ -240,6 +239,9 @@ public class SectionStorage<R> implements AutoCloseable {

     @Override
     public void close() throws IOException {
-        this.worker.close();
+        //this.worker.close(); // Paper - nuke I/O worker - don't call the worker
+        super.close(); // Paper - nuke I/O worker - call super.close method which is responsible for closing used files.
     }
+
+    // Paper - rewrite chunk system
 }
diff --git a/src/main/java/net/minecraft/world/level/entity/EntityTickList.java b/src/main/java/net/minecraft/world/level/entity/EntityTickList.java
index 2830d32bba3dc85847e3a5d9b4d98f822e34b606..4cdfc433df67afcd455422e9baf56f167dd712ae 100644
--- a/src/main/java/net/minecraft/world/level/entity/EntityTickList.java
+++ b/src/main/java/net/minecraft/world/level/entity/EntityTickList.java
@@ -8,54 +8,42 @@ import javax.annotation.Nullable;
 import net.minecraft.world.entity.Entity;

 public class EntityTickList {
-    private Int2ObjectMap<Entity> active = new Int2ObjectLinkedOpenHashMap<>();
-    private Int2ObjectMap<Entity> passive = new Int2ObjectLinkedOpenHashMap<>();
-    @Nullable
-    private Int2ObjectMap<Entity> iterated;
+    private final io.papermc.paper.util.maplist.IteratorSafeOrderedReferenceSet<Entity> entities = new io.papermc.paper.util.maplist.IteratorSafeOrderedReferenceSet<>(true); // Paper - rewrite this, always keep this updated - why would we EVER tick an entity that's not ticking?

     private void ensureActiveIsNotIterated() {
-        if (this.iterated == this.active) {
-            this.passive.clear();
-
-            for(Int2ObjectMap.Entry<Entity> entry : Int2ObjectMaps.fastIterable(this.active)) {
-                this.passive.put(entry.getIntKey(), entry.getValue());
-            }
-
-            Int2ObjectMap<Entity> int2ObjectMap = this.active;
-            this.active = this.passive;
-            this.passive = int2ObjectMap;
-        }
+        // Paper - replace with better logic, do not delay removals

     }

     public void add(Entity entity) {
+        io.papermc.paper.util.TickThread.ensureTickThread("Asynchronous entity ticklist addition"); // Paper
         this.ensureActiveIsNotIterated();
-        this.active.put(entity.getId(), entity);
+        this.entities.add(entity); // Paper - replace with better logic, do not delay removals/additions
     }

     public void remove(Entity entity) {
+        io.papermc.paper.util.TickThread.ensureTickThread("Asynchronous entity ticklist removal"); // Paper
         this.ensureActiveIsNotIterated();
-        this.active.remove(entity.getId());
+        this.entities.remove(entity); // Paper - replace with better logic, do not delay removals/additions
     }

     public boolean contains(Entity entity) {
-        return this.active.containsKey(entity.getId());
+        return this.entities.contains(entity); // Paper - replace with better logic, do not delay removals/additions
     }

     public void forEach(Consumer<Entity> action) {
-        if (this.iterated != null) {
-            throw new UnsupportedOperationException("Only one concurrent iteration supported");
-        } else {
-            this.iterated = this.active;
-
-            try {
-                for(Entity entity : this.active.values()) {
-                    action.accept(entity);
-                }
-            } finally {
-                this.iterated = null;
+        io.papermc.paper.util.TickThread.ensureTickThread("Asynchronous entity ticklist iteration"); // Paper
+        // Paper start - replace with better logic, do not delay removals/additions
+        // To ensure nothing weird happens with dimension travelling, do not iterate over new entries...
+        // (by dfl iterator() is configured to not iterate over new entries)
+        io.papermc.paper.util.maplist.IteratorSafeOrderedReferenceSet.Iterator<Entity> iterator = this.entities.iterator();
+        try {
+            while (iterator.hasNext()) {
+                action.accept(iterator.next());
             }
-
+        } finally {
+            iterator.finishedIterating();
         }
+        // Paper end - replace with better logic, do not delay removals/additions
     }
 }
diff --git a/src/main/java/net/minecraft/world/level/levelgen/NoiseBasedChunkGenerator.java b/src/main/java/net/minecraft/world/level/levelgen/NoiseBasedChunkGenerator.java
index e6c22cf9a682a88b271a78d8ca0b312c91a60177..cdc87f14e5d6336856c74d5089e50ae5ebad9923 100644
--- a/src/main/java/net/minecraft/world/level/levelgen/NoiseBasedChunkGenerator.java
+++ b/src/main/java/net/minecraft/world/level/levelgen/NoiseBasedChunkGenerator.java
@@ -87,7 +87,7 @@ public final class NoiseBasedChunkGenerator extends ChunkGenerator {
         return CompletableFuture.supplyAsync(Util.wrapThreadWithTaskName("init_biomes", () -> {
             this.doCreateBiomes(blender, noiseConfig, structureAccessor, chunk);
             return chunk;
-        }), Util.backgroundExecutor());
+        }), executor); // Paper - run with supplied executor
     }

     private void doCreateBiomes(Blender blender, RandomState noiseConfig, StructureManager structureAccessor, ChunkAccess chunk) {
@@ -286,7 +286,7 @@ public final class NoiseBasedChunkGenerator extends ChunkGenerator {

         return CompletableFuture.supplyAsync(Util.wrapThreadWithTaskName("wgen_fill_noise", () -> {
             return this.doFill(blender, structureAccessor, noiseConfig, chunk, j, k);
-        }), Util.backgroundExecutor()).whenCompleteAsync((ichunkaccess1, throwable) -> {
+        }), executor).whenCompleteAsync((ichunkaccess1, throwable) -> { // Paper - run with supplied executor
             Iterator iterator = set.iterator();

             while (iterator.hasNext()) {
diff --git a/src/main/java/net/minecraft/world/ticks/LevelChunkTicks.java b/src/main/java/net/minecraft/world/ticks/LevelChunkTicks.java
index 9f6c2e5b5d9e8d714a47c770e255d06c0ef7c190..ac807277a6b26d140ea9873d17c7aa4fb5fe37b2 100644
--- a/src/main/java/net/minecraft/world/ticks/LevelChunkTicks.java
+++ b/src/main/java/net/minecraft/world/ticks/LevelChunkTicks.java
@@ -25,6 +25,19 @@ public class LevelChunkTicks<T> implements SerializableTickContainer<T>, TickCon
     @Nullable
     private BiConsumer<LevelChunkTicks<T>, ScheduledTick<T>> onTickAdded;

+    // Paper start - add dirty flag
+    private boolean dirty;
+    private long lastSaved = Long.MIN_VALUE;
+
+    public boolean isDirty(final long tick) {
+        return this.dirty || (!this.tickQueue.isEmpty() && tick != this.lastSaved);
+    }
+
+    public void clearDirty() {
+        this.dirty = false;
+    }
+    // Paper end - add dirty flag
+
     public LevelChunkTicks() {
     }

@@ -50,6 +63,7 @@ public class LevelChunkTicks<T> implements SerializableTickContainer<T>, TickCon
     public ScheduledTick<T> poll() {
         ScheduledTick<T> scheduledTick = this.tickQueue.poll();
         if (scheduledTick != null) {
+            this.dirty = true; // Paper - add dirty flag
             this.ticksPerPosition.remove(scheduledTick);
         }

@@ -59,6 +73,7 @@ public class LevelChunkTicks<T> implements SerializableTickContainer<T>, TickCon
     @Override
     public void schedule(ScheduledTick<T> orderedTick) {
         if (this.ticksPerPosition.add(orderedTick)) {
+            this.dirty = true; // Paper - add dirty flag
             this.scheduleUnchecked(orderedTick);
         }

@@ -83,7 +98,7 @@ public class LevelChunkTicks<T> implements SerializableTickContainer<T>, TickCon
         while(iterator.hasNext()) {
             ScheduledTick<T> scheduledTick = iterator.next();
             if (predicate.test(scheduledTick)) {
-                iterator.remove();
+                iterator.remove(); this.dirty = true; // Paper - add dirty flag
                 this.ticksPerPosition.remove(scheduledTick);
             }
         }
@@ -101,6 +116,7 @@ public class LevelChunkTicks<T> implements SerializableTickContainer<T>, TickCon

     @Override
     public ListTag save(long l, Function<T, String> function) {
+        this.lastSaved = l; // Paper - add dirty system to level ticks
         ListTag listTag = new ListTag();
         if (this.pendingTicks != null) {
             for(SavedTick<T> savedTick : this.pendingTicks) {
@@ -117,6 +133,11 @@ public class LevelChunkTicks<T> implements SerializableTickContainer<T>, TickCon

     public void unpack(long time) {
         if (this.pendingTicks != null) {
+            // Paper start - add dirty system to level chunk ticks
+            if (this.tickQueue.isEmpty()) {
+                this.lastSaved = time;
+            }
+            // Paper end - add dirty system to level chunk ticks
             int i = -this.pendingTicks.size();

             for(SavedTick<T> savedTick : this.pendingTicks) {
diff --git a/src/main/java/org/bukkit/craftbukkit/CraftChunk.java b/src/main/java/org/bukkit/craftbukkit/CraftChunk.java
index 738d3ce38a42ff8cd53eec042ef8bc74f2b8d059..a895c81ea6af0822c8371ae93cfe4b72d419439d 100644
--- a/src/main/java/org/bukkit/craftbukkit/CraftChunk.java
+++ b/src/main/java/org/bukkit/craftbukkit/CraftChunk.java
@@ -120,7 +120,7 @@ public class CraftChunk implements Chunk {

     @Override
     public boolean isEntitiesLoaded() {
-        return this.getCraftWorld().getHandle().entityManager.areEntitiesLoaded(ChunkPos.asLong(x, z));
+        return this.getCraftWorld().getHandle().areEntitiesLoaded(io.papermc.paper.util.CoordinateUtils.getChunkKey(this.x, this.z)); // Paper - rewrite chunk system
     }

     @Override
@@ -129,51 +129,7 @@ public class CraftChunk implements Chunk {
             this.getWorld().getChunkAt(x, z); // Transient load for this tick
         }

-        PersistentEntitySectionManager<net.minecraft.world.entity.Entity> entityManager = this.getCraftWorld().getHandle().entityManager;
-        long pair = ChunkPos.asLong(x, z);
-
-        if (entityManager.areEntitiesLoaded(pair)) {
-            return entityManager.getEntities(new ChunkPos(this.x, this.z)).stream()
-                    .map(net.minecraft.world.entity.Entity::getBukkitEntity)
-                    .filter(Objects::nonNull).toArray(Entity[]::new);
-        }
-
-        entityManager.ensureChunkQueuedForLoad(pair); // Start entity loading
-
-        // SPIGOT-6772: Use entity mailbox and re-schedule entities if they get unloaded
-        ProcessorMailbox<Runnable> mailbox = ((EntityStorage) entityManager.permanentStorage).entityDeserializerQueue;
-        BooleanSupplier supplier = () -> {
-            // only execute inbox if our entities are not present
-            if (entityManager.areEntitiesLoaded(pair)) {
-                return true;
-            }
-
-            if (!entityManager.isPending(pair)) {
-                // Our entities got unloaded, this should normally not happen.
-                entityManager.ensureChunkQueuedForLoad(pair); // Re-start entity loading
-            }
-
-            // tick loading inbox, which loads the created entities to the world
-            // (if present)
-            entityManager.tick();
-            // check if our entities are loaded
-            return entityManager.areEntitiesLoaded(pair);
-        };
-
-        // now we wait until the entities are loaded,
-        // the converting from NBT to entity object is done on the main Thread which is why we wait
-        while (!supplier.getAsBoolean()) {
-            if (mailbox.size() != 0) {
-                mailbox.run();
-            } else {
-                Thread.yield();
-                LockSupport.parkNanos("waiting for entity loading", 100000L);
-            }
-        }
-
-        return entityManager.getEntities(new ChunkPos(this.x, this.z)).stream()
-                .map(net.minecraft.world.entity.Entity::getBukkitEntity)
-                .filter(Objects::nonNull).toArray(Entity[]::new);
+        return getCraftWorld().getHandle().getChunkEntities(this.x, this.z); // Paper - rewrite chunk system
     }

     @Override
diff --git a/src/main/java/org/bukkit/craftbukkit/CraftServer.java b/src/main/java/org/bukkit/craftbukkit/CraftServer.java
index c684460c91ad41ba9741616c25dae5c3efede704..720a078acfe770dba76413a8cfa4ef15616ae292 100644
--- a/src/main/java/org/bukkit/craftbukkit/CraftServer.java
+++ b/src/main/java/org/bukkit/craftbukkit/CraftServer.java
@@ -1142,7 +1142,7 @@ public final class CraftServer implements Server {
         this.console.addLevel(internal);

         this.getServer().prepareLevels(internal.getChunkSource().chunkMap.progressListener, internal);
-        internal.entityManager.tick(); // SPIGOT-6526: Load pending entities so they are available to the API
+        //internal.entityManager.tick(); // SPIGOT-6526: Load pending entities so they are available to the API // Paper - rewrite chunk system

         this.pluginManager.callEvent(new WorldLoadEvent(internal.getWorld()));
         return internal.getWorld();
@@ -1186,7 +1186,7 @@ public final class CraftServer implements Server {
             }

             handle.getChunkSource().close(save);
-            handle.entityManager.close(save); // SPIGOT-6722: close entityManager
+            // handle.entityManager.close(save); // SPIGOT-6722: close entityManager // Paper - rewrite chunk system
             handle.convertable.close();
         } catch (Exception ex) {
             this.getLogger().log(Level.SEVERE, null, ex);
@@ -2001,7 +2001,7 @@ public final class CraftServer implements Server {

     @Override
     public boolean isPrimaryThread() {
-        return Thread.currentThread().equals(console.serverThread) || this.console.hasStopped() || !org.spigotmc.AsyncCatcher.enabled; // All bets are off if we have shut down (e.g. due to watchdog)
+        return io.papermc.paper.util.TickThread.isTickThread(); // Paper - rewrite chunk system
     }

     // Paper start
diff --git a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
index fd1da8431578832bcd1f3ca7890dd2150b916ebd..958c5faeddd3ee54ac2880eb3eb9e4ab2bba2540 100644
--- a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
+++ b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java
@@ -313,10 +313,14 @@ public class CraftWorld extends CraftRegionAccessor implements World {
         ChunkHolder playerChunk = this.world.getChunkSource().chunkMap.getVisibleChunkIfPresent(ChunkPos.asLong(x, z));
         if (playerChunk == null) return false;

-        playerChunk.getTickingChunkFuture().thenAccept(either -> {
-            either.left().ifPresent(chunk -> {
+        // Paper start - rewrite player chunk loader
+        net.minecraft.world.level.chunk.LevelChunk chunk = playerChunk.getSendingChunk();
+        if (chunk == null) {
+            return false;
+        }
+        // Paper end - rewrite player chunk loader
                 List<ServerPlayer> playersInRange = playerChunk.playerProvider.getPlayers(playerChunk.getPos(), false);
-                if (playersInRange.isEmpty()) return;
+        if (playersInRange.isEmpty()) return true; // Paper - rewrite player chunk loader

                 ClientboundLevelChunkWithLightPacket refreshPacket = new ClientboundLevelChunkWithLightPacket(chunk, this.world.getLightEngine(), null, null, true);
                 for (ServerPlayer player : playersInRange) {
@@ -324,8 +328,7 @@ public class CraftWorld extends CraftRegionAccessor implements World {

                     player.connection.send(refreshPacket);
                 }
-            });
-        });
+        // Paper - rewrite player chunk loader

         return true;
     }
@@ -403,20 +406,7 @@ public class CraftWorld extends CraftRegionAccessor implements World {
     @Override
     public Collection<Plugin> getPluginChunkTickets(int x, int z) {
         DistanceManager chunkDistanceManager = this.world.getChunkSource().chunkMap.distanceManager;
-        SortedArraySet<Ticket<?>> tickets = chunkDistanceManager.tickets.get(ChunkPos.asLong(x, z));
-
-        if (tickets == null) {
-            return Collections.emptyList();
-        }
-
-        ImmutableList.Builder<Plugin> ret = ImmutableList.builder();
-        for (Ticket<?> ticket : tickets) {
-            if (ticket.getType() == TicketType.PLUGIN_TICKET) {
-                ret.add((Plugin) ticket.key);
-            }
-        }
-
-        return ret.build();
+        return chunkDistanceManager.getChunkHolderManager().getPluginChunkTickets(x, z); // Paper - rewrite chunk system
     }

     @Override
@@ -424,7 +414,7 @@ public class CraftWorld extends CraftRegionAccessor implements World {
         Map<Plugin, ImmutableList.Builder<Chunk>> ret = new HashMap<>();
         DistanceManager chunkDistanceManager = this.world.getChunkSource().chunkMap.distanceManager;

-        for (Long2ObjectMap.Entry<SortedArraySet<Ticket<?>>> chunkTickets : chunkDistanceManager.tickets.long2ObjectEntrySet()) {
+        for (Long2ObjectMap.Entry<SortedArraySet<Ticket<?>>> chunkTickets : chunkDistanceManager.getChunkHolderManager().getTicketsCopy().long2ObjectEntrySet()) { // Paper - rewrite chunk system
             long chunkKey = chunkTickets.getLongKey();
             SortedArraySet<Ticket<?>> tickets = chunkTickets.getValue();

@@ -1920,14 +1910,53 @@ public class CraftWorld extends CraftRegionAccessor implements World {
     // Spigot start
     @Override
     public int getViewDistance() {
-        return world.spigotConfig.viewDistance;
+        return getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance(); // Paper - replace old player chunk management
     }

     @Override
     public int getSimulationDistance() {
-        return world.spigotConfig.simulationDistance;
+        return getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance(); // Paper - replace old player chunk management
     }
     // Spigot end
+    // Paper start - view distance api
+    @Override
+    public void setViewDistance(int viewDistance) {
+        if (viewDistance < 2 || viewDistance > 32) {
+            throw new IllegalArgumentException("View distance " + viewDistance + " is out of range of [2, 32]");
+        }
+        net.minecraft.server.level.ChunkMap chunkMap = getHandle().getChunkSource().chunkMap;
+        chunkMap.setViewDistance(viewDistance);
+    }
+
+    @Override
+    public void setSimulationDistance(int simulationDistance) {
+        if (simulationDistance < 2 || simulationDistance > 32) {
+            throw new IllegalArgumentException("Simulation distance " + simulationDistance + " is out of range of [2, 32]");
+        }
+        net.minecraft.server.level.ChunkMap chunkMap = getHandle().getChunkSource().chunkMap;
+        chunkMap.setTickViewDistance(simulationDistance);
+    }
+
+    @Override
+    public int getNoTickViewDistance() {
+        return this.getViewDistance();
+    }
+
+    @Override
+    public void setNoTickViewDistance(int viewDistance) {
+        this.setViewDistance(viewDistance);
+    }
+
+    @Override
+    public int getSendViewDistance() {
+        return getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance();
+    }
+
+    @Override
+    public void setSendViewDistance(int viewDistance) {
+        getHandle().getChunkSource().chunkMap.playerChunkManager.setSendDistance(viewDistance);
+    }
+    // Paper end - view distance api

     // Spigot start
     private final org.bukkit.World.Spigot spigot = new org.bukkit.World.Spigot()
diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java
index c0b89ccf7b7776d010e6e3632a43b2114ac6c128..8dc5aeaf9525dca447f718153d6210a039aa080e 100644
--- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java
+++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java
@@ -176,6 +176,81 @@ public class CraftPlayer extends CraftHumanEntity implements Player {
         this.firstPlayed = System.currentTimeMillis();
     }

+    // Paper start - implement view distances
+    @Override
+    public int getViewDistance() {
+        net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+        io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+        if (data == null) {
+            return chunkMap.playerChunkManager.getTargetNoTickViewDistance();
+        }
+        return data.getTargetNoTickViewDistance();
+    }
+
+    @Override
+    public void setViewDistance(int viewDistance) {
+        net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+        io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+        if (data == null) {
+            throw new IllegalStateException("Player is not attached to world");
+        }
+
+        data.setTargetNoTickViewDistance(viewDistance);
+    }
+
+    @Override
+    public int getSimulationDistance() {
+        net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+        io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+        if (data == null) {
+            return chunkMap.playerChunkManager.getTargetTickViewDistance();
+        }
+        return data.getTargetTickViewDistance();
+    }
+
+    @Override
+    public void setSimulationDistance(int simulationDistance) {
+        net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+        io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+        if (data == null) {
+            throw new IllegalStateException("Player is not attached to world");
+        }
+
+        data.setTargetTickViewDistance(simulationDistance);
+    }
+
+    @Override
+    public int getNoTickViewDistance() {
+        return this.getViewDistance();
+    }
+
+    @Override
+    public void setNoTickViewDistance(int viewDistance) {
+        this.setViewDistance(viewDistance);
+    }
+
+    @Override
+    public int getSendViewDistance() {
+        net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+        io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+        if (data == null) {
+            return chunkMap.playerChunkManager.getTargetSendDistance();
+        }
+        return data.getTargetSendViewDistance();
+    }
+
+    @Override
+    public void setSendViewDistance(int viewDistance) {
+        net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap;
+        io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle());
+        if (data == null) {
+            throw new IllegalStateException("Player is not attached to world");
+        }
+
+        data.setTargetSendViewDistance(viewDistance);
+    }
+    // Paper end - implement view distances
+
     public GameProfile getProfile() {
         return this.getHandle().getGameProfile();
     }
diff --git a/src/main/java/org/bukkit/craftbukkit/generator/CustomChunkGenerator.java b/src/main/java/org/bukkit/craftbukkit/generator/CustomChunkGenerator.java
index 61767be202f95df16155e7a4c36701b21619f317..a9673a804d597599c35c83f4f245510c83005328 100644
--- a/src/main/java/org/bukkit/craftbukkit/generator/CustomChunkGenerator.java
+++ b/src/main/java/org/bukkit/craftbukkit/generator/CustomChunkGenerator.java
@@ -270,7 +270,7 @@ public class CustomChunkGenerator extends InternalChunkGenerator {
             return ichunkaccess1;
         };

-        return future == null ? CompletableFuture.supplyAsync(() -> function.apply(chunk), net.minecraft.Util.backgroundExecutor()) : future.thenApply(function);
+        return future == null ? CompletableFuture.supplyAsync(() -> function.apply(chunk), executor) : future.thenApply(function); // Paper - run with supplied executor
     }

     @Override
diff --git a/src/main/java/org/bukkit/craftbukkit/util/DummyGeneratorAccess.java b/src/main/java/org/bukkit/craftbukkit/util/DummyGeneratorAccess.java
index 9a80e0c390d13453a4a79e00d18c20b79afd3c7f..57acc7195f83b23c9b84ec4b94e0d7ab22851604 100644
--- a/src/main/java/org/bukkit/craftbukkit/util/DummyGeneratorAccess.java
+++ b/src/main/java/org/bukkit/craftbukkit/util/DummyGeneratorAccess.java
@@ -259,4 +259,20 @@ public class DummyGeneratorAccess implements WorldGenLevel {
     public boolean destroyBlock(BlockPos pos, boolean drop, Entity breakingEntity, int maxUpdateDepth) {
         return false; // SPIGOT-6515
     }
+
+    // Paper start
+    @Override
+    public List<Entity> getHardCollidingEntities(Entity except, AABB box, Predicate<? super Entity> predicate) {
+        return java.util.Collections.emptyList();
+    }
+
+    @Override
+    public void getEntities(Entity except, AABB box, Predicate<? super Entity> predicate, List<Entity> into) {}
+
+    @Override
+    public void getHardCollidingEntities(Entity except, AABB box, Predicate<? super Entity> predicate, List<Entity> into) {}
+
+    @Override
+    public <T> void getEntitiesByClass(Class<? extends T> clazz, Entity except, AABB box, List<? super T> into, Predicate<? super T> predicate) {}
+    // Paper end
 }
diff --git a/src/main/java/org/spigotmc/ActivationRange.java b/src/main/java/org/spigotmc/ActivationRange.java
index 38cf408899cef72bc9d2888109a7ac7ce0aec638..d3639643cda7d8ccf3c1208502605120590a2d30 100644
--- a/src/main/java/org/spigotmc/ActivationRange.java
+++ b/src/main/java/org/spigotmc/ActivationRange.java
@@ -132,7 +132,13 @@ public class ActivationRange
             ActivationType.ANIMAL.boundingBox = player.getBoundingBox().inflate( animalActivationRange, 256, animalActivationRange );
             ActivationType.MONSTER.boundingBox = player.getBoundingBox().inflate( monsterActivationRange, 256, monsterActivationRange );

-            world.getEntities().get(maxBB, ActivationRange::activateEntity);
+            // Paper start
+            java.util.List<Entity> entities = world.getEntities((Entity)null, maxBB, null);
+            for (int i = 0; i < entities.size(); i++) {
+                Entity entity = entities.get(i);
+                ActivationRange.activateEntity(entity);
+            }
+            // Paper end
         }
         MinecraftTimings.entityActivationCheckTimer.stopTiming();
     }
diff --git a/src/main/java/org/spigotmc/AsyncCatcher.java b/src/main/java/org/spigotmc/AsyncCatcher.java
index 78669fa035b7537ff7e533cf32aaf2995625424f..05e94702e42b8f5c35d2a112c486d57948a3acba 100644
--- a/src/main/java/org/spigotmc/AsyncCatcher.java
+++ b/src/main/java/org/spigotmc/AsyncCatcher.java
@@ -9,7 +9,7 @@ public class AsyncCatcher

     public static void catchOp(String reason)
     {
-        if ( (AsyncCatcher.enabled || io.papermc.paper.util.TickThread.STRICT_THREAD_CHECKS) && Thread.currentThread() != MinecraftServer.getServer().serverThread ) // Paper
+        if ( !io.papermc.paper.util.TickThread.isTickThread() ) // Paper // Paper - rewrite chunk system
         {
             throw new IllegalStateException( "Asynchronous " + reason + "!" );
         }
diff --git a/src/main/java/org/spigotmc/WatchdogThread.java b/src/main/java/org/spigotmc/WatchdogThread.java
index 335120afc88a8fc1543c2e6df516fd728e3ab032..f1194eb6fdfba60959e00080d0562f2820d13b27 100644
--- a/src/main/java/org/spigotmc/WatchdogThread.java
+++ b/src/main/java/org/spigotmc/WatchdogThread.java
@@ -8,7 +8,7 @@ import java.util.logging.Logger;
 import net.minecraft.server.MinecraftServer;
 import org.bukkit.Bukkit;

-public class WatchdogThread extends Thread
+public final class WatchdogThread extends io.papermc.paper.util.TickThread // Paper - rewrite chunk system
 {

     private static WatchdogThread instance;
@@ -83,6 +83,7 @@ public class WatchdogThread extends Thread
                 //
                 log.log( Level.SEVERE, "------------------------------" );
                 log.log( Level.SEVERE, "Server thread dump (Look for plugins here before reporting to Spigot!):" );
+                io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.dumpAllChunkLoadInfo(isLongTimeout); // Paper // Paper - rewrite chunk system
                 WatchdogThread.dumpThread( ManagementFactory.getThreadMXBean().getThreadInfo( MinecraftServer.getServer().serverThread.getId(), Integer.MAX_VALUE ), log );
                 log.log( Level.SEVERE, "------------------------------" );
                 //