diff --git a/patches/server/0019-Rewrite-chunk-system.patch b/patches/server/0019-Rewrite-chunk-system.patch new file mode 100644 index 0000000000..c3959dbe25 --- /dev/null +++ b/patches/server/0019-Rewrite-chunk-system.patch @@ -0,0 +1,18241 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Spottedleaf +Date: Thu, 11 Mar 2021 02:32:30 -0800 +Subject: [PATCH] Rewrite chunk system + +== AT == +public net.minecraft.server.level.ChunkMap setViewDistance(I)V +public net.minecraft.server.level.ChunkHolder pos +public net.minecraft.server.level.ChunkMap overworldDataStorage +public-f net.minecraft.world.level.chunk.storage.RegionFileStorage +public net.minecraft.server.level.ChunkMap getPoiManager()Lnet/minecraft/world/entity/ai/village/poi/PoiManager; + +diff --git a/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java b/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java +index 146c78a333e47cb4d8aa97700e19a12ca176ce76..691239e65b0870ceb0d071b57793cff9b2593f62 100644 +--- a/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java ++++ b/src/main/java/ca/spottedleaf/starlight/common/light/StarLightInterface.java +@@ -41,14 +41,14 @@ public final class StarLightInterface { + protected final ArrayDeque cachedSkyPropagators; + protected final ArrayDeque cachedBlockPropagators; + +- protected final LightQueue lightQueue = new LightQueue(this); ++ public final io.papermc.paper.chunk.system.light.LightQueue lightQueue; // Paper - replace light queue + + protected final LayerLightEventListener skyReader; + protected final LayerLightEventListener blockReader; + protected final boolean isClientSide; + +- protected final int minSection; +- protected final int maxSection; ++ public final int minSection; // Paper - public ++ public final int maxSection; // Paper - public + protected final int minLightSection; + protected final int maxLightSection; + +@@ -182,6 +182,7 @@ public final class StarLightInterface { + StarLightInterface.this.sectionChange(pos, notReady); + } + }; ++ this.lightQueue = new io.papermc.paper.chunk.system.light.LightQueue(this); // Paper - replace light queue + } + + public boolean hasSkyLight() { +@@ -333,7 +334,7 @@ public final class StarLightInterface { + return this.lightAccess; + } + +- protected final SkyStarLightEngine getSkyLightEngine() { ++ public final SkyStarLightEngine getSkyLightEngine() { // Paper - public + if (this.cachedSkyPropagators == null) { + return null; + } +@@ -348,7 +349,7 @@ public final class StarLightInterface { + return ret; + } + +- protected final void releaseSkyLightEngine(final SkyStarLightEngine engine) { ++ public final void releaseSkyLightEngine(final SkyStarLightEngine engine) { // Paper - public + if (this.cachedSkyPropagators == null) { + return; + } +@@ -357,7 +358,7 @@ public final class StarLightInterface { + } + } + +- protected final BlockStarLightEngine getBlockLightEngine() { ++ public final BlockStarLightEngine getBlockLightEngine() { // Paper - public + if (this.cachedBlockPropagators == null) { + return null; + } +@@ -372,7 +373,7 @@ public final class StarLightInterface { + return ret; + } + +- protected final void releaseBlockLightEngine(final BlockStarLightEngine engine) { ++ public final void releaseBlockLightEngine(final BlockStarLightEngine engine) { // Paper - public + if (this.cachedBlockPropagators == null) { + return; + } +@@ -519,57 +520,15 @@ public final class StarLightInterface { + } + + public void scheduleChunkLight(final 
ChunkPos pos, final Runnable run) { +- this.lightQueue.queueChunkLighting(pos, run); ++ throw new UnsupportedOperationException("No longer implemented, use the new lightQueue field to queue tasks"); // Paper - replace light queue + } + + public void removeChunkTasks(final ChunkPos pos) { +- this.lightQueue.removeChunk(pos); ++ throw new UnsupportedOperationException("No longer implemented, use the new lightQueue field to queue tasks"); // Paper - replace light queue + } + + public void propagateChanges() { +- if (this.lightQueue.isEmpty()) { +- return; +- } +- +- final SkyStarLightEngine skyEngine = this.getSkyLightEngine(); +- final BlockStarLightEngine blockEngine = this.getBlockLightEngine(); +- +- try { +- LightQueue.ChunkTasks task; +- while ((task = this.lightQueue.removeFirstTask()) != null) { +- if (task.lightTasks != null) { +- for (final Runnable run : task.lightTasks) { +- run.run(); +- } +- } +- +- final long coordinate = task.chunkCoordinate; +- final int chunkX = CoordinateUtils.getChunkX(coordinate); +- final int chunkZ = CoordinateUtils.getChunkZ(coordinate); +- +- final Set positions = task.changedPositions; +- final Boolean[] sectionChanges = task.changedSectionSet; +- +- if (skyEngine != null && (!positions.isEmpty() || sectionChanges != null)) { +- skyEngine.blocksChangedInChunk(this.lightAccess, chunkX, chunkZ, positions, sectionChanges); +- } +- if (blockEngine != null && (!positions.isEmpty() || sectionChanges != null)) { +- blockEngine.blocksChangedInChunk(this.lightAccess, chunkX, chunkZ, positions, sectionChanges); +- } +- +- if (skyEngine != null && task.queuedEdgeChecksSky != null) { +- skyEngine.checkChunkEdges(this.lightAccess, chunkX, chunkZ, task.queuedEdgeChecksSky); +- } +- if (blockEngine != null && task.queuedEdgeChecksBlock != null) { +- blockEngine.checkChunkEdges(this.lightAccess, chunkX, chunkZ, task.queuedEdgeChecksBlock); +- } +- +- task.onComplete.complete(null); +- } +- } finally { +- this.releaseSkyLightEngine(skyEngine); +- this.releaseBlockLightEngine(blockEngine); +- } ++ throw new UnsupportedOperationException("No longer implemented, task draining is now performed by the light thread"); // Paper - replace light queue + } + + protected static final class LightQueue { +diff --git a/src/main/java/co/aikar/timings/TimingsExport.java b/src/main/java/co/aikar/timings/TimingsExport.java +index 38f01952153348d937e326da0ec102cd9b0f80af..43380d5e3a40b64bebdf3c0e7c48eca8998c8ac0 100644 +--- a/src/main/java/co/aikar/timings/TimingsExport.java ++++ b/src/main/java/co/aikar/timings/TimingsExport.java +@@ -163,7 +163,11 @@ public class TimingsExport extends Thread { + pair("gamerules", toObjectMapper(world.getWorld().getGameRules(), rule -> { + return pair(rule, world.getWorld().getGameRuleValue(rule)); + })), +- pair("ticking-distance", world.getChunkSource().chunkMap.getEffectiveViewDistance()) ++ // Paper start - replace chunk loader system ++ pair("ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance()), ++ pair("no-ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance()), ++ pair("sending-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance()) ++ // Paper end - replace chunk loader system + )); + })); + +diff --git a/src/main/java/co/aikar/timings/WorldTimingsHandler.java b/src/main/java/co/aikar/timings/WorldTimingsHandler.java +index 2f0d9b953802dee821cfde82d22b0567cce8ee91..22687667ec69a954261e55e59261286ac1b8b8cd 100644 +--- 
a/src/main/java/co/aikar/timings/WorldTimingsHandler.java ++++ b/src/main/java/co/aikar/timings/WorldTimingsHandler.java +@@ -59,6 +59,16 @@ public class WorldTimingsHandler { + + public final Timing miscMobSpawning; + ++ public final Timing poiUnload; ++ public final Timing chunkUnload; ++ public final Timing poiSaveDataSerialization; ++ public final Timing chunkSave; ++ public final Timing chunkSaveDataSerialization; ++ public final Timing chunkSaveIOWait; ++ public final Timing chunkUnloadPrepareSave; ++ public final Timing chunkUnloadPOISerialization; ++ public final Timing chunkUnloadDataSave; ++ + public WorldTimingsHandler(Level server) { + String name = ((PrimaryLevelData) server.getLevelData()).getLevelName() + " - "; + +@@ -112,6 +122,16 @@ public class WorldTimingsHandler { + + + miscMobSpawning = Timings.ofSafe(name + "Mob spawning - Misc"); ++ ++ poiUnload = Timings.ofSafe(name + "Chunk unload - POI"); ++ chunkUnload = Timings.ofSafe(name + "Chunk unload - Chunk"); ++ poiSaveDataSerialization = Timings.ofSafe(name + "Chunk save - POI Data serialization"); ++ chunkSave = Timings.ofSafe(name + "Chunk save - Chunk"); ++ chunkSaveDataSerialization = Timings.ofSafe(name + "Chunk save - Chunk Data serialization"); ++ chunkSaveIOWait = Timings.ofSafe(name + "Chunk save - Chunk IO Wait"); ++ chunkUnloadPrepareSave = Timings.ofSafe(name + "Chunk unload - Async Save Prepare"); ++ chunkUnloadPOISerialization = Timings.ofSafe(name + "Chunk unload - POI Data Serialization"); ++ chunkUnloadDataSave = Timings.ofSafe(name + "Chunk unload - Data Serialization"); + } + + public static Timing getTickList(ServerLevel worldserver, String timingsType) { +diff --git a/src/main/java/com/destroystokyo/paper/io/IOUtil.java b/src/main/java/com/destroystokyo/paper/io/IOUtil.java +new file mode 100644 +index 0000000000000000000000000000000000000000..e064f96c90afd1a4890060baa055cfd0469b6a6f +--- /dev/null ++++ b/src/main/java/com/destroystokyo/paper/io/IOUtil.java +@@ -0,0 +1,63 @@ ++package com.destroystokyo.paper.io; ++ ++import org.bukkit.Bukkit; ++ ++@Deprecated(forRemoval = true) ++public final class IOUtil { ++ ++ /* Copied from concrete or concurrentutil */ ++ ++ public static long getCoordinateKey(final int x, final int z) { ++ return ((long)z << 32) | (x & 0xFFFFFFFFL); ++ } ++ ++ public static int getCoordinateX(final long key) { ++ return (int)key; ++ } ++ ++ public static int getCoordinateZ(final long key) { ++ return (int)(key >>> 32); ++ } ++ ++ public static int getRegionCoordinate(final int chunkCoordinate) { ++ return chunkCoordinate >> 5; ++ } ++ ++ public static int getChunkInRegion(final int chunkCoordinate) { ++ return chunkCoordinate & 31; ++ } ++ ++ public static String genericToString(final Object object) { ++ return object == null ? "null" : object.getClass().getName() + ":" + object.toString(); ++ } ++ ++ public static T notNull(final T obj) { ++ if (obj == null) { ++ throw new NullPointerException(); ++ } ++ return obj; ++ } ++ ++ public static T notNull(final T obj, final String msgIfNull) { ++ if (obj == null) { ++ throw new NullPointerException(msgIfNull); ++ } ++ return obj; ++ } ++ ++ public static void arrayBounds(final int off, final int len, final int arrayLength, final String msgPrefix) { ++ if (off < 0 || len < 0 || (arrayLength - off) < len) { ++ throw new ArrayIndexOutOfBoundsException(msgPrefix + ": off: " + off + ", len: " + len + ", array length: " + arrayLength); ++ } ++ } ++ ++ public static int getPriorityForCurrentThread() { ++ return Bukkit.isPrimaryThread() ? 
PrioritizedTaskQueue.HIGHEST_PRIORITY : PrioritizedTaskQueue.NORMAL_PRIORITY; ++ } ++ ++ @SuppressWarnings("unchecked") ++ public static void rethrow(final Throwable throwable) throws T { ++ throw (T)throwable; ++ } ++ ++} +diff --git a/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java b/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java +new file mode 100644 +index 0000000000000000000000000000000000000000..f2c27e0ac65be4b75c1d86ef6fd45fdb538d96ac +--- /dev/null ++++ b/src/main/java/com/destroystokyo/paper/io/PaperFileIOThread.java +@@ -0,0 +1,474 @@ ++package com.destroystokyo.paper.io; ++ ++import com.mojang.logging.LogUtils; ++import net.minecraft.nbt.CompoundTag; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.world.level.ChunkPos; ++import net.minecraft.world.level.chunk.storage.RegionFile; ++import org.slf4j.Logger; ++ ++import java.io.IOException; ++import java.util.concurrent.CompletableFuture; ++import java.util.concurrent.ConcurrentHashMap; ++import java.util.concurrent.atomic.AtomicLong; ++import java.util.function.Consumer; ++import java.util.function.Function; ++ ++/** ++ * Prioritized singleton thread responsible for all chunk IO that occurs in a minecraft server. ++ * ++ *
<p> ++ * Singleton access: {@link Holder#INSTANCE} ++ * </p> ++ * ++ * <p> ++ * All functions provided are MT-Safe, however certain ordering constraints are (but not enforced): ++ * <li> ++ * Chunk saves may not occur for unloaded chunks. ++ * </li> ++ * <li> ++ * Tasks must be scheduled on the main thread. ++ * </li> ++ * </p>
    ++ * ++ * @see Holder#INSTANCE ++ * @see #scheduleSave(ServerLevel, int, int, CompoundTag, CompoundTag, int) ++ * @see #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean) ++ * @deprecated ++ */ ++@Deprecated(forRemoval = true) ++public final class PaperFileIOThread extends QueueExecutorThread { ++ ++ public static final Logger LOGGER = LogUtils.getLogger(); ++ public static final CompoundTag FAILURE_VALUE = new CompoundTag(); ++ ++ public static final class Holder { ++ ++ public static final PaperFileIOThread INSTANCE = new PaperFileIOThread(); ++ ++ static { ++ // Paper - fail hard on usage ++ } ++ } ++ ++ private final AtomicLong writeCounter = new AtomicLong(); ++ ++ private PaperFileIOThread() { ++ super(new PrioritizedTaskQueue<>(), (int)(1.0e6)); // 1.0ms spinwait time ++ this.setName("Paper RegionFile IO Thread"); ++ this.setPriority(Thread.NORM_PRIORITY - 1); // we keep priority close to normal because threads can wait on us ++ this.setUncaughtExceptionHandler((final Thread unused, final Throwable thr) -> { ++ LOGGER.error("Uncaught exception thrown from IO thread, report this!", thr); ++ }); ++ } ++ ++ /* run() is implemented by superclass */ ++ ++ /* ++ * ++ * IO thread will perform reads before writes ++ * ++ * How reads/writes are scheduled: ++ * ++ * If read in progress while scheduling write, ignore read and schedule write ++ * If read in progress while scheduling read (no write in progress), chain the read task ++ * ++ * ++ * If write in progress while scheduling read, use the pending write data and ret immediately ++ * If write in progress while scheduling write (ignore read in progress), overwrite the write in progress data ++ * ++ * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them, however ++ * it fails to properly propagate write failures. When writes fail the data is kept so future reads will actually ++ * read the failed write data. This should hopefully act as a way to prevent data loss for spurious fails for writing data. ++ * ++ */ ++ ++ /** ++ * Attempts to bump the priority of all IO tasks for the given chunk coordinates. This has no effect if no tasks are queued. ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param priority Priority level to try to bump to ++ */ ++ public void bumpPriority(final ServerLevel world, final int chunkX, final int chunkZ, final int priority) { ++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage ++ } ++ ++ public CompoundTag getPendingWrite(final ServerLevel world, final int chunkX, final int chunkZ, final boolean poiData) { ++ // Paper start - rewrite chunk system ++ return io.papermc.paper.chunk.system.io.RegionFileIOThread.getPendingWrite( ++ world, chunkX, chunkZ, poiData ? io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA : ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA ++ ); ++ // Paper end - rewrite chunk system ++ } ++ ++ /** ++ * Sets the priority of all IO tasks for the given chunk coordinates. This has no effect if no tasks are queued. 
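The read/write coalescing rules quoted in the scheduling comment above (a newer write replaces the queued one; a read issued while a write is still pending is answered from the pending data) can be illustrated with a small standalone Java sketch. The class, field, and parameter names below are invented for illustration and are not part of this patch.

import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;
import java.util.function.Supplier;

// Sketch only: "pending write wins" coalescing, reduced to plain JDK types.
final class CoalescingChunkIo {

    // chunk key -> data queued for write but not yet flushed to disk
    private final ConcurrentHashMap<Long, byte[]> pendingWrites = new ConcurrentHashMap<>();

    void scheduleWrite(final long chunkKey, final byte[] data) {
        // A later write simply replaces the queued data; only the newest version gets flushed.
        this.pendingWrites.put(chunkKey, data);
    }

    void scheduleRead(final long chunkKey, final Supplier<byte[]> diskRead, final Consumer<byte[]> onComplete) {
        final byte[] pending = this.pendingWrites.get(chunkKey);
        if (pending != null) {
            // A write is still queued: answer the read from the pending data immediately.
            onComplete.accept(pending);
            return;
        }
        // No pending write: fall back to an actual disk read.
        onComplete.accept(diskRead.get());
    }
}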
++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param priority Priority level to set to ++ */ ++ public void setPriority(final ServerLevel world, final int chunkX, final int chunkZ, final int priority) { ++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage ++ } ++ ++ /** ++ * Schedules the chunk data to be written asynchronously. ++ *
<p> ++ * Impl notes: ++ * </p> ++ * ++ * <li> ++ * This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means ++ * saves must be scheduled before a chunk is unloaded. ++ * </li> ++ * <li> ++ * Writes may be called concurrently, although only the "later" write will go through. ++ * </li>
  • ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param poiData Chunk point of interest data. If {@code null}, then no poi data is saved. ++ * @param chunkData Chunk data. If {@code null}, then no chunk data is saved. ++ * @param priority Priority level for this task. See {@link PrioritizedTaskQueue} ++ * @throws IllegalArgumentException If both {@code poiData} and {@code chunkData} are {@code null}. ++ * @throws IllegalStateException If the file io thread has shutdown. ++ */ ++ public void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ, ++ final CompoundTag poiData, final CompoundTag chunkData, ++ final int priority) throws IllegalArgumentException { ++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage ++ } ++ ++ private void scheduleWrite(final ChunkDataController dataController, final ServerLevel world, ++ final int chunkX, final int chunkZ, final CompoundTag data, final int priority, final long writeCounter) { ++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage ++ } ++ ++ /** ++ * Same as {@link #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean)}, except this function returns ++ * a {@link CompletableFuture} which is potentially completed ASYNCHRONOUSLY ON THE FILE IO THREAD when the load task ++ * has completed. ++ *
<p> ++ * Note that if the chunk fails to load the returned future is completed with {@code null}. ++ * </p>
    ++ */ ++ public CompletableFuture loadChunkDataAsyncFuture(final ServerLevel world, final int chunkX, final int chunkZ, ++ final int priority, final boolean readPoiData, final boolean readChunkData, ++ final boolean intendingToBlock) { ++ final CompletableFuture future = new CompletableFuture<>(); ++ this.loadChunkDataAsync(world, chunkX, chunkZ, priority, future::complete, readPoiData, readChunkData, intendingToBlock); ++ return future; ++ } ++ ++ /** ++ * Schedules a load to be executed asynchronously. ++ *
<p> ++ * Impl notes: ++ * </p> ++ * ++ * <li> ++ * If a chunk fails to load, the {@code onComplete} parameter is completed with {@code null}. ++ * </li> ++ * <li> ++ * It is possible for the {@code onComplete} parameter to be given {@link ChunkData} containing data ++ * this call did not request. ++ * </li> ++ * <li> ++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may ++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of ++ * data is undefined behaviour, and can cause deadlock. ++ * </li>
  • ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param priority Priority level for this task. See {@link PrioritizedTaskQueue} ++ * @param onComplete Consumer to execute once this task has completed ++ * @param readPoiData Whether to read point of interest data. If {@code false}, the {@code NBTTagCompound} will be {@code null}. ++ * @param readChunkData Whether to read chunk data. If {@code false}, the {@code NBTTagCompound} will be {@code null}. ++ * @return The {@link PrioritizedTaskQueue.PrioritizedTask} associated with this task. Note that this task does not support ++ * cancellation. ++ */ ++ public void loadChunkDataAsync(final ServerLevel world, final int chunkX, final int chunkZ, ++ final int priority, final Consumer onComplete, ++ final boolean readPoiData, final boolean readChunkData, ++ final boolean intendingToBlock) { ++ if (!PrioritizedTaskQueue.validPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority: " + priority); ++ } ++ ++ if (!(readPoiData | readChunkData)) { ++ throw new IllegalArgumentException("Must read chunk data or poi data"); ++ } ++ ++ final ChunkData complete = new ChunkData(); ++ // Paper start - rewrite chunk system ++ final java.util.List types = new java.util.ArrayList<>(); ++ if (readPoiData) { ++ types.add(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA); ++ } ++ if (readChunkData) { ++ types.add(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA); ++ } ++ final ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority newPriority; ++ switch (priority) { ++ case PrioritizedTaskQueue.HIGHEST_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.BLOCKING; ++ case PrioritizedTaskQueue.HIGHER_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.HIGHEST; ++ case PrioritizedTaskQueue.HIGH_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.HIGH; ++ case PrioritizedTaskQueue.NORMAL_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.NORMAL; ++ case PrioritizedTaskQueue.LOW_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.LOW; ++ case PrioritizedTaskQueue.LOWEST_PRIORITY -> newPriority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.IDLE; ++ default -> throw new IllegalStateException("Legacy priority " + priority + " should be valid"); ++ } ++ final Consumer transformComplete = (io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileData data) -> { ++ if (readPoiData) { ++ if (data.getThrowable(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA) != null) { ++ complete.poiData = FAILURE_VALUE; ++ } else { ++ complete.poiData = data.getData(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA); ++ } ++ } ++ ++ if (readChunkData) { ++ if (data.getThrowable(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA) != null) { ++ complete.chunkData = FAILURE_VALUE; ++ } else { ++ complete.chunkData = data.getData(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA); ++ } ++ } ++ ++ onComplete.accept(complete); ++ }; ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, 
transformComplete, intendingToBlock, newPriority, types.toArray(new io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType[0])); ++ // Paper end - rewrite chunk system ++ ++ } ++ ++ // Note: the onComplete may be called asynchronously or synchronously here. ++ private void scheduleRead(final ChunkDataController dataController, final ServerLevel world, ++ final int chunkX, final int chunkZ, final Consumer onComplete, final int priority, ++ final boolean intendingToBlock) { ++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage ++ } ++ ++ /** ++ * Same as {@link #loadChunkDataAsync(ServerLevel, int, int, int, Consumer, boolean, boolean, boolean)}, except this function returns ++ * the {@link ChunkData} associated with the specified chunk when the task is complete. ++ * @return The chunk data, or {@code null} if the chunk failed to load. ++ */ ++ public ChunkData loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ, final int priority, ++ final boolean readPoiData, final boolean readChunkData) { ++ return this.loadChunkDataAsyncFuture(world, chunkX, chunkZ, priority, readPoiData, readChunkData, true).join(); ++ } ++ ++ /** ++ * Schedules the given task at the specified priority to be executed on the IO thread. ++ *
<p> ++ * Internal api. Do not use. ++ * </p>
    ++ */ ++ public void runTask(final int priority, final Runnable runnable) { ++ throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage ++ } ++ ++ static final class GeneralTask extends PrioritizedTaskQueue.PrioritizedTask implements Runnable { ++ ++ private final Runnable run; ++ ++ public GeneralTask(final int priority, final Runnable run) { ++ super(priority); ++ this.run = IOUtil.notNull(run, "Task may not be null"); ++ } ++ ++ @Override ++ public void run() { ++ try { ++ this.run.run(); ++ } catch (final Throwable throwable) { ++ if (throwable instanceof ThreadDeath) { ++ throw (ThreadDeath)throwable; ++ } ++ LOGGER.error("Failed to execute general task on IO thread " + IOUtil.genericToString(this.run), throwable); ++ } ++ } ++ } ++ ++ public static final class ChunkData { ++ ++ public CompoundTag poiData; ++ public CompoundTag chunkData; ++ ++ public ChunkData() {} ++ ++ public ChunkData(final CompoundTag poiData, final CompoundTag chunkData) { ++ this.poiData = poiData; ++ this.chunkData = chunkData; ++ } ++ } ++ ++ public static abstract class ChunkDataController { ++ ++ // ConcurrentHashMap synchronizes per chain, so reduce the chance of task's hashes colliding. ++ public final ConcurrentHashMap tasks = new ConcurrentHashMap<>(64, 0.5f); ++ ++ public abstract void writeData(final int x, final int z, final CompoundTag compound) throws IOException; ++ public abstract CompoundTag readData(final int x, final int z) throws IOException; ++ ++ public abstract T computeForRegionFile(final int chunkX, final int chunkZ, final Function function); ++ public abstract T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function function); ++ ++ public static final class InProgressWrite { ++ public long writeCounter; ++ public CompoundTag data; ++ } ++ ++ public static final class InProgressRead { ++ public final CompletableFuture readFuture = new CompletableFuture<>(); ++ } ++ } ++ ++ public static final class ChunkDataTask extends PrioritizedTaskQueue.PrioritizedTask implements Runnable { ++ ++ public ChunkDataController.InProgressWrite inProgressWrite; ++ public ChunkDataController.InProgressRead inProgressRead; ++ ++ private final ServerLevel world; ++ private final int x; ++ private final int z; ++ private final ChunkDataController taskController; ++ ++ public ChunkDataTask(final int priority, final ServerLevel world, final int x, final int z, final ChunkDataController taskController) { ++ super(priority); ++ this.world = world; ++ this.x = x; ++ this.z = z; ++ this.taskController = taskController; ++ } ++ ++ @Override ++ public String toString() { ++ return "Task for world: '" + this.world.getWorld().getName() + "' at " + this.x + "," + this.z + ++ " poi: " + (this.taskController == null) + ", hash: " + this.hashCode(); // Paper - TODO rewrite chunk system ++ } ++ ++ /* ++ * ++ * IO thread will perform reads before writes ++ * ++ * How reads/writes are scheduled: ++ * ++ * If read in progress while scheduling write, ignore read and schedule write ++ * If read in progress while scheduling read (no write in progress), chain the read task ++ * ++ * ++ * If write in progress while scheduling read, use the pending write data and ret immediately ++ * If write in progress while scheduling write (ignore read in progress), overwrite the write in progress data ++ * ++ * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them, however ++ * it fails to 
properly propagate write failures ++ * ++ */ ++ ++ void reschedule(final int priority) { ++ // priority is checked before this stage // TODO what ++ this.queue.lazySet(null); ++ this.priority.lazySet(priority); ++ PaperFileIOThread.Holder.INSTANCE.queueTask(this); ++ } ++ ++ @Override ++ public void run() { ++ if (true) throw new IllegalStateException("Shouldn't get here, use RegionFileIOThread"); // Paper - rewrite chunk system, fail hard on usage ++ ChunkDataController.InProgressRead read = this.inProgressRead; ++ if (read != null) { ++ CompoundTag compound = PaperFileIOThread.FAILURE_VALUE; ++ try { ++ compound = this.taskController.readData(this.x, this.z); ++ } catch (final Throwable thr) { ++ if (thr instanceof ThreadDeath) { ++ throw (ThreadDeath)thr; ++ } ++ LOGGER.error("Failed to read chunk data for task: " + this.toString(), thr); ++ // fall through to complete with null data ++ } ++ read.readFuture.complete(compound); ++ } ++ ++ final Long chunkKey = Long.valueOf(IOUtil.getCoordinateKey(this.x, this.z)); ++ ++ ChunkDataController.InProgressWrite write = this.inProgressWrite; ++ ++ if (write == null) { ++ // IntelliJ warns this is invalid, however it does not consider that writes to the task map & the inProgress field can occur concurrently. ++ ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final Long keyInMap, final ChunkDataTask valueInMap) -> { ++ if (valueInMap == null) { ++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!"); ++ } ++ if (valueInMap != ChunkDataTask.this) { ++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!"); ++ } ++ return valueInMap.inProgressWrite == null ? 
null : valueInMap; ++ }); ++ ++ if (inMap == null) { ++ return; // set the task value to null, indicating we're done ++ } ++ ++ // not null, which means there was a concurrent write ++ write = this.inProgressWrite; ++ } ++ ++ for (;;) { ++ final long writeCounter; ++ final CompoundTag data; ++ ++ //noinspection SynchronizationOnLocalVariableOrMethodParameter ++ synchronized (write) { ++ writeCounter = write.writeCounter; ++ data = write.data; ++ } ++ ++ boolean failedWrite = false; ++ ++ try { ++ this.taskController.writeData(this.x, this.z, data); ++ } catch (final Throwable thr) { ++ if (thr instanceof ThreadDeath) { ++ throw (ThreadDeath)thr; ++ } ++ LOGGER.error("Failed to write chunk data for task: " + this.toString(), thr); ++ failedWrite = true; ++ } ++ ++ boolean finalFailWrite = failedWrite; ++ ++ ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final Long keyInMap, final ChunkDataTask valueInMap) -> { ++ if (valueInMap == null) { ++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!"); ++ } ++ if (valueInMap != ChunkDataTask.this) { ++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!"); ++ } ++ if (valueInMap.inProgressWrite.writeCounter == writeCounter) { ++ if (finalFailWrite) { ++ valueInMap.inProgressWrite.writeCounter = -1L; ++ } ++ ++ return null; ++ } ++ return valueInMap; ++ // Hack end ++ }); ++ ++ if (inMap == null) { ++ // write counter matched, so we wrote the most up-to-date pending data, we're done here ++ // or we failed to write and successfully set the write counter to -1 ++ return; // we're done here ++ } ++ ++ // fetch & write new data ++ continue; ++ } ++ } ++ } ++} +diff --git a/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java b/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java +new file mode 100644 +index 0000000000000000000000000000000000000000..7844a3515430472bd829ff246396bceb0797de1b +--- /dev/null ++++ b/src/main/java/com/destroystokyo/paper/io/PrioritizedTaskQueue.java +@@ -0,0 +1,299 @@ ++package com.destroystokyo.paper.io; ++ ++import java.util.concurrent.ConcurrentLinkedQueue; ++import java.util.concurrent.atomic.AtomicBoolean; ++import java.util.concurrent.atomic.AtomicInteger; ++import java.util.concurrent.atomic.AtomicReference; ++ ++@Deprecated(forRemoval = true) ++public class PrioritizedTaskQueue { ++ ++ // lower numbers are a higher priority (except < 0) ++ // higher priorities are always executed before lower priorities ++ ++ /** ++ * Priority value indicating the task has completed or is being completed. ++ */ ++ public static final int COMPLETING_PRIORITY = -1; ++ ++ /** ++ * Highest priority, should only be used for main thread tasks or tasks that are blocking the main thread. ++ */ ++ public static final int HIGHEST_PRIORITY = 0; ++ ++ /** ++ * Should be only used in an IO task so that chunk loads do not wait on other IO tasks. ++ * This only exists because IO tasks are scheduled before chunk load tasks to decrease IO waiting times. ++ */ ++ public static final int HIGHER_PRIORITY = 1; ++ ++ /** ++ * Should be used for scheduling chunk loads/generation that would increase response times to users. ++ */ ++ public static final int HIGH_PRIORITY = 2; ++ ++ /** ++ * Default priority. 
++ */ ++ public static final int NORMAL_PRIORITY = 3; ++ ++ /** ++ * Use for tasks not at all critical and can potentially be delayed. ++ */ ++ public static final int LOW_PRIORITY = 4; ++ ++ /** ++ * Use for tasks that should "eventually" execute. ++ */ ++ public static final int LOWEST_PRIORITY = 5; ++ ++ private static final int TOTAL_PRIORITIES = 6; ++ ++ final ConcurrentLinkedQueue[] queues = (ConcurrentLinkedQueue[])new ConcurrentLinkedQueue[TOTAL_PRIORITIES]; ++ ++ private final AtomicBoolean shutdown = new AtomicBoolean(); ++ ++ { ++ for (int i = 0; i < TOTAL_PRIORITIES; ++i) { ++ this.queues[i] = new ConcurrentLinkedQueue<>(); ++ } ++ } ++ ++ /** ++ * Returns whether the specified priority is valid ++ */ ++ public static boolean validPriority(final int priority) { ++ return priority >= 0 && priority < TOTAL_PRIORITIES; ++ } ++ ++ /** ++ * Queues a task. ++ * @throws IllegalStateException If the task has already been queued. Use {@link PrioritizedTask#raisePriority(int)} to ++ * raise a task's priority. ++ * This can also be thrown if the queue has shutdown. ++ */ ++ public void add(final T task) throws IllegalStateException { ++ int priority = task.getPriority(); ++ if (priority != COMPLETING_PRIORITY) { ++ task.setQueue(this); ++ this.queues[priority].add(task); ++ } ++ if (this.shutdown.get()) { ++ // note: we're not actually sure at this point if our task will go through ++ throw new IllegalStateException("Queue has shutdown, refusing to execute task " + IOUtil.genericToString(task)); ++ } ++ } ++ ++ /** ++ * Polls the highest priority task currently available. {@code null} if none. ++ */ ++ public T poll() { ++ T task; ++ for (int i = 0; i < TOTAL_PRIORITIES; ++i) { ++ final ConcurrentLinkedQueue queue = this.queues[i]; ++ ++ while ((task = queue.poll()) != null) { ++ final int prevPriority = task.tryComplete(i); ++ if (prevPriority != COMPLETING_PRIORITY && prevPriority <= i) { ++ // if the prev priority was greater-than or equal to our current priority ++ return task; ++ } ++ } ++ } ++ ++ return null; ++ } ++ ++ /** ++ * Polls the highest priority task currently available. {@code null} if none. ++ */ ++ public T poll(final int lowestPriority) { ++ T task; ++ final int max = Math.min(LOWEST_PRIORITY, lowestPriority); ++ for (int i = 0; i <= max; ++i) { ++ final ConcurrentLinkedQueue queue = this.queues[i]; ++ ++ while ((task = queue.poll()) != null) { ++ final int prevPriority = task.tryComplete(i); ++ if (prevPriority != COMPLETING_PRIORITY && prevPriority <= i) { ++ // if the prev priority was greater-than or equal to our current priority ++ return task; ++ } ++ } ++ } ++ ++ return null; ++ } ++ ++ /** ++ * Returns whether this queue may have tasks queued. ++ *
<p> ++ * This operation is not atomic, but is MT-Safe. ++ * </p>
    ++ * @return {@code true} if tasks may be queued, {@code false} otherwise ++ */ ++ public boolean hasTasks() { ++ for (int i = 0; i < TOTAL_PRIORITIES; ++i) { ++ final ConcurrentLinkedQueue queue = this.queues[i]; ++ ++ if (queue.peek() != null) { ++ return true; ++ } ++ } ++ return false; ++ } ++ ++ /** ++ * Prevent further additions to this queue. Attempts to add after this call has completed (potentially during) will ++ * result in {@link IllegalStateException} being thrown. ++ *
<p> ++ * This operation is atomic with respect to other shutdown calls ++ * </p> ++ * <p> ++ * After this call has completed, regardless of return value, this queue will be shutdown. ++ * </p>
    ++ * @return {@code true} if the queue was shutdown, {@code false} if it has shut down already ++ */ ++ public boolean shutdown() { ++ return this.shutdown.getAndSet(false); ++ } ++ ++ public abstract static class PrioritizedTask { ++ ++ protected final AtomicReference queue = new AtomicReference<>(); ++ ++ protected final AtomicInteger priority; ++ ++ protected PrioritizedTask() { ++ this(PrioritizedTaskQueue.NORMAL_PRIORITY); ++ } ++ ++ protected PrioritizedTask(final int priority) { ++ if (!PrioritizedTaskQueue.validPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ this.priority = new AtomicInteger(priority); ++ } ++ ++ /** ++ * Returns the current priority. Note that {@link PrioritizedTaskQueue#COMPLETING_PRIORITY} will be returned ++ * if this task is completing or has completed. ++ */ ++ public final int getPriority() { ++ return this.priority.get(); ++ } ++ ++ /** ++ * Returns whether this task is scheduled to execute, or has been already executed. ++ */ ++ public boolean isScheduled() { ++ return this.queue.get() != null; ++ } ++ ++ final int tryComplete(final int minPriority) { ++ for (int curr = this.getPriorityVolatile();;) { ++ if (curr == COMPLETING_PRIORITY) { ++ return COMPLETING_PRIORITY; ++ } ++ if (curr > minPriority) { ++ // curr is lower priority ++ return curr; ++ } ++ ++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, COMPLETING_PRIORITY))) { ++ return curr; ++ } ++ continue; ++ } ++ } ++ ++ /** ++ * Forces this task to be completed. ++ * @return {@code true} if the task was cancelled, {@code false} if the task has already completed or is being completed. ++ */ ++ public boolean cancel() { ++ return this.exchangePriorityVolatile(PrioritizedTaskQueue.COMPLETING_PRIORITY) != PrioritizedTaskQueue.COMPLETING_PRIORITY; ++ } ++ ++ /** ++ * Attempts to raise the priority to the priority level specified. ++ * @param priority Priority specified ++ * @return {@code true} if successful, {@code false} otherwise. ++ */ ++ public boolean raisePriority(final int priority) { ++ if (!PrioritizedTaskQueue.validPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority"); ++ } ++ ++ for (int curr = this.getPriorityVolatile();;) { ++ if (curr == COMPLETING_PRIORITY) { ++ return false; ++ } ++ if (priority >= curr) { ++ return true; ++ } ++ ++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority))) { ++ PrioritizedTaskQueue queue = this.queue.get(); ++ if (queue != null) { ++ //noinspection unchecked ++ queue.queues[priority].add(this); // silently fail on shutdown ++ } ++ return true; ++ } ++ continue; ++ } ++ } ++ ++ /** ++ * Attempts to set this task's priority level to the level specified. ++ * @param priority Specified priority level. ++ * @return {@code true} if successful, {@code false} if this task is completing or has completed. 
++ */ ++ public boolean updatePriority(final int priority) { ++ if (!PrioritizedTaskQueue.validPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority"); ++ } ++ ++ for (int curr = this.getPriorityVolatile();;) { ++ if (curr == COMPLETING_PRIORITY) { ++ return false; ++ } ++ if (curr == priority) { ++ return true; ++ } ++ ++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority))) { ++ PrioritizedTaskQueue queue = this.queue.get(); ++ if (queue != null) { ++ //noinspection unchecked ++ queue.queues[priority].add(this); // silently fail on shutdown ++ } ++ return true; ++ } ++ continue; ++ } ++ } ++ ++ void setQueue(final PrioritizedTaskQueue queue) { ++ this.queue.set(queue); ++ } ++ ++ /* priority */ ++ ++ protected final int getPriorityVolatile() { ++ return this.priority.get(); ++ } ++ ++ protected final int compareAndExchangePriorityVolatile(final int expect, final int update) { ++ if (this.priority.compareAndSet(expect, update)) { ++ return expect; ++ } ++ return this.priority.get(); ++ } ++ ++ protected final int exchangePriorityVolatile(final int value) { ++ return this.priority.getAndSet(value); ++ } ++ } ++} +diff --git a/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java b/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java +new file mode 100644 +index 0000000000000000000000000000000000000000..99f49b5625cf51d6c97640553cf5c420bb6fdd36 +--- /dev/null ++++ b/src/main/java/com/destroystokyo/paper/io/QueueExecutorThread.java +@@ -0,0 +1,255 @@ ++package com.destroystokyo.paper.io; ++ ++import com.mojang.logging.LogUtils; ++import org.slf4j.Logger; ++ ++import java.util.concurrent.ConcurrentLinkedQueue; ++import java.util.concurrent.atomic.AtomicBoolean; ++import java.util.concurrent.locks.LockSupport; ++ ++@Deprecated(forRemoval = true) ++public class QueueExecutorThread extends Thread { ++ ++ private static final Logger LOGGER = LogUtils.getLogger(); ++ ++ protected final PrioritizedTaskQueue queue; ++ protected final long spinWaitTime; ++ ++ protected volatile boolean closed; ++ ++ protected final AtomicBoolean parked = new AtomicBoolean(); ++ ++ protected volatile ConcurrentLinkedQueue flushQueue = new ConcurrentLinkedQueue<>(); ++ protected volatile long flushCycles; ++ ++ protected int lowestPriorityToPoll = PrioritizedTaskQueue.LOWEST_PRIORITY; ++ ++ public int getLowestPriorityToPoll() { ++ return this.lowestPriorityToPoll; ++ } ++ ++ public void setLowestPriorityToPoll(final int lowestPriorityToPoll) { ++ if (this.isAlive()) { ++ throw new IllegalStateException("Cannot set after starting"); ++ } ++ this.lowestPriorityToPoll = lowestPriorityToPoll; ++ } ++ ++ public QueueExecutorThread(final PrioritizedTaskQueue queue) { ++ this(queue, (int)(1.e6)); // 1.0ms ++ } ++ ++ public QueueExecutorThread(final PrioritizedTaskQueue queue, final long spinWaitTime) { // in ms ++ this.queue = queue; ++ this.spinWaitTime = spinWaitTime; ++ } ++ ++ @Override ++ public void run() { ++ final long spinWaitTime = this.spinWaitTime; ++ main_loop: ++ for (;;) { ++ this.pollTasks(true); ++ ++ // spinwait ++ ++ final long start = System.nanoTime(); ++ ++ for (;;) { ++ // If we are interrpted for any reason, park() will always return immediately. Clear so that we don't needlessly use cpu in such an event. 
++ Thread.interrupted(); ++ LockSupport.parkNanos("Spinwaiting on tasks", 1000L); // 1us ++ ++ if (this.pollTasks(true)) { ++ // restart loop, found tasks ++ continue main_loop; ++ } ++ ++ if (this.handleClose()) { ++ return; // we're done ++ } ++ ++ if ((System.nanoTime() - start) >= spinWaitTime) { ++ break; ++ } ++ } ++ ++ if (this.handleClose()) { ++ return; ++ } ++ ++ this.parked.set(true); ++ ++ // We need to parse here to avoid a race condition where a thread queues a task before we set parked to true ++ // (i.e it will not notify us) ++ if (this.pollTasks(true)) { ++ this.parked.set(false); ++ continue; ++ } ++ ++ if (this.handleClose()) { ++ return; ++ } ++ ++ // we don't need to check parked before sleeping, but we do need to check parked in a do-while loop ++ // LockSupport.park() can fail for any reason ++ do { ++ Thread.interrupted(); ++ LockSupport.park("Waiting on tasks"); ++ } while (this.parked.get()); ++ } ++ } ++ ++ protected boolean handleClose() { ++ if (this.closed) { ++ this.pollTasks(true); // this ensures we've emptied the queue ++ this.handleFlushThreads(true); ++ return true; ++ } ++ return false; ++ } ++ ++ protected boolean pollTasks(boolean flushTasks) { ++ Runnable task; ++ boolean ret = false; ++ ++ while ((task = this.queue.poll(this.lowestPriorityToPoll)) != null) { ++ ret = true; ++ try { ++ task.run(); ++ } catch (final Throwable throwable) { ++ if (throwable instanceof ThreadDeath) { ++ throw (ThreadDeath)throwable; ++ } ++ LOGGER.error("Exception thrown from prioritized runnable task in thread '" + this.getName() + "': " + IOUtil.genericToString(task), throwable); ++ } ++ } ++ ++ if (flushTasks) { ++ this.handleFlushThreads(false); ++ } ++ ++ return ret; ++ } ++ ++ protected void handleFlushThreads(final boolean shutdown) { ++ Thread parking; ++ ConcurrentLinkedQueue flushQueue = this.flushQueue; ++ do { ++ ++flushCycles; // may be plain read opaque write ++ while ((parking = flushQueue.poll()) != null) { ++ LockSupport.unpark(parking); ++ } ++ } while (this.pollTasks(false)); ++ ++ if (shutdown) { ++ this.flushQueue = null; ++ ++ // defend against a race condition where a flush thread double-checks right before we set to null ++ while ((parking = flushQueue.poll()) != null) { ++ LockSupport.unpark(parking); ++ } ++ } ++ } ++ ++ /** ++ * Notify's this thread that a task has been added to its queue ++ * @return {@code true} if this thread was waiting for tasks, {@code false} if it is executing tasks ++ */ ++ public boolean notifyTasks() { ++ if (this.parked.get() && this.parked.getAndSet(false)) { ++ LockSupport.unpark(this); ++ return true; ++ } ++ return false; ++ } ++ ++ protected void queueTask(final T task) { ++ this.queue.add(task); ++ this.notifyTasks(); ++ } ++ ++ /** ++ * Waits until this thread's queue is empty. ++ * ++ * @throws IllegalStateException If the current thread is {@code this} thread. 
++ */ ++ public void flush() { ++ final Thread currentThread = Thread.currentThread(); ++ ++ if (currentThread == this) { ++ // avoid deadlock ++ throw new IllegalStateException("Cannot flush the queue executor thread while on the queue executor thread"); ++ } ++ ++ // order is important ++ ++ int successes = 0; ++ long lastCycle = -1L; ++ ++ do { ++ final ConcurrentLinkedQueue flushQueue = this.flushQueue; ++ if (flushQueue == null) { ++ return; ++ } ++ ++ flushQueue.add(currentThread); ++ ++ // double check flush queue ++ if (this.flushQueue == null) { ++ return; ++ } ++ ++ final long currentCycle = this.flushCycles; // may be opaque read ++ ++ if (currentCycle == lastCycle) { ++ Thread.yield(); ++ continue; ++ } ++ ++ // force response ++ this.parked.set(false); ++ LockSupport.unpark(this); ++ ++ LockSupport.park("flushing queue executor thread"); ++ ++ // returns whether there are tasks queued, does not return whether there are tasks executing ++ // this is why we cycle twice twice through flush (we know a pollTask call is made after a flush cycle) ++ // we really only need to guarantee that the tasks this thread has queued has gone through, and can leave ++ // tasks queued concurrently that are unsychronized with this thread as undefined behavior ++ if (this.queue.hasTasks()) { ++ successes = 0; ++ } else { ++ ++successes; ++ } ++ ++ } while (successes != 2); ++ ++ } ++ ++ /** ++ * Closes this queue executor's queue and optionally waits for it to empty. ++ *
<p> ++ * If wait is {@code true}, then the queue will be empty by the time this call completes. ++ * </p> ++ * <p> ++ * This function is MT-Safe. ++ * </p>
    ++ * @param wait If this call is to wait until the queue is empty ++ * @param killQueue Whether to shutdown this thread's queue ++ * @return whether this thread shut down the queue ++ */ ++ public boolean close(final boolean wait, final boolean killQueue) { ++ boolean ret = !killQueue ? false : this.queue.shutdown(); ++ this.closed = true; ++ ++ // force thread to respond to the shutdown ++ this.parked.set(false); ++ LockSupport.unpark(this); ++ ++ if (wait) { ++ this.flush(); ++ } ++ return ret; ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java b/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java +new file mode 100644 +index 0000000000000000000000000000000000000000..e77972c4c264100ffdd824bfa2dac58dbbc6d678 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java +@@ -0,0 +1,1128 @@ ++package io.papermc.paper.chunk; ++ ++import com.destroystokyo.paper.util.misc.PlayerAreaMap; ++import com.destroystokyo.paper.util.misc.PooledLinkedHashSets; ++import io.papermc.paper.configuration.GlobalConfiguration; ++import io.papermc.paper.util.CoordinateUtils; ++import io.papermc.paper.util.IntervalledCounter; ++import io.papermc.paper.util.TickThread; ++import it.unimi.dsi.fastutil.longs.LongOpenHashSet; ++import it.unimi.dsi.fastutil.objects.Reference2IntOpenHashMap; ++import it.unimi.dsi.fastutil.objects.Reference2ObjectLinkedOpenHashMap; ++import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet; ++import net.minecraft.network.protocol.game.ClientboundSetChunkCacheCenterPacket; ++import net.minecraft.network.protocol.game.ClientboundSetChunkCacheRadiusPacket; ++import net.minecraft.network.protocol.game.ClientboundSetSimulationDistancePacket; ++import io.papermc.paper.util.MCUtil; ++import net.minecraft.server.MinecraftServer; ++import net.minecraft.server.level.*; ++import net.minecraft.util.Mth; ++import net.minecraft.world.level.ChunkPos; ++import net.minecraft.world.level.chunk.LevelChunk; ++import org.apache.commons.lang3.mutable.MutableObject; ++import org.bukkit.craftbukkit.entity.CraftPlayer; ++import org.bukkit.entity.Player; ++import java.util.ArrayDeque; ++import java.util.ArrayList; ++import java.util.List; ++import java.util.TreeSet; ++import java.util.concurrent.atomic.AtomicInteger; ++ ++public final class PlayerChunkLoader { ++ ++ public static final int MIN_VIEW_DISTANCE = 2; ++ public static final int MAX_VIEW_DISTANCE = 32; ++ ++ public static final int TICK_TICKET_LEVEL = 31; ++ public static final int LOADED_TICKET_LEVEL = 33; ++ ++ public static int getTickViewDistance(final Player player) { ++ return getTickViewDistance(((CraftPlayer)player).getHandle()); ++ } ++ ++ public static int getTickViewDistance(final ServerPlayer player) { ++ final ServerLevel level = (ServerLevel)player.level; ++ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player); ++ if (data == null) { ++ return level.chunkSource.chunkMap.playerChunkManager.getTargetTickViewDistance(); ++ } ++ return data.getTargetTickViewDistance(); ++ } ++ ++ public static int getLoadViewDistance(final Player player) { ++ return getLoadViewDistance(((CraftPlayer)player).getHandle()); ++ } ++ ++ public static int getLoadViewDistance(final ServerPlayer player) { ++ final ServerLevel level = (ServerLevel)player.level; ++ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player); ++ if (data == null) { ++ return level.chunkSource.chunkMap.playerChunkManager.getLoadDistance(); ++ } ++ 
return data.getLoadDistance(); ++ } ++ ++ public static int getSendViewDistance(final Player player) { ++ return getSendViewDistance(((CraftPlayer)player).getHandle()); ++ } ++ ++ public static int getSendViewDistance(final ServerPlayer player) { ++ final ServerLevel level = (ServerLevel)player.level; ++ final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player); ++ if (data == null) { ++ return level.chunkSource.chunkMap.playerChunkManager.getTargetSendDistance(); ++ } ++ return data.getTargetSendViewDistance(); ++ } ++ ++ protected final ChunkMap chunkMap; ++ protected final Reference2ObjectLinkedOpenHashMap playerMap = new Reference2ObjectLinkedOpenHashMap<>(512, 0.7f); ++ protected final ReferenceLinkedOpenHashSet chunkSendQueue = new ReferenceLinkedOpenHashSet<>(512, 0.7f); ++ ++ protected final TreeSet chunkLoadQueue = new TreeSet<>((final PlayerLoaderData p1, final PlayerLoaderData p2) -> { ++ if (p1 == p2) { ++ return 0; ++ } ++ ++ final ChunkPriorityHolder holder1 = p1.loadQueue.peekFirst(); ++ final ChunkPriorityHolder holder2 = p2.loadQueue.peekFirst(); ++ ++ final int priorityCompare = Double.compare(holder1 == null ? Double.MAX_VALUE : holder1.priority, holder2 == null ? Double.MAX_VALUE : holder2.priority); ++ ++ final int lastLoadTimeCompare = Long.compare(p1.lastChunkLoad - p2.lastChunkLoad, 0); ++ ++ if ((holder1 == null || holder2 == null || lastLoadTimeCompare == 0 || holder1.priority < 0.0 || holder2.priority < 0.0) && priorityCompare != 0) { ++ return priorityCompare; ++ } ++ ++ if (lastLoadTimeCompare != 0) { ++ return lastLoadTimeCompare; ++ } ++ ++ final int idCompare = Integer.compare(p1.player.getId(), p2.player.getId()); ++ ++ if (idCompare != 0) { ++ return idCompare; ++ } ++ ++ // last resort ++ return Integer.compare(System.identityHashCode(p1), System.identityHashCode(p2)); ++ }); ++ ++ protected final TreeSet chunkSendWaitQueue = new TreeSet<>((final PlayerLoaderData p1, final PlayerLoaderData p2) -> { ++ if (p1 == p2) { ++ return 0; ++ } ++ ++ final int timeCompare = Long.compare(p1.nextChunkSendTarget - p2.nextChunkSendTarget, 0); ++ if (timeCompare != 0) { ++ return timeCompare; ++ } ++ ++ final int idCompare = Integer.compare(p1.player.getId(), p2.player.getId()); ++ ++ if (idCompare != 0) { ++ return idCompare; ++ } ++ ++ // last resort ++ return Integer.compare(System.identityHashCode(p1), System.identityHashCode(p2)); ++ }); ++ ++ ++ // no throttling is applied below this VD for loading ++ ++ /** ++ * The chunks to be sent to players, provided they're send-ready. Send-ready means the chunk and its 1 radius neighbours are loaded. ++ */ ++ public final PlayerAreaMap broadcastMap; ++ ++ /** ++ * The chunks to be brought up to send-ready status. Send-ready means the chunk and its 1 radius neighbours are loaded. ++ */ ++ public final PlayerAreaMap loadMap; ++ ++ /** ++ * Areamap used only to remove tickets for send-ready chunks. View distance is always + 1 of load view distance. Thus, ++ * this map is always representing the chunks we are actually going to load. ++ */ ++ public final PlayerAreaMap loadTicketCleanup; ++ ++ /** ++ * The chunks to brought to ticking level. Each chunk must have 2 radius neighbours loaded before this can happen. 
++ */ ++ public final PlayerAreaMap tickMap; ++ ++ /** ++ * -1 if defaulting to [load distance], else always in [2, load distance] ++ */ ++ protected int rawSendDistance = -1; ++ ++ /** ++ * -1 if defaulting to [tick view distance + 1], else always in [tick view distance + 1, 32 + 1] ++ */ ++ protected int rawLoadDistance = -1; ++ ++ /** ++ * Never -1, always in [2, 32] ++ */ ++ protected int rawTickDistance = -1; ++ ++ // methods to bridge for API ++ ++ public int getTargetTickViewDistance() { ++ return this.getTickDistance(); ++ } ++ ++ public void setTargetTickViewDistance(final int distance) { ++ this.setTickDistance(distance); ++ } ++ ++ public int getTargetNoTickViewDistance() { ++ return this.getLoadDistance() - 1; ++ } ++ ++ public void setTargetNoTickViewDistance(final int distance) { ++ this.setLoadDistance(distance == -1 ? -1 : distance + 1); ++ } ++ ++ public int getTargetSendDistance() { ++ return this.rawSendDistance == -1 ? this.getLoadDistance() : this.rawSendDistance; ++ } ++ ++ public void setTargetSendDistance(final int distance) { ++ this.setSendDistance(distance); ++ } ++ ++ // internal methods ++ ++ public int getSendDistance() { ++ final int loadDistance = this.getLoadDistance(); ++ return this.rawSendDistance == -1 ? loadDistance : Math.min(this.rawSendDistance, loadDistance); ++ } ++ ++ public void setSendDistance(final int distance) { ++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) { ++ throw new IllegalArgumentException("Send distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + ", or -1, got: " + distance); ++ } ++ this.rawSendDistance = distance; ++ } ++ ++ public int getLoadDistance() { ++ final int tickDistance = this.getTickDistance(); ++ return this.rawLoadDistance == -1 ? tickDistance + 1 : Math.max(tickDistance + 1, this.rawLoadDistance); ++ } ++ ++ public void setLoadDistance(final int distance) { ++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) { ++ throw new IllegalArgumentException("Load distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + ", or -1, got: " + distance); ++ } ++ this.rawLoadDistance = distance; ++ } ++ ++ public int getTickDistance() { ++ return this.rawTickDistance; ++ } ++ ++ public void setTickDistance(final int distance) { ++ if (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE) { ++ throw new IllegalArgumentException("View distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + ", got: " + distance); ++ } ++ this.rawTickDistance = distance; ++ } ++ ++ /* ++ Players have 3 different types of view distance: ++ 1. Sending view distance ++ 2. Loading view distance ++ 3. Ticking view distance ++ ++ But for configuration purposes (and API) there are: ++ 1. No-tick view distance ++ 2. Tick view distance ++ 3. Broadcast view distance ++ ++ These aren't always the same as the types we represent internally. ++ ++ Loading view distance is always max(no-tick + 1, tick + 1) ++ - no-tick has 1 added because clients need an extra radius to render chunks ++ - tick has 1 added because it needs an extra radius of chunks to load before they can be marked ticking ++ ++ Loading view distance is defined as the radius of chunks that will be brought to send-ready status, which means ++ it loads chunks in radius load-view-distance + 1. ++ ++ The maximum value for send view distance is the load view distance. API can set it lower. 
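A minimal sketch of the distance relationships spelled out in the comment above (load distance defaults to tick distance + 1 and never drops below it; send distance is capped at load distance). It mirrors the getLoadDistance()/getSendDistance() getters shown earlier in this class; the helper class and method names are hypothetical and not part of this patch.

// Sketch only: -1 means "use the default", matching the raw*Distance fields above.
final class ViewDistances {

    static int effectiveLoadDistance(final int rawLoadDistance, final int tickDistance) {
        return rawLoadDistance == -1 ? tickDistance + 1 : Math.max(tickDistance + 1, rawLoadDistance);
    }

    static int effectiveSendDistance(final int rawSendDistance, final int loadDistance) {
        return rawSendDistance == -1 ? loadDistance : Math.min(rawSendDistance, loadDistance);
    }

    public static void main(final String[] args) {
        final int tick = 10;
        final int load = effectiveLoadDistance(-1, tick); // 11
        final int send = effectiveSendDistance(-1, load); // 11
        System.out.println("tick=" + tick + " load=" + load + " send=" + send);
    }
}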
++ */ ++ ++ public PlayerChunkLoader(final ChunkMap chunkMap, final PooledLinkedHashSets pooledHashSets) { ++ this.chunkMap = chunkMap; ++ this.broadcastMap = new PlayerAreaMap(pooledHashSets, ++ null, ++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ, ++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> { ++ PlayerChunkLoader.this.onChunkLeave(player, rangeX, rangeZ); ++ }); ++ this.loadMap = new PlayerAreaMap(pooledHashSets, ++ null, ++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ, ++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> { ++ if (newState != null) { ++ return; ++ } ++ PlayerChunkLoader.this.isTargetedForPlayerLoad.remove(CoordinateUtils.getChunkKey(rangeX, rangeZ)); ++ }); ++ this.loadTicketCleanup = new PlayerAreaMap(pooledHashSets, ++ null, ++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ, ++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> { ++ if (newState != null) { ++ return; ++ } ++ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ); ++ PlayerChunkLoader.this.chunkMap.level.getChunkSource().removeTicketAtLevel(TicketType.PLAYER, chunkPos, LOADED_TICKET_LEVEL, chunkPos); ++ if (PlayerChunkLoader.this.chunkTicketTracker.remove(chunkPos.toLong())) { ++ --PlayerChunkLoader.this.concurrentChunkLoads; ++ } ++ }); ++ this.tickMap = new PlayerAreaMap(pooledHashSets, ++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ, ++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> { ++ if (newState.size() != 1) { ++ return; ++ } ++ LevelChunk chunk = PlayerChunkLoader.this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(rangeX, rangeZ); ++ if (chunk == null || !chunk.areNeighboursLoaded(2)) { ++ return; ++ } ++ ++ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ); ++ PlayerChunkLoader.this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos); ++ }, ++ (ServerPlayer player, int rangeX, int rangeZ, int currPosX, int currPosZ, int prevPosX, int prevPosZ, ++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet newState) -> { ++ if (newState != null) { ++ return; ++ } ++ ChunkPos chunkPos = new ChunkPos(rangeX, rangeZ); ++ PlayerChunkLoader.this.chunkMap.level.getChunkSource().removeTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos); ++ }); ++ } ++ ++ protected final LongOpenHashSet isTargetedForPlayerLoad = new LongOpenHashSet(); ++ protected final LongOpenHashSet chunkTicketTracker = new LongOpenHashSet(); ++ ++ public boolean isChunkNearPlayers(final int chunkX, final int chunkZ) { ++ final PooledLinkedHashSets.PooledObjectLinkedOpenHashSet playersInSendRange = this.broadcastMap.getObjectsInRange(chunkX, chunkZ); ++ ++ return playersInSendRange != null; ++ } ++ ++ public void onChunkPostProcessing(final int chunkX, final int chunkZ) { ++ this.onChunkSendReady(chunkX, chunkZ); ++ } ++ ++ private boolean chunkNeedsPostProcessing(final int chunkX, final int chunkZ) { ++ final long key = CoordinateUtils.getChunkKey(chunkX, chunkZ); ++ final ChunkHolder chunk = this.chunkMap.getVisibleChunkIfPresent(key); ++ ++ if (chunk == null) { ++ return false; ++ } ++ ++ final 
LevelChunk levelChunk = chunk.getSendingChunk(); ++ ++ return levelChunk != null && !levelChunk.isPostProcessingDone; ++ } ++ ++ // rets whether the chunk is at a loaded stage that is ready to be sent to players ++ public boolean isChunkPlayerLoaded(final int chunkX, final int chunkZ) { ++ final long key = CoordinateUtils.getChunkKey(chunkX, chunkZ); ++ final ChunkHolder chunk = this.chunkMap.getVisibleChunkIfPresent(key); ++ ++ if (chunk == null) { ++ return false; ++ } ++ ++ final LevelChunk levelChunk = chunk.getSendingChunk(); ++ ++ return levelChunk != null && levelChunk.isPostProcessingDone && this.isTargetedForPlayerLoad.contains(key); ++ } ++ ++ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ, final boolean borderOnly) { ++ return borderOnly ? this.isChunkSentBorderOnly(player, chunkX, chunkZ) : this.isChunkSent(player, chunkX, chunkZ); ++ } ++ ++ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ) { ++ final PlayerLoaderData data = this.playerMap.get(player); ++ if (data == null) { ++ return false; ++ } ++ ++ return data.hasSentChunk(chunkX, chunkZ); ++ } ++ ++ public boolean isChunkSentBorderOnly(final ServerPlayer player, final int chunkX, final int chunkZ) { ++ final PlayerLoaderData data = this.playerMap.get(player); ++ if (data == null) { ++ return false; ++ } ++ ++ final boolean center = data.hasSentChunk(chunkX, chunkZ); ++ if (!center) { ++ return false; ++ } ++ ++ return !(data.hasSentChunk(chunkX - 1, chunkZ) && data.hasSentChunk(chunkX + 1, chunkZ) && ++ data.hasSentChunk(chunkX, chunkZ - 1) && data.hasSentChunk(chunkX, chunkZ + 1)); ++ } ++ ++ protected int getMaxConcurrentChunkSends() { ++ return GlobalConfiguration.get().chunkLoading.maxConcurrentSends; ++ } ++ ++ protected int getMaxChunkLoads() { ++ double config = GlobalConfiguration.get().chunkLoading.playerMaxConcurrentLoads; ++ double max = GlobalConfiguration.get().chunkLoading.globalMaxConcurrentLoads; ++ return (int)Math.ceil(Math.min(config * MinecraftServer.getServer().getPlayerCount(), max <= 1.0 ? Double.MAX_VALUE : max)); ++ } ++ ++ protected long getTargetSendPerPlayerAddend() { ++ return GlobalConfiguration.get().chunkLoading.targetPlayerChunkSendRate <= 1.0 ? 0L : (long)Math.round(1.0e9 / GlobalConfiguration.get().chunkLoading.targetPlayerChunkSendRate); ++ } ++ ++ protected long getMaxSendAddend() { ++ return GlobalConfiguration.get().chunkLoading.globalMaxChunkSendRate <= 1.0 ? 
0L : (long)Math.round(1.0e9 / GlobalConfiguration.get().chunkLoading.globalMaxChunkSendRate); ++ } ++ ++ public void onChunkPlayerTickReady(final int chunkX, final int chunkZ) { ++ final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ); ++ this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, TICK_TICKET_LEVEL, chunkPos); ++ } ++ ++ public void onChunkSendReady(final int chunkX, final int chunkZ) { ++ final PooledLinkedHashSets.PooledObjectLinkedOpenHashSet playersInSendRange = this.broadcastMap.getObjectsInRange(chunkX, chunkZ); ++ ++ if (playersInSendRange == null) { ++ return; ++ } ++ ++ final Object[] rawData = playersInSendRange.getBackingSet(); ++ for (int i = 0, len = rawData.length; i < len; ++i) { ++ final Object raw = rawData[i]; ++ ++ if (!(raw instanceof ServerPlayer)) { ++ continue; ++ } ++ this.onChunkSendReady((ServerPlayer)raw, chunkX, chunkZ); ++ } ++ } ++ ++ public void onChunkSendReady(final ServerPlayer player, final int chunkX, final int chunkZ) { ++ final PlayerLoaderData data = this.playerMap.get(player); ++ ++ if (data == null) { ++ return; ++ } ++ ++ if (data.hasSentChunk(chunkX, chunkZ) || !this.isChunkPlayerLoaded(chunkX, chunkZ)) { ++ // if we don't have player tickets, then the load logic will pick this up and queue to send ++ return; ++ } ++ ++ if (!data.chunksToBeSent.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) { ++ // don't queue to send, we don't want the chunk ++ return; ++ } ++ ++ final long playerPos = this.broadcastMap.getLastCoordinate(player); ++ final int playerChunkX = CoordinateUtils.getChunkX(playerPos); ++ final int playerChunkZ = CoordinateUtils.getChunkZ(playerPos); ++ final int manhattanDistance = Math.abs(playerChunkX - chunkX) + Math.abs(playerChunkZ - chunkZ); ++ ++ final ChunkPriorityHolder holder = new ChunkPriorityHolder(chunkX, chunkZ, manhattanDistance, 0.0); ++ data.sendQueue.add(holder); ++ } ++ ++ public void onChunkLoad(final int chunkX, final int chunkZ) { ++ if (this.chunkTicketTracker.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) { ++ --this.concurrentChunkLoads; ++ } ++ } ++ ++ public void onChunkLeave(final ServerPlayer player, final int chunkX, final int chunkZ) { ++ final PlayerLoaderData data = this.playerMap.get(player); ++ ++ if (data == null) { ++ return; ++ } ++ ++ data.unloadChunk(chunkX, chunkZ); ++ } ++ ++ public void addPlayer(final ServerPlayer player) { ++ TickThread.ensureTickThread("Cannot add player async"); ++ if (!player.isRealPlayer) { ++ return; ++ } ++ final PlayerLoaderData data = new PlayerLoaderData(player, this); ++ if (this.playerMap.putIfAbsent(player, data) == null) { ++ data.update(); ++ } ++ } ++ ++ public void removePlayer(final ServerPlayer player) { ++ TickThread.ensureTickThread("Cannot remove player async"); ++ if (!player.isRealPlayer) { ++ return; ++ } ++ ++ final PlayerLoaderData loaderData = this.playerMap.remove(player); ++ if (loaderData == null) { ++ return; ++ } ++ loaderData.remove(); ++ this.chunkLoadQueue.remove(loaderData); ++ this.chunkSendQueue.remove(loaderData); ++ this.chunkSendWaitQueue.remove(loaderData); ++ synchronized (this.sendingChunkCounts) { ++ final int count = this.sendingChunkCounts.removeInt(loaderData); ++ if (count != 0) { ++ concurrentChunkSends.getAndAdd(-count); ++ } ++ } ++ } ++ ++ public void updatePlayer(final ServerPlayer player) { ++ TickThread.ensureTickThread("Cannot update player async"); ++ if (!player.isRealPlayer) { ++ return; ++ } ++ final PlayerLoaderData loaderData = this.playerMap.get(player); ++ if 
(loaderData != null) { ++ loaderData.update(); ++ } ++ } ++ ++ public PlayerLoaderData getData(final ServerPlayer player) { ++ return this.playerMap.get(player); ++ } ++ ++ public void tick() { ++ TickThread.ensureTickThread("Cannot tick async"); ++ for (final PlayerLoaderData data : this.playerMap.values()) { ++ data.update(); ++ } ++ this.tickMidTick(); ++ } ++ ++ protected static final AtomicInteger concurrentChunkSends = new AtomicInteger(); ++ protected final Reference2IntOpenHashMap sendingChunkCounts = new Reference2IntOpenHashMap<>(); ++ private static long nextChunkSend; ++ private void trySendChunks() { ++ final long time = System.nanoTime(); ++ if (nextChunkSend - time > 0) { ++ return; ++ } ++ // drain entries from wait queue ++ while (!this.chunkSendWaitQueue.isEmpty()) { ++ final PlayerLoaderData data = this.chunkSendWaitQueue.first(); ++ ++ if (data.nextChunkSendTarget - time > 0) { ++ break; ++ } ++ ++ this.chunkSendWaitQueue.pollFirst(); ++ ++ this.chunkSendQueue.add(data); ++ } ++ ++ if (this.chunkSendQueue.isEmpty()) { ++ return; ++ } ++ ++ final int maxSends = this.getMaxConcurrentChunkSends(); ++ final long nextPlayerDeadline = this.getTargetSendPerPlayerAddend() + time; ++ for (;;) { ++ if (this.chunkSendQueue.isEmpty()) { ++ break; ++ } ++ final int currSends = concurrentChunkSends.get(); ++ if (currSends >= maxSends) { ++ break; ++ } ++ ++ if (!concurrentChunkSends.compareAndSet(currSends, currSends + 1)) { ++ continue; ++ } ++ ++ // send chunk ++ ++ final PlayerLoaderData data = this.chunkSendQueue.removeFirst(); ++ ++ final ChunkPriorityHolder queuedSend = data.sendQueue.pollFirst(); ++ if (queuedSend == null) { ++ concurrentChunkSends.getAndDecrement(); // we never sent, so decrease ++ // stop iterating over players who have nothing to send ++ if (this.chunkSendQueue.isEmpty()) { ++ // nothing left ++ break; ++ } ++ continue; ++ } ++ ++ if (!this.isChunkPlayerLoaded(queuedSend.chunkX, queuedSend.chunkZ)) { ++ throw new IllegalStateException(); ++ } ++ ++ data.nextChunkSendTarget = nextPlayerDeadline; ++ this.chunkSendWaitQueue.add(data); ++ ++ synchronized (this.sendingChunkCounts) { ++ this.sendingChunkCounts.addTo(data, 1); ++ } ++ ++ data.sendChunk(queuedSend.chunkX, queuedSend.chunkZ, () -> { ++ synchronized (this.sendingChunkCounts) { ++ final int count = this.sendingChunkCounts.getInt(data); ++ if (count == 0) { ++ // disconnected, so we don't need to decrement: it will be decremented for us ++ return; ++ } ++ if (count == 1) { ++ this.sendingChunkCounts.removeInt(data); ++ } else { ++ this.sendingChunkCounts.put(data, count - 1); ++ } ++ } ++ ++ concurrentChunkSends.getAndDecrement(); ++ }); ++ ++ nextChunkSend = this.getMaxSendAddend() + time; ++ if (nextChunkSend - time > 0) { ++ break; ++ } ++ } ++ } ++ ++ protected int concurrentChunkLoads; ++ // this interval prevents bursting a lot of chunk loads ++ protected static final IntervalledCounter TICKET_ADDITION_COUNTER_SHORT = new IntervalledCounter((long)(1.0e6 * 50.0)); // 50ms ++ // this interval ensures the rate is kept between ticks correctly ++ protected static final IntervalledCounter TICKET_ADDITION_COUNTER_LONG = new IntervalledCounter((long)(1.0e6 * 1000.0)); // 1000ms ++ private void tryLoadChunks() { ++ if (this.chunkLoadQueue.isEmpty()) { ++ return; ++ } ++ ++ final int maxLoads = this.getMaxChunkLoads(); ++ final long time = System.nanoTime(); ++ boolean updatedCounters = false; ++ for (;;) { ++ final PlayerLoaderData data = this.chunkLoadQueue.pollFirst(); ++ ++ data.lastChunkLoad = time; 
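The send throttle in trySendChunks works by turning the configured chunks-per-second rates into nanosecond gaps (as getTargetSendPerPlayerAddend and getMaxSendAddend do above) and comparing them against rolling System.nanoTime() deadlines with overflow-safe subtraction. A minimal standalone sketch of that conversion and the deadline check, with illustrative names that are not part of the patch:

    // Standalone sketch: turning a "chunks per second" rate into a nanosecond gap
    // between sends and gating sends on a rolling deadline.
    public final class SendRateExample {
        // a rate <= 1.0 is treated as "unlimited", yielding a 0ns gap
        static long gapNanos(double chunksPerSecond) {
            return chunksPerSecond <= 1.0 ? 0L : Math.round(1.0e9 / chunksPerSecond);
        }

        public static void main(String[] args) {
            final long gap = gapNanos(100.0); // 100 chunks/s -> 10_000_000ns between sends
            long nextSend = System.nanoTime();
            int sent = 0;
            final long end = System.nanoTime() + 50_000_000L; // run for ~50ms
            while (System.nanoTime() - end < 0) {
                final long now = System.nanoTime();
                if (nextSend - now > 0) {
                    continue; // deadline not reached, not allowed to send yet
                }
                sent++;               // "send" a chunk
                nextSend = now + gap; // push the deadline forward by one gap
            }
            System.out.println("sent ~" + sent + " chunks in 50ms (expected ~5 at 100/s)");
        }
    }

Comparing deadlines with a subtraction such as nextSend - now > 0, rather than nextSend > now, keeps the check correct even if System.nanoTime() values wrap.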
++ ++ final ChunkPriorityHolder queuedLoad = data.loadQueue.peekFirst(); ++ if (queuedLoad == null) { ++ if (this.chunkLoadQueue.isEmpty()) { ++ break; ++ } ++ continue; ++ } ++ ++ if (!updatedCounters) { ++ updatedCounters = true; ++ TICKET_ADDITION_COUNTER_SHORT.updateCurrentTime(time); ++ TICKET_ADDITION_COUNTER_LONG.updateCurrentTime(time); ++ data.ticketAdditionCounterShort.updateCurrentTime(time); ++ data.ticketAdditionCounterLong.updateCurrentTime(time); ++ } ++ ++ if (this.isChunkPlayerLoaded(queuedLoad.chunkX, queuedLoad.chunkZ)) { ++ // already loaded! ++ data.loadQueue.pollFirst(); // already loaded so we just skip ++ this.chunkLoadQueue.add(data); ++ ++ // ensure the chunk is queued to send ++ this.onChunkSendReady(queuedLoad.chunkX, queuedLoad.chunkZ); ++ continue; ++ } ++ ++ final long chunkKey = CoordinateUtils.getChunkKey(queuedLoad.chunkX, queuedLoad.chunkZ); ++ ++ final double priority = queuedLoad.priority; ++ // while we do need to rate limit chunk loads, the logic for sending chunks requires that tickets are present. ++ // when chunks are loaded (i.e spawn) but do not have this player's tickets, they have to wait behind the ++ // load queue. To avoid this problem, we check early here if tickets are required to load the chunk - if they ++ // aren't required, it bypasses the limiter system. ++ boolean unloadedTargetChunk = false; ++ unloaded_check: ++ for (int dz = -1; dz <= 1; ++dz) { ++ for (int dx = -1; dx <= 1; ++dx) { ++ final int offX = queuedLoad.chunkX + dx; ++ final int offZ = queuedLoad.chunkZ + dz; ++ if (this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(offX, offZ) == null) { ++ unloadedTargetChunk = true; ++ break unloaded_check; ++ } ++ } ++ } ++ if (unloadedTargetChunk && priority >= 0.0) { ++ // priority >= 0.0 implies rate limited chunks ++ ++ final int currentChunkLoads = this.concurrentChunkLoads; ++ if (currentChunkLoads >= maxLoads || (GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate > 0 && (TICKET_ADDITION_COUNTER_SHORT.getRate() >= GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate || TICKET_ADDITION_COUNTER_LONG.getRate() >= GlobalConfiguration.get().chunkLoading.globalMaxChunkLoadRate)) ++ || (GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate > 0.0 && (data.ticketAdditionCounterShort.getRate() >= GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate || data.ticketAdditionCounterLong.getRate() >= GlobalConfiguration.get().chunkLoading.playerMaxChunkLoadRate))) { ++ // don't poll, we didn't load it ++ this.chunkLoadQueue.add(data); ++ break; ++ } ++ } ++ ++ // can only poll after we decide to load ++ data.loadQueue.pollFirst(); ++ ++ // now that we've polled we can re-add to load queue ++ this.chunkLoadQueue.add(data); ++ ++ // add necessary tickets to load chunk up to send-ready ++ for (int dz = -1; dz <= 1; ++dz) { ++ for (int dx = -1; dx <= 1; ++dx) { ++ final int offX = queuedLoad.chunkX + dx; ++ final int offZ = queuedLoad.chunkZ + dz; ++ final ChunkPos chunkPos = new ChunkPos(offX, offZ); ++ ++ this.chunkMap.level.getChunkSource().addTicketAtLevel(TicketType.PLAYER, chunkPos, LOADED_TICKET_LEVEL, chunkPos); ++ if (this.chunkMap.level.getChunkSource().getChunkAtIfLoadedMainThreadNoCache(offX, offZ) != null) { ++ continue; ++ } ++ ++ if (priority > 0.0 && this.chunkTicketTracker.add(CoordinateUtils.getChunkKey(offX, offZ))) { ++ // won't reach here if unloadedTargetChunk is false ++ ++this.concurrentChunkLoads; ++ TICKET_ADDITION_COUNTER_SHORT.addTime(time); ++ 
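The two counter windows used here (50ms and 1000ms) limit a burst within a single tick and the sustained rate across ticks independently, which is why both rates are checked before tickets are added. The toy sliding-window counter below exposes the same three operations the patch calls on IntervalledCounter (updateCurrentTime, addTime, getRate); it is an assumed approximation for illustration, not the real class:

    import java.util.ArrayDeque;

    // Toy stand-in for the sliding-window counters above: addTime() records an event,
    // updateCurrentTime() expires events older than the window, and getRate() reports
    // events per second over the window.
    final class WindowCounter {
        private final long windowNanos;
        private final ArrayDeque<Long> times = new ArrayDeque<>();

        WindowCounter(final long windowNanos) {
            this.windowNanos = windowNanos;
        }

        void updateCurrentTime(final long timeNanos) {
            while (!this.times.isEmpty() && timeNanos - this.times.peekFirst() > this.windowNanos) {
                this.times.pollFirst();
            }
        }

        void addTime(final long timeNanos) {
            this.times.addLast(timeNanos);
        }

        double getRate() {
            return this.times.size() / (this.windowNanos / 1.0e9);
        }

        public static void main(String[] args) {
            final WindowCounter shortWindow = new WindowCounter((long)(1.0e6 * 50.0));  // 50ms: catches bursts
            final WindowCounter longWindow = new WindowCounter((long)(1.0e6 * 1000.0)); // 1s: sustained rate
            final long now = System.nanoTime();
            for (int i = 0; i < 10; ++i) { // 10 loads at the same instant
                shortWindow.addTime(now);
                longWindow.addTime(now);
            }
            shortWindow.updateCurrentTime(now);
            longWindow.updateCurrentTime(now);
            // the short window reports a far higher instantaneous rate (200/s) than the long one (10/s)
            System.out.println(shortWindow.getRate() + " vs " + longWindow.getRate());
        }
    }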
TICKET_ADDITION_COUNTER_LONG.addTime(time); ++ data.ticketAdditionCounterShort.addTime(time); ++ data.ticketAdditionCounterLong.addTime(time); ++ } ++ } ++ } ++ ++ // mark that we've added tickets here ++ this.isTargetedForPlayerLoad.add(chunkKey); ++ ++ // it's possible all we needed was the player tickets to queue up the send. ++ if (this.isChunkPlayerLoaded(queuedLoad.chunkX, queuedLoad.chunkZ)) { ++ // yup, all we needed. ++ this.onChunkSendReady(queuedLoad.chunkX, queuedLoad.chunkZ); ++ } else if (this.chunkNeedsPostProcessing(queuedLoad.chunkX, queuedLoad.chunkZ)) { ++ // requires post processing ++ this.chunkMap.mainThreadExecutor.execute(() -> { ++ final long key = CoordinateUtils.getChunkKey(queuedLoad.chunkX, queuedLoad.chunkZ); ++ final ChunkHolder holder = PlayerChunkLoader.this.chunkMap.getVisibleChunkIfPresent(key); ++ ++ if (holder == null) { ++ return; ++ } ++ ++ final LevelChunk chunk = holder.getSendingChunk(); ++ ++ if (chunk != null && !chunk.isPostProcessingDone) { ++ chunk.postProcessGeneration(); ++ } ++ }); ++ } ++ } ++ } ++ ++ public void tickMidTick() { ++ // try to send more chunks ++ this.trySendChunks(); ++ ++ // try to queue more chunks to load ++ this.tryLoadChunks(); ++ } ++ ++ static final class ChunkPriorityHolder { ++ public final int chunkX; ++ public final int chunkZ; ++ public final int manhattanDistanceToPlayer; ++ public final double priority; ++ ++ public ChunkPriorityHolder(final int chunkX, final int chunkZ, final int manhattanDistanceToPlayer, final double priority) { ++ this.chunkX = chunkX; ++ this.chunkZ = chunkZ; ++ this.manhattanDistanceToPlayer = manhattanDistanceToPlayer; ++ this.priority = priority; ++ } ++ } ++ ++ public static final class PlayerLoaderData { ++ ++ protected static final float FOV = 110.0f; ++ protected static final double PRIORITISED_DISTANCE = 12.0 * 16.0; ++ ++ // Player max sprint speed is approximately 8m/s ++ protected static final double LOOK_PRIORITY_SPEED_THRESHOLD = (10.0/20.0) * (10.0/20.0); ++ protected static final double LOOK_PRIORITY_YAW_DELTA_RECALC_THRESHOLD = 3.0f; ++ ++ protected double lastLocX = Double.NEGATIVE_INFINITY; ++ protected double lastLocZ = Double.NEGATIVE_INFINITY; ++ ++ protected int lastChunkX = Integer.MIN_VALUE; ++ protected int lastChunkZ = Integer.MIN_VALUE; ++ ++ // this is corrected so that 0 is along the positive x-axis ++ protected float lastYaw = Float.NEGATIVE_INFINITY; ++ ++ protected int lastSendDistance = Integer.MIN_VALUE; ++ protected int lastLoadDistance = Integer.MIN_VALUE; ++ protected int lastTickDistance = Integer.MIN_VALUE; ++ protected boolean usingLookingPriority; ++ ++ protected final ServerPlayer player; ++ protected final PlayerChunkLoader loader; ++ ++ // warning: modifications of this field must be aware that the loadQueue inside PlayerChunkLoader uses this field ++ // in a comparator! 
++ protected final ArrayDeque loadQueue = new ArrayDeque<>(); ++ protected final LongOpenHashSet sentChunks = new LongOpenHashSet(); ++ protected final LongOpenHashSet chunksToBeSent = new LongOpenHashSet(); ++ ++ protected final TreeSet sendQueue = new TreeSet<>((final ChunkPriorityHolder p1, final ChunkPriorityHolder p2) -> { ++ final int distanceCompare = Integer.compare(p1.manhattanDistanceToPlayer, p2.manhattanDistanceToPlayer); ++ if (distanceCompare != 0) { ++ return distanceCompare; ++ } ++ ++ final int coordinateXCompare = Integer.compare(p1.chunkX, p2.chunkX); ++ if (coordinateXCompare != 0) { ++ return coordinateXCompare; ++ } ++ ++ return Integer.compare(p1.chunkZ, p2.chunkZ); ++ }); ++ ++ protected int sendViewDistance = -1; ++ protected int loadViewDistance = -1; ++ protected int tickViewDistance = -1; ++ ++ protected long nextChunkSendTarget; ++ ++ // this interval prevents bursting a lot of chunk loads ++ protected final IntervalledCounter ticketAdditionCounterShort = new IntervalledCounter((long)(1.0e6 * 50.0)); // 50ms ++ // this ensures the rate is kept between ticks correctly ++ protected final IntervalledCounter ticketAdditionCounterLong = new IntervalledCounter((long)(1.0e6 * 1000.0)); // 1000ms ++ ++ public long lastChunkLoad; ++ ++ public PlayerLoaderData(final ServerPlayer player, final PlayerChunkLoader loader) { ++ this.player = player; ++ this.loader = loader; ++ } ++ ++ // these view distance methods are for api ++ public int getTargetSendViewDistance() { ++ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance; ++ final int loadViewDistance = Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance); ++ final int clientViewDistance = this.getClientViewDistance(); ++ final int sendViewDistance = Math.min(loadViewDistance, this.sendViewDistance == -1 ? (!GlobalConfiguration.get().chunkLoading.autoconfigSendDistance || clientViewDistance == -1 ? this.loader.getSendDistance() : clientViewDistance + 1) : this.sendViewDistance); ++ return sendViewDistance; ++ } ++ ++ public void setTargetSendViewDistance(final int distance) { ++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE + 1)) { ++ throw new IllegalArgumentException("Send view distance must be a number between " + MIN_VIEW_DISTANCE + " and " + (MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance); ++ } ++ this.sendViewDistance = distance; ++ } ++ ++ public int getTargetNoTickViewDistance() { ++ return (this.loadViewDistance == -1 ? this.getLoadDistance() : this.loadViewDistance) - 1; ++ } ++ ++ public void setTargetNoTickViewDistance(final int distance) { ++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE)) { ++ throw new IllegalArgumentException("Simulation distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + " or -1, got: " + distance); ++ } ++ this.loadViewDistance = distance == -1 ? -1 : distance + 1; ++ } ++ ++ public int getTargetTickViewDistance() { ++ return this.tickViewDistance == -1 ? 
this.loader.getTickDistance() : this.tickViewDistance; ++ } ++ ++ public void setTargetTickViewDistance(final int distance) { ++ if (distance != -1 && (distance < MIN_VIEW_DISTANCE || distance > MAX_VIEW_DISTANCE)) { ++ throw new IllegalArgumentException("View distance must be a number between " + MIN_VIEW_DISTANCE + " and " + MAX_VIEW_DISTANCE + " or -1, got: " + distance); ++ } ++ this.tickViewDistance = distance; ++ } ++ ++ protected int getLoadDistance() { ++ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance; ++ ++ return Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance); ++ } ++ ++ public boolean hasSentChunk(final int chunkX, final int chunkZ) { ++ return this.sentChunks.contains(CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ } ++ ++ public void sendChunk(final int chunkX, final int chunkZ, final Runnable onChunkSend) { ++ if (this.sentChunks.add(CoordinateUtils.getChunkKey(chunkX, chunkZ))) { ++ this.player.getLevel().getChunkSource().chunkMap.updateChunkTracking(this.player, ++ new ChunkPos(chunkX, chunkZ), new MutableObject<>(), false, true); // unloaded, loaded ++ this.player.connection.connection.execute(onChunkSend); ++ } else { ++ throw new IllegalStateException(); ++ } ++ } ++ ++ public void unloadChunk(final int chunkX, final int chunkZ) { ++ if (this.sentChunks.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) { ++ this.player.getLevel().getChunkSource().chunkMap.updateChunkTracking(this.player, ++ new ChunkPos(chunkX, chunkZ), null, true, false); // unloaded, loaded ++ } ++ } ++ ++ protected static boolean wantChunkLoaded(final int centerX, final int centerZ, final int chunkX, final int chunkZ, ++ final int sendRadius) { ++ // expect sendRadius to be = 1 + target viewable radius ++ return ChunkMap.isChunkInRange(chunkX, chunkZ, centerX, centerZ, sendRadius); ++ } ++ ++ protected static boolean triangleIntersects(final double p1x, final double p1z, // triangle point ++ final double p2x, final double p2z, // triangle point ++ final double p3x, final double p3z, // triangle point ++ ++ final double targetX, final double targetZ) { // point ++ // from barycentric coordinates: ++ // targetX = a*p1x + b*p2x + c*p3x ++ // targetZ = a*p1z + b*p2z + c*p3z ++ // 1.0 = a*1.0 + b*1.0 + c*1.0 ++ // where a, b, c >= 0.0 ++ // so, if any of a, b, c are less-than zero then there is no intersection. ++ ++ // d = ((p2z - p3z)(p1x - p3x) + (p3x - p2x)(p1z - p3z)) ++ // a = ((p2z - p3z)(targetX - p3x) + (p3x - p2x)(targetZ - p3z)) / d ++ // b = ((p3z - p1z)(targetX - p3x) + (p1x - p3x)(targetZ - p3z)) / d ++ // c = 1.0 - a - b ++ ++ final double d = (p2z - p3z)*(p1x - p3x) + (p3x - p2x)*(p1z - p3z); ++ final double a = ((p2z - p3z)*(targetX - p3x) + (p3x - p2x)*(targetZ - p3z)) / d; ++ ++ if (a < 0.0 || a > 1.0) { ++ return false; ++ } ++ ++ final double b = ((p3z - p1z)*(targetX - p3x) + (p1x - p3x)*(targetZ - p3z)) / d; ++ if (b < 0.0 || b > 1.0) { ++ return false; ++ } ++ ++ final double c = 1.0 - a - b; ++ ++ return c >= 0.0 && c <= 1.0; ++ } ++ ++ public void remove() { ++ this.loader.broadcastMap.remove(this.player); ++ this.loader.loadMap.remove(this.player); ++ this.loader.loadTicketCleanup.remove(this.player); ++ this.loader.tickMap.remove(this.player); ++ } ++ ++ protected int getClientViewDistance() { ++ return this.player.clientViewDistance == null ? 
-1 : Math.max(0, this.player.clientViewDistance.intValue()); ++ } ++ ++ public void update() { ++ final int tickViewDistance = this.tickViewDistance == -1 ? this.loader.getTickDistance() : this.tickViewDistance; ++ // load view cannot be less-than tick view + 1 ++ final int loadViewDistance = Math.max(tickViewDistance + 1, this.loadViewDistance == -1 ? this.loader.getLoadDistance() : this.loadViewDistance); ++ // send view cannot be greater-than load view ++ final int clientViewDistance = this.getClientViewDistance(); ++ final int sendViewDistance = Math.min(loadViewDistance, this.sendViewDistance == -1 ? (!GlobalConfiguration.get().chunkLoading.autoconfigSendDistance || clientViewDistance == -1 ? this.loader.getSendDistance() : clientViewDistance + 1) : this.sendViewDistance); ++ ++ final double posX = this.player.getX(); ++ final double posZ = this.player.getZ(); ++ final float yaw = MCUtil.normalizeYaw(this.player.getYRot() + 90.0f); // mc yaw 0 is along the positive z axis, but obviously this is really dumb - offset so we are at positive x-axis ++ ++ // in general, we really only want to prioritise chunks in front if we know we're moving pretty fast into them. ++ final boolean useLookPriority = GlobalConfiguration.get().chunkLoading.enableFrustumPriority && (this.player.getDeltaMovement().horizontalDistanceSqr() > LOOK_PRIORITY_SPEED_THRESHOLD || ++ this.player.getAbilities().flying); ++ ++ // make sure we're in the send queue ++ this.loader.chunkSendWaitQueue.add(this); ++ ++ if ( ++ // has view distance stayed the same? ++ sendViewDistance == this.lastSendDistance ++ && loadViewDistance == this.lastLoadDistance ++ && tickViewDistance == this.lastTickDistance ++ ++ && (this.usingLookingPriority ? ( ++ // has our block stayed the same (this also accounts for chunk change)? ++ Mth.floor(this.lastLocX) == Mth.floor(posX) ++ && Mth.floor(this.lastLocZ) == Mth.floor(posZ) ++ ) : ( ++ // has our chunk stayed the same ++ (Mth.floor(this.lastLocX) >> 4) == (Mth.floor(posX) >> 4) ++ && (Mth.floor(this.lastLocZ) >> 4) == (Mth.floor(posZ) >> 4) ++ )) ++ ++ // has our decision about look priority changed? ++ && this.usingLookingPriority == useLookPriority ++ ++ // if we are currently using look priority, has our yaw stayed within recalc threshold? 
++ && (!this.usingLookingPriority || Math.abs(yaw - this.lastYaw) <= LOOK_PRIORITY_YAW_DELTA_RECALC_THRESHOLD) ++ ) { ++ // nothing we care about changed, so we're not re-calculating ++ return; ++ } ++ ++ final int centerChunkX = Mth.floor(posX) >> 4; ++ final int centerChunkZ = Mth.floor(posZ) >> 4; ++ ++ final boolean needsChunkCenterUpdate = (centerChunkX != this.lastChunkX) || (centerChunkZ != this.lastChunkZ); ++ this.loader.broadcastMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, sendViewDistance); ++ this.loader.loadMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, loadViewDistance); ++ this.loader.loadTicketCleanup.addOrUpdate(this.player, centerChunkX, centerChunkZ, loadViewDistance + 1); ++ this.loader.tickMap.addOrUpdate(this.player, centerChunkX, centerChunkZ, tickViewDistance); ++ ++ if (sendViewDistance != this.lastSendDistance) { ++ // update the view radius for client ++ // note that this should be after the map calls because the client wont expect unload calls not in its VD ++ // and it's possible we decreased VD here ++ this.player.connection.send(new ClientboundSetChunkCacheRadiusPacket(sendViewDistance)); ++ } ++ if (tickViewDistance != this.lastTickDistance) { ++ this.player.connection.send(new ClientboundSetSimulationDistancePacket(tickViewDistance)); ++ } ++ ++ this.lastLocX = posX; ++ this.lastLocZ = posZ; ++ this.lastYaw = yaw; ++ this.lastSendDistance = sendViewDistance; ++ this.lastLoadDistance = loadViewDistance; ++ this.lastTickDistance = tickViewDistance; ++ this.usingLookingPriority = useLookPriority; ++ ++ this.lastChunkX = centerChunkX; ++ this.lastChunkZ = centerChunkZ; ++ ++ // points for player "view" triangle: ++ ++ // obviously, the player pos is a vertex ++ final double p1x = posX; ++ final double p1z = posZ; ++ ++ // to the left of the looking direction ++ final double p2x = PRIORITISED_DISTANCE * Math.cos(Math.toRadians(yaw + (double)(FOV / 2.0))) // calculate rotated vector ++ + p1x; // offset vector ++ final double p2z = PRIORITISED_DISTANCE * Math.sin(Math.toRadians(yaw + (double)(FOV / 2.0))) // calculate rotated vector ++ + p1z; // offset vector ++ ++ // to the right of the looking direction ++ final double p3x = PRIORITISED_DISTANCE * Math.cos(Math.toRadians(yaw - (double)(FOV / 2.0))) // calculate rotated vector ++ + p1x; // offset vector ++ final double p3z = PRIORITISED_DISTANCE * Math.sin(Math.toRadians(yaw - (double)(FOV / 2.0))) // calculate rotated vector ++ + p1z; // offset vector ++ ++ // now that we have all of our points, we can recalculate the load queue ++ ++ final List loadQueue = new ArrayList<>(); ++ ++ // clear send queue, we are re-sorting ++ this.sendQueue.clear(); ++ // clear chunk want set, vd/position might have changed ++ this.chunksToBeSent.clear(); ++ ++ final int searchViewDistance = Math.max(loadViewDistance, sendViewDistance); ++ ++ for (int dx = -searchViewDistance; dx <= searchViewDistance; ++dx) { ++ for (int dz = -searchViewDistance; dz <= searchViewDistance; ++dz) { ++ final int chunkX = dx + centerChunkX; ++ final int chunkZ = dz + centerChunkZ; ++ final int squareDistance = Math.max(Math.abs(dx), Math.abs(dz)); ++ final boolean sendChunk = squareDistance <= sendViewDistance && wantChunkLoaded(centerChunkX, centerChunkZ, chunkX, chunkZ, sendViewDistance); ++ ++ if (this.hasSentChunk(chunkX, chunkZ)) { ++ // already sent (which means it is also loaded) ++ if (!sendChunk) { ++ // have sent the chunk, but don't want it anymore ++ // unload it now ++ this.unloadChunk(chunkX, chunkZ); ++ } ++ 
continue; ++ } ++ ++ final boolean loadChunk = squareDistance <= loadViewDistance; ++ ++ final boolean prioritised = useLookPriority && triangleIntersects( ++ // prioritisation triangle ++ p1x, p1z, p2x, p2z, p3x, p3z, ++ ++ // center of chunk ++ (double)((chunkX << 4) | 8), (double)((chunkZ << 4) | 8) ++ ); ++ ++ final int manhattanDistance = Math.abs(dx) + Math.abs(dz); ++ ++ final double priority; ++ ++ if (squareDistance <= GlobalConfiguration.get().chunkLoading.minLoadRadius) { ++ // priority should be negative, and we also want to order it from center outwards ++ // so we want (0,0) to be the smallest, and (minLoadedRadius,minLoadedRadius) to be the greatest ++ priority = -((2 * GlobalConfiguration.get().chunkLoading.minLoadRadius + 1) - manhattanDistance); ++ } else { ++ if (prioritised) { ++ // we don't prioritise these chunks above others because we also want to make sure some chunks ++ // will be loaded if the player changes direction ++ priority = (double)manhattanDistance / 6.0; ++ } else { ++ priority = (double)manhattanDistance; ++ } ++ } ++ ++ final ChunkPriorityHolder holder = new ChunkPriorityHolder(chunkX, chunkZ, manhattanDistance, priority); ++ ++ if (!this.loader.isChunkPlayerLoaded(chunkX, chunkZ)) { ++ if (loadChunk) { ++ loadQueue.add(holder); ++ if (sendChunk) { ++ this.chunksToBeSent.add(CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ } ++ } ++ } else { ++ // loaded but not sent: so queue it! ++ if (sendChunk) { ++ this.sendQueue.add(holder); ++ } ++ } ++ } ++ } ++ ++ loadQueue.sort((final ChunkPriorityHolder p1, final ChunkPriorityHolder p2) -> { ++ return Double.compare(p1.priority, p2.priority); ++ }); ++ ++ // we're modifying loadQueue, must remove ++ this.loader.chunkLoadQueue.remove(this); ++ ++ this.loadQueue.clear(); ++ this.loadQueue.addAll(loadQueue); ++ ++ // must re-add ++ this.loader.chunkLoadQueue.add(this); ++ ++ // update the chunk center ++ // this must be done last so that the client does not ignore any of our unload chunk packets ++ if (needsChunkCenterUpdate) { ++ this.player.connection.send(new ClientboundSetChunkCacheCenterPacket(centerChunkX, centerChunkZ)); ++ } ++ } ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java +index 8a5e93961dac4d87c81c0e70b6f4124a1f1d2556..0dc94dec1317b3f86d38074c6cbe41ab828cab1d 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java ++++ b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java +@@ -31,191 +31,41 @@ public final class ChunkSystem { + } + + public static void scheduleChunkTask(final ServerLevel level, final int chunkX, final int chunkZ, final Runnable run, final PrioritisedExecutor.Priority priority) { +- level.chunkSource.mainThreadProcessor.execute(run); ++ level.chunkTaskScheduler.scheduleChunkTask(chunkX, chunkZ, run, priority); // Paper - rewrite chunk system + } + + public static void scheduleChunkLoad(final ServerLevel level, final int chunkX, final int chunkZ, final boolean gen, + final ChunkStatus toStatus, final boolean addTicket, final PrioritisedExecutor.Priority priority, + final Consumer onComplete) { +- if (gen) { +- scheduleChunkLoad(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete); +- return; +- } +- scheduleChunkLoad(level, chunkX, chunkZ, ChunkStatus.EMPTY, addTicket, priority, (final ChunkAccess chunk) -> { +- if (chunk == null) { +- onComplete.accept(null); +- } else { +- if (chunk.getStatus().isOrAfter(toStatus)) { +- 
scheduleChunkLoad(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete); +- } else { +- onComplete.accept(null); +- } +- } +- }); ++ level.chunkTaskScheduler.scheduleChunkLoad(chunkX, chunkZ, gen, toStatus, addTicket, priority, onComplete); // Paper - rewrite chunk system + } + +- static final TicketType CHUNK_LOAD = TicketType.create("chunk_load", Long::compareTo); +- +- private static long chunkLoadCounter = 0L; ++ // Paper - rewrite chunk system + public static void scheduleChunkLoad(final ServerLevel level, final int chunkX, final int chunkZ, final ChunkStatus toStatus, + final boolean addTicket, final PrioritisedExecutor.Priority priority, final Consumer onComplete) { +- if (!Bukkit.isPrimaryThread()) { +- scheduleChunkTask(level, chunkX, chunkZ, () -> { +- scheduleChunkLoad(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete); +- }, priority); +- return; +- } +- +- final int minLevel = 33 + ChunkStatus.getDistance(toStatus); +- final Long chunkReference = addTicket ? Long.valueOf(++chunkLoadCounter) : null; +- final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ); +- +- if (addTicket) { +- level.chunkSource.addTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference); +- } +- level.chunkSource.runDistanceManagerUpdates(); +- +- final Consumer loadCallback = (final ChunkAccess chunk) -> { +- try { +- if (onComplete != null) { +- onComplete.accept(chunk); +- } +- } catch (final ThreadDeath death) { +- throw death; +- } catch (final Throwable thr) { +- LOGGER.error("Exception handling chunk load callback", thr); +- SneakyThrow.sneaky(thr); +- } finally { +- if (addTicket) { +- level.chunkSource.addTicketAtLevel(TicketType.UNKNOWN, chunkPos, minLevel, chunkPos); +- level.chunkSource.removeTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference); +- } +- } +- }; +- +- final ChunkHolder holder = level.chunkSource.chunkMap.getUpdatingChunkIfPresent(CoordinateUtils.getChunkKey(chunkX, chunkZ)); +- +- if (holder == null || holder.getTicketLevel() > minLevel) { +- loadCallback.accept(null); +- return; +- } +- +- final CompletableFuture> loadFuture = holder.getOrScheduleFuture(toStatus, level.chunkSource.chunkMap); +- +- if (loadFuture.isDone()) { +- loadCallback.accept(loadFuture.join().left().orElse(null)); +- return; +- } +- +- loadFuture.whenCompleteAsync((final Either either, final Throwable thr) -> { +- if (thr != null) { +- loadCallback.accept(null); +- return; +- } +- loadCallback.accept(either.left().orElse(null)); +- }, (final Runnable r) -> { +- scheduleChunkTask(level, chunkX, chunkZ, r, PrioritisedExecutor.Priority.HIGHEST); +- }); ++ level.chunkTaskScheduler.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete); // Paper - rewrite chunk system + } + + public static void scheduleTickingState(final ServerLevel level, final int chunkX, final int chunkZ, + final ChunkHolder.FullChunkStatus toStatus, final boolean addTicket, + final PrioritisedExecutor.Priority priority, final Consumer onComplete) { +- if (toStatus == ChunkHolder.FullChunkStatus.INACCESSIBLE) { +- throw new IllegalArgumentException("Cannot wait for INACCESSIBLE status"); +- } +- +- if (!Bukkit.isPrimaryThread()) { +- scheduleChunkTask(level, chunkX, chunkZ, () -> { +- scheduleTickingState(level, chunkX, chunkZ, toStatus, addTicket, priority, onComplete); +- }, priority); +- return; +- } +- +- final int minLevel = 33 - (toStatus.ordinal() - 1); +- final int radius = toStatus.ordinal() - 1; +- final Long chunkReference = addTicket ? 
Long.valueOf(++chunkLoadCounter) : null; +- final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ); +- +- if (addTicket) { +- level.chunkSource.addTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference); +- } +- level.chunkSource.runDistanceManagerUpdates(); +- +- final Consumer loadCallback = (final LevelChunk chunk) -> { +- try { +- if (onComplete != null) { +- onComplete.accept(chunk); +- } +- } catch (final ThreadDeath death) { +- throw death; +- } catch (final Throwable thr) { +- LOGGER.error("Exception handling chunk load callback", thr); +- SneakyThrow.sneaky(thr); +- } finally { +- if (addTicket) { +- level.chunkSource.addTicketAtLevel(TicketType.UNKNOWN, chunkPos, minLevel, chunkPos); +- level.chunkSource.removeTicketAtLevel(CHUNK_LOAD, chunkPos, minLevel, chunkReference); +- } +- } +- }; +- +- final ChunkHolder holder = level.chunkSource.chunkMap.getUpdatingChunkIfPresent(CoordinateUtils.getChunkKey(chunkX, chunkZ)); +- +- if (holder == null || holder.getTicketLevel() > minLevel) { +- loadCallback.accept(null); +- return; +- } +- +- final CompletableFuture> tickingState; +- switch (toStatus) { +- case BORDER: { +- tickingState = holder.getFullChunkFuture(); +- break; +- } +- case TICKING: { +- tickingState = holder.getTickingChunkFuture(); +- break; +- } +- case ENTITY_TICKING: { +- tickingState = holder.getEntityTickingChunkFuture(); +- break; +- } +- default: { +- throw new IllegalStateException("Cannot reach here"); +- } +- } +- +- if (tickingState.isDone()) { +- loadCallback.accept(tickingState.join().left().orElse(null)); +- return; +- } +- +- tickingState.whenCompleteAsync((final Either either, final Throwable thr) -> { +- if (thr != null) { +- loadCallback.accept(null); +- return; +- } +- loadCallback.accept(either.left().orElse(null)); +- }, (final Runnable r) -> { +- scheduleChunkTask(level, chunkX, chunkZ, r, PrioritisedExecutor.Priority.HIGHEST); +- }); ++ level.chunkTaskScheduler.scheduleTickingState(chunkX, chunkZ, toStatus, addTicket, priority, onComplete); // Paper - rewrite chunk system + } + + public static List getVisibleChunkHolders(final ServerLevel level) { +- return new ArrayList<>(level.chunkSource.chunkMap.visibleChunkMap.values()); ++ return level.chunkTaskScheduler.chunkHolderManager.getOldChunkHolders(); // Paper - rewrite chunk system + } + + public static List getUpdatingChunkHolders(final ServerLevel level) { +- return new ArrayList<>(level.chunkSource.chunkMap.updatingChunkMap.values()); ++ return level.chunkTaskScheduler.chunkHolderManager.getOldChunkHolders(); // Paper - rewrite chunk system + } + + public static int getVisibleChunkHolderCount(final ServerLevel level) { +- return level.chunkSource.chunkMap.visibleChunkMap.size(); ++ return level.chunkTaskScheduler.chunkHolderManager.size(); // Paper - rewrite chunk system + } + + public static int getUpdatingChunkHolderCount(final ServerLevel level) { +- return level.chunkSource.chunkMap.updatingChunkMap.size(); ++ return level.chunkTaskScheduler.chunkHolderManager.size(); // Paper - rewrite chunk system + } + + public static boolean hasAnyChunkHolders(final ServerLevel level) { +@@ -269,23 +119,15 @@ public final class ChunkSystem { + } + + public static int getSendViewDistance(final ServerPlayer player) { +- return getLoadViewDistance(player); ++ return io.papermc.paper.chunk.PlayerChunkLoader.getSendViewDistance(player); + } + + public static int getLoadViewDistance(final ServerPlayer player) { +- final ServerLevel level = player.getLevel(); +- if (level == null) { +- return 
Bukkit.getViewDistance() + 1; +- } +- return level.chunkSource.chunkMap.getEffectiveViewDistance() + 1; ++ return io.papermc.paper.chunk.PlayerChunkLoader.getLoadViewDistance(player); + } + + public static int getTickViewDistance(final ServerPlayer player) { +- final ServerLevel level = player.getLevel(); +- if (level == null) { +- return Bukkit.getSimulationDistance(); +- } +- return level.chunkSource.chunkMap.distanceManager.getSimulationDistance(); ++ return io.papermc.paper.chunk.PlayerChunkLoader.getTickViewDistance(player); + } + + private ChunkSystem() { +diff --git a/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java b/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java +new file mode 100644 +index 0000000000000000000000000000000000000000..61c170555c8854b102c640b0b6a615f9f732edbf +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/entity/EntityLookup.java +@@ -0,0 +1,839 @@ ++package io.papermc.paper.chunk.system.entity; ++ ++import com.destroystokyo.paper.util.maplist.EntityList; ++import com.mojang.logging.LogUtils; ++import io.papermc.paper.util.CoordinateUtils; ++import io.papermc.paper.util.TickThread; ++import io.papermc.paper.util.WorldUtil; ++import io.papermc.paper.world.ChunkEntitySlices; ++import it.unimi.dsi.fastutil.ints.Int2ReferenceOpenHashMap; ++import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap; ++import it.unimi.dsi.fastutil.objects.Object2ReferenceOpenHashMap; ++import net.minecraft.core.BlockPos; ++import io.papermc.paper.chunk.system.ChunkSystem; ++import net.minecraft.server.level.ChunkHolder; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.util.AbortableIterationConsumer; ++import net.minecraft.util.Mth; ++import net.minecraft.world.entity.Entity; ++import net.minecraft.world.entity.EntityType; ++import net.minecraft.world.level.entity.EntityInLevelCallback; ++import net.minecraft.world.level.entity.EntityTypeTest; ++import net.minecraft.world.level.entity.LevelCallback; ++import net.minecraft.world.level.entity.LevelEntityGetter; ++import net.minecraft.world.level.entity.Visibility; ++import net.minecraft.world.phys.AABB; ++import org.jetbrains.annotations.NotNull; ++import org.jetbrains.annotations.Nullable; ++import org.slf4j.Logger; ++import java.util.ArrayList; ++import java.util.Iterator; ++import java.util.List; ++import java.util.NoSuchElementException; ++import java.util.UUID; ++import java.util.concurrent.locks.StampedLock; ++import java.util.function.Consumer; ++import java.util.function.Predicate; ++ ++public final class EntityLookup implements LevelEntityGetter { ++ ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ ++ protected static final int REGION_SHIFT = 5; ++ protected static final int REGION_MASK = (1 << REGION_SHIFT) - 1; ++ protected static final int REGION_SIZE = 1 << REGION_SHIFT; ++ ++ public final ServerLevel world; ++ ++ private final StampedLock stateLock = new StampedLock(); ++ protected final Long2ObjectOpenHashMap regions = new Long2ObjectOpenHashMap<>(128, 0.5f); ++ ++ private final int minSection; // inclusive ++ private final int maxSection; // inclusive ++ private final LevelCallback worldCallback; ++ ++ private final StampedLock entityByLock = new StampedLock(); ++ private final Int2ReferenceOpenHashMap entityById = new Int2ReferenceOpenHashMap<>(); ++ private final Object2ReferenceOpenHashMap entityByUUID = new Object2ReferenceOpenHashMap<>(); ++ private final EntityList accessibleEntities = new EntityList(); ++ ++ public 
EntityLookup(final ServerLevel world, final LevelCallback worldCallback) { ++ this.world = world; ++ this.minSection = WorldUtil.getMinSection(world); ++ this.maxSection = WorldUtil.getMaxSection(world); ++ this.worldCallback = worldCallback; ++ } ++ ++ private static Entity maskNonAccessible(final Entity entity) { ++ if (entity == null) { ++ return null; ++ } ++ final Visibility visibility = EntityLookup.getEntityStatus(entity); ++ return visibility.isAccessible() ? entity : null; ++ } ++ ++ @Nullable ++ @Override ++ public Entity get(final int id) { ++ final long attempt = this.entityByLock.tryOptimisticRead(); ++ if (attempt != 0L) { ++ try { ++ final Entity ret = this.entityById.get(id); ++ ++ if (this.entityByLock.validate(attempt)) { ++ return maskNonAccessible(ret); ++ } ++ } catch (final Error error) { ++ throw error; ++ } catch (final Throwable thr) { ++ // ignore ++ } ++ } ++ ++ this.entityByLock.readLock(); ++ try { ++ return maskNonAccessible(this.entityById.get(id)); ++ } finally { ++ this.entityByLock.tryUnlockRead(); ++ } ++ } ++ ++ @Nullable ++ @Override ++ public Entity get(final UUID id) { ++ final long attempt = this.entityByLock.tryOptimisticRead(); ++ if (attempt != 0L) { ++ try { ++ final Entity ret = this.entityByUUID.get(id); ++ ++ if (this.entityByLock.validate(attempt)) { ++ return maskNonAccessible(ret); ++ } ++ } catch (final Error error) { ++ throw error; ++ } catch (final Throwable thr) { ++ // ignore ++ } ++ } ++ ++ this.entityByLock.readLock(); ++ try { ++ return maskNonAccessible(this.entityByUUID.get(id)); ++ } finally { ++ this.entityByLock.tryUnlockRead(); ++ } ++ } ++ ++ public boolean hasEntity(final UUID uuid) { ++ return this.get(uuid) != null; ++ } ++ ++ public String getDebugInfo() { ++ return "count_id:" + this.entityById.size() + ",count_uuid:" + this.entityByUUID.size() + ",region_count:" + this.regions.size(); ++ } ++ ++ static final class ArrayIterable implements Iterable { ++ ++ private final T[] array; ++ private final int off; ++ private final int length; ++ ++ public ArrayIterable(final T[] array, final int off, final int length) { ++ this.array = array; ++ this.off = off; ++ this.length = length; ++ if (length > array.length) { ++ throw new IllegalArgumentException("Length must be no greater-than the array length"); ++ } ++ } ++ ++ @NotNull ++ @Override ++ public Iterator iterator() { ++ return new ArrayIterator<>(this.array, this.off, this.length); ++ } ++ ++ static final class ArrayIterator implements Iterator { ++ ++ private final T[] array; ++ private int off; ++ private final int length; ++ ++ public ArrayIterator(final T[] array, final int off, final int length) { ++ this.array = array; ++ this.off = off; ++ this.length = length; ++ } ++ ++ @Override ++ public boolean hasNext() { ++ return this.off < this.length; ++ } ++ ++ @Override ++ public T next() { ++ if (this.off >= this.length) { ++ throw new NoSuchElementException(); ++ } ++ return this.array[this.off++]; ++ } ++ ++ @Override ++ public void remove() { ++ throw new UnsupportedOperationException(); ++ } ++ } ++ } ++ ++ @Override ++ public Iterable getAll() { ++ return new ArrayIterable<>(this.accessibleEntities.getRawData(), 0, this.accessibleEntities.size()); ++ } ++ ++ @Override ++ public void get(final EntityTypeTest filter, final AbortableIterationConsumer action) { ++ for (final Entity entity : this.entityById.values()) { ++ final Visibility visibility = EntityLookup.getEntityStatus(entity); ++ if (!visibility.isAccessible()) { ++ continue; ++ } ++ final U casted = 
filter.tryCast(entity); ++ if (casted != null && action.accept(casted).shouldAbort()) { ++ break; ++ } ++ } ++ } ++ ++ @Override ++ public void get(final AABB box, final Consumer action) { ++ List entities = new ArrayList<>(); ++ this.getEntitiesWithoutDragonParts(null, box, entities, null); ++ for (int i = 0, len = entities.size(); i < len; ++i) { ++ action.accept(entities.get(i)); ++ } ++ } ++ ++ @Override ++ public void get(final EntityTypeTest filter, final AABB box, final AbortableIterationConsumer action) { ++ List entities = new ArrayList<>(); ++ this.getEntitiesWithoutDragonParts(null, box, entities, null); ++ for (int i = 0, len = entities.size(); i < len; ++i) { ++ final U casted = filter.tryCast(entities.get(i)); ++ if (casted != null && action.accept(casted).shouldAbort()) { ++ break; ++ } ++ } ++ } ++ ++ public void entityStatusChange(final Entity entity, final ChunkEntitySlices slices, final Visibility oldVisibility, final Visibility newVisibility, final boolean moved, ++ final boolean created, final boolean destroyed) { ++ TickThread.ensureTickThread(entity, "Entity status change must only happen on the main thread"); ++ ++ if (entity.updatingSectionStatus) { ++ // recursive status update ++ LOGGER.error("Cannot recursively update entity chunk status for entity " + entity, new Throwable()); ++ return; ++ } ++ ++ final boolean entityStatusUpdateBefore = slices == null ? false : slices.startPreventingStatusUpdates(); ++ ++ if (entityStatusUpdateBefore) { ++ LOGGER.error("Cannot update chunk status for entity " + entity + " since entity chunk (" + slices.chunkX + "," + slices.chunkZ + ") is receiving update", new Throwable()); ++ return; ++ } ++ ++ try { ++ final Boolean ticketBlockBefore = this.world.chunkTaskScheduler.chunkHolderManager.blockTicketUpdates(); ++ try { ++ entity.updatingSectionStatus = true; ++ try { ++ if (created) { ++ EntityLookup.this.worldCallback.onCreated(entity); ++ } ++ ++ if (oldVisibility == newVisibility) { ++ if (moved && newVisibility.isAccessible()) { ++ EntityLookup.this.worldCallback.onSectionChange(entity); ++ } ++ return; ++ } ++ ++ if (newVisibility.ordinal() > oldVisibility.ordinal()) { ++ // status upgrade ++ if (!oldVisibility.isAccessible() && newVisibility.isAccessible()) { ++ this.accessibleEntities.add(entity); ++ EntityLookup.this.worldCallback.onTrackingStart(entity); ++ } ++ ++ if (!oldVisibility.isTicking() && newVisibility.isTicking()) { ++ EntityLookup.this.worldCallback.onTickingStart(entity); ++ } ++ } else { ++ // status downgrade ++ if (oldVisibility.isTicking() && !newVisibility.isTicking()) { ++ EntityLookup.this.worldCallback.onTickingEnd(entity); ++ } ++ ++ if (oldVisibility.isAccessible() && !newVisibility.isAccessible()) { ++ this.accessibleEntities.remove(entity); ++ EntityLookup.this.worldCallback.onTrackingEnd(entity); ++ } ++ } ++ ++ if (moved && newVisibility.isAccessible()) { ++ EntityLookup.this.worldCallback.onSectionChange(entity); ++ } ++ ++ if (destroyed) { ++ EntityLookup.this.worldCallback.onDestroyed(entity); ++ } ++ } finally { ++ entity.updatingSectionStatus = false; ++ } ++ } finally { ++ this.world.chunkTaskScheduler.chunkHolderManager.unblockTicketUpdates(ticketBlockBefore); ++ } ++ } finally { ++ if (slices != null) { ++ slices.stopPreventingStatusUpdates(false); ++ } ++ } ++ } ++ ++ public void chunkStatusChange(final int x, final int z, final ChunkHolder.FullChunkStatus newStatus) { ++ this.getChunk(x, z).updateStatus(newStatus, this); ++ } ++ ++ public void addLegacyChunkEntities(final List 
entities) { ++ for (int i = 0, len = entities.size(); i < len; ++i) { ++ this.addEntity(entities.get(i), true); ++ } ++ } ++ ++ public void addEntityChunkEntities(final List entities) { ++ for (int i = 0, len = entities.size(); i < len; ++i) { ++ this.addEntity(entities.get(i), true); ++ } ++ } ++ ++ public void addWorldGenChunkEntities(final List entities) { ++ for (int i = 0, len = entities.size(); i < len; ++i) { ++ this.addEntity(entities.get(i), false); ++ } ++ } ++ ++ public boolean addNewEntity(final Entity entity) { ++ return this.addEntity(entity, false); ++ } ++ ++ public static Visibility getEntityStatus(final Entity entity) { ++ if (entity.isAlwaysTicking()) { ++ return Visibility.TICKING; ++ } ++ final ChunkHolder.FullChunkStatus entityStatus = entity.chunkStatus; ++ return Visibility.fromFullChunkStatus(entityStatus == null ? ChunkHolder.FullChunkStatus.INACCESSIBLE : entityStatus); ++ } ++ ++ private boolean addEntity(final Entity entity, final boolean fromDisk) { ++ final BlockPos pos = entity.blockPosition(); ++ final int sectionX = pos.getX() >> 4; ++ final int sectionY = Mth.clamp(pos.getY() >> 4, this.minSection, this.maxSection); ++ final int sectionZ = pos.getZ() >> 4; ++ TickThread.ensureTickThread(this.world, sectionX, sectionZ, "Cannot add entity off-main thread"); ++ ++ if (entity.isRemoved()) { ++ LOGGER.warn("Refusing to add removed entity: " + entity); ++ return false; ++ } ++ ++ if (entity.updatingSectionStatus) { ++ LOGGER.warn("Entity " + entity + " is currently prevented from being added/removed to world since it is processing section status updates", new Throwable()); ++ return false; ++ } ++ ++ if (fromDisk) { ++ ChunkSystem.onEntityPreAdd(this.world, entity); ++ if (entity.isRemoved()) { ++ // removed from checkDupeUUID call ++ return false; ++ } ++ } ++ ++ this.entityByLock.writeLock(); ++ try { ++ if (this.entityById.containsKey(entity.getId())) { ++ LOGGER.warn("Entity id already exists: " + entity.getId() + ", mapped to " + this.entityById.get(entity.getId()) + ", can't add " + entity); ++ return false; ++ } ++ if (this.entityByUUID.containsKey(entity.getUUID())) { ++ LOGGER.warn("Entity uuid already exists: " + entity.getUUID() + ", mapped to " + this.entityByUUID.get(entity.getUUID()) + ", can't add " + entity); ++ return false; ++ } ++ this.entityById.put(entity.getId(), entity); ++ this.entityByUUID.put(entity.getUUID(), entity); ++ } finally { ++ this.entityByLock.tryUnlockWrite(); ++ } ++ ++ entity.sectionX = sectionX; ++ entity.sectionY = sectionY; ++ entity.sectionZ = sectionZ; ++ final ChunkEntitySlices slices = this.getOrCreateChunk(sectionX, sectionZ); ++ if (!slices.addEntity(entity, sectionY)) { ++ LOGGER.warn("Entity " + entity + " added to world '" + this.world.getWorld().getName() + "', but was already contained in entity chunk (" + sectionX + "," + sectionZ + ")"); ++ } ++ ++ entity.setLevelCallback(new EntityCallback(entity)); ++ ++ this.entityStatusChange(entity, slices, Visibility.HIDDEN, getEntityStatus(entity), false, !fromDisk, false); ++ ++ return true; ++ } ++ ++ private void removeEntity(final Entity entity) { ++ final int sectionX = entity.sectionX; ++ final int sectionY = entity.sectionY; ++ final int sectionZ = entity.sectionZ; ++ TickThread.ensureTickThread(this.world, sectionX, sectionZ, "Cannot remove entity off-main"); ++ if (!entity.isRemoved()) { ++ throw new IllegalStateException("Only call Entity#setRemoved to remove an entity"); ++ } ++ final ChunkEntitySlices slices = this.getChunk(sectionX, sectionZ); ++ // all 
entities should be in a chunk ++ if (slices == null) { ++ LOGGER.warn("Cannot remove entity " + entity + " from null entity slices (" + sectionX + "," + sectionZ + ")"); ++ } else { ++ if (!slices.removeEntity(entity, sectionY)) { ++ LOGGER.warn("Failed to remove entity " + entity + " from entity slices (" + sectionX + "," + sectionZ + ")"); ++ } ++ } ++ entity.sectionX = entity.sectionY = entity.sectionZ = Integer.MIN_VALUE; ++ ++ this.entityByLock.writeLock(); ++ try { ++ if (!this.entityById.remove(entity.getId(), entity)) { ++ LOGGER.warn("Failed to remove entity " + entity + " by id, current entity mapped: " + this.entityById.get(entity.getId())); ++ } ++ if (!this.entityByUUID.remove(entity.getUUID(), entity)) { ++ LOGGER.warn("Failed to remove entity " + entity + " by uuid, current entity mapped: " + this.entityByUUID.get(entity.getUUID())); ++ } ++ } finally { ++ this.entityByLock.tryUnlockWrite(); ++ } ++ } ++ ++ private ChunkEntitySlices moveEntity(final Entity entity) { ++ // ensure we own the entity ++ TickThread.ensureTickThread(entity, "Cannot move entity off-main"); ++ ++ final BlockPos newPos = entity.blockPosition(); ++ final int newSectionX = newPos.getX() >> 4; ++ final int newSectionY = Mth.clamp(newPos.getY() >> 4, this.minSection, this.maxSection); ++ final int newSectionZ = newPos.getZ() >> 4; ++ ++ if (newSectionX == entity.sectionX && newSectionY == entity.sectionY && newSectionZ == entity.sectionZ) { ++ return null; ++ } ++ ++ // ensure the new section is owned by this tick thread ++ TickThread.ensureTickThread(this.world, newSectionX, newSectionZ, "Cannot move entity off-main"); ++ ++ // ensure the old section is owned by this tick thread ++ TickThread.ensureTickThread(this.world, entity.sectionX, entity.sectionZ, "Cannot move entity off-main"); ++ ++ final ChunkEntitySlices old = this.getChunk(entity.sectionX, entity.sectionZ); ++ final ChunkEntitySlices slices = this.getOrCreateChunk(newSectionX, newSectionZ); ++ ++ if (!old.removeEntity(entity, entity.sectionY)) { ++ LOGGER.warn("Could not remove entity " + entity + " from its old chunk section (" + entity.sectionX + "," + entity.sectionY + "," + entity.sectionZ + ") since it was not contained in the section"); ++ } ++ ++ if (!slices.addEntity(entity, newSectionY)) { ++ LOGGER.warn("Could not add entity " + entity + " to its new chunk section (" + newSectionX + "," + newSectionY + "," + newSectionZ + ") as it is already contained in the section"); ++ } ++ ++ entity.sectionX = newSectionX; ++ entity.sectionY = newSectionY; ++ entity.sectionZ = newSectionZ; ++ ++ return slices; ++ } ++ ++ public void getEntitiesWithoutDragonParts(final Entity except, final AABB box, final List into, final Predicate predicate) { ++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4; ++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4; ++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4; ++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4; ++ ++ final int minRegionX = minChunkX >> REGION_SHIFT; ++ final int minRegionZ = minChunkZ >> REGION_SHIFT; ++ final int maxRegionX = maxChunkX >> REGION_SHIFT; ++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT; ++ ++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) { ++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0; ++ final int maxZ = currRegionZ == maxRegionZ ? 
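These lookups walk 32x32-chunk regions: a chunk's region comes from the high bits of its coordinates (shift right by REGION_SHIFT), and its slot inside the region packs the low REGION_SHIFT bits of x and z into one index, which is exactly the currX | (currZ << REGION_SHIFT) expression used above. A standalone sketch of that mapping (the class and method names are illustrative, not part of the patch):

    // Standalone sketch of the 32x32-chunk region addressing used by EntityLookup.
    public final class RegionKeyExample {
        static final int REGION_SHIFT = 5;
        static final int REGION_MASK = (1 << REGION_SHIFT) - 1; // 31
        static final int REGION_SIZE = 1 << REGION_SHIFT;       // 32

        static int regionCoord(final int chunkCoord) {
            return chunkCoord >> REGION_SHIFT; // arithmetic shift: floor division, correct for negatives
        }

        static int localIndex(final int chunkX, final int chunkZ) {
            return (chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT);
        }

        public static void main(String[] args) {
            // chunk (37, -3) lives in region (1, -1), at local slot (5, 29) -> index 933
            final int chunkX = 37, chunkZ = -3;
            System.out.println("region=(" + regionCoord(chunkX) + "," + regionCoord(chunkZ) + ")"
                + " index=" + localIndex(chunkX, chunkZ));
            // a region therefore holds REGION_SIZE * REGION_SIZE = 1024 chunk slices
            System.out.println("slots per region=" + (REGION_SIZE * REGION_SIZE));
        }
    }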
maxChunkZ & REGION_MASK : REGION_MASK; ++ ++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) { ++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ); ++ ++ if (region == null) { ++ continue; ++ } ++ ++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0; ++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK; ++ ++ for (int currZ = minZ; currZ <= maxZ; ++currZ) { ++ for (int currX = minX; currX <= maxX; ++currX) { ++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT)); ++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ continue; ++ } ++ ++ chunk.getEntitiesWithoutDragonParts(except, box, into, predicate); ++ } ++ } ++ } ++ } ++ } ++ ++ public void getEntities(final Entity except, final AABB box, final List into, final Predicate predicate) { ++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4; ++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4; ++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4; ++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4; ++ ++ final int minRegionX = minChunkX >> REGION_SHIFT; ++ final int minRegionZ = minChunkZ >> REGION_SHIFT; ++ final int maxRegionX = maxChunkX >> REGION_SHIFT; ++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT; ++ ++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) { ++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0; ++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK; ++ ++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) { ++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ); ++ ++ if (region == null) { ++ continue; ++ } ++ ++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0; ++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK; ++ ++ for (int currZ = minZ; currZ <= maxZ; ++currZ) { ++ for (int currX = minX; currX <= maxX; ++currX) { ++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT)); ++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ continue; ++ } ++ ++ chunk.getEntities(except, box, into, predicate); ++ } ++ } ++ } ++ } ++ } ++ ++ public void getHardCollidingEntities(final Entity except, final AABB box, final List into, final Predicate predicate) { ++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4; ++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4; ++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4; ++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4; ++ ++ final int minRegionX = minChunkX >> REGION_SHIFT; ++ final int minRegionZ = minChunkZ >> REGION_SHIFT; ++ final int maxRegionX = maxChunkX >> REGION_SHIFT; ++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT; ++ ++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) { ++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0; ++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK; ++ ++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) { ++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ); ++ ++ if (region == null) { ++ continue; ++ } ++ ++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0; ++ final int maxX = currRegionX == maxRegionX ? 
maxChunkX & REGION_MASK : REGION_MASK; ++ ++ for (int currZ = minZ; currZ <= maxZ; ++currZ) { ++ for (int currX = minX; currX <= maxX; ++currX) { ++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT)); ++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ continue; ++ } ++ ++ chunk.getHardCollidingEntities(except, box, into, predicate); ++ } ++ } ++ } ++ } ++ } ++ ++ public void getEntities(final EntityType type, final AABB box, final List into, ++ final Predicate predicate) { ++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4; ++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4; ++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4; ++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4; ++ ++ final int minRegionX = minChunkX >> REGION_SHIFT; ++ final int minRegionZ = minChunkZ >> REGION_SHIFT; ++ final int maxRegionX = maxChunkX >> REGION_SHIFT; ++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT; ++ ++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) { ++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0; ++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK; ++ ++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) { ++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ); ++ ++ if (region == null) { ++ continue; ++ } ++ ++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0; ++ final int maxX = currRegionX == maxRegionX ? maxChunkX & REGION_MASK : REGION_MASK; ++ ++ for (int currZ = minZ; currZ <= maxZ; ++currZ) { ++ for (int currX = minX; currX <= maxX; ++currX) { ++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT)); ++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ continue; ++ } ++ ++ chunk.getEntities(type, box, (List)into, (Predicate)predicate); ++ } ++ } ++ } ++ } ++ } ++ ++ public void getEntities(final Class clazz, final Entity except, final AABB box, final List into, ++ final Predicate predicate) { ++ final int minChunkX = (Mth.floor(box.minX) - 2) >> 4; ++ final int minChunkZ = (Mth.floor(box.minZ) - 2) >> 4; ++ final int maxChunkX = (Mth.floor(box.maxX) + 2) >> 4; ++ final int maxChunkZ = (Mth.floor(box.maxZ) + 2) >> 4; ++ ++ final int minRegionX = minChunkX >> REGION_SHIFT; ++ final int minRegionZ = minChunkZ >> REGION_SHIFT; ++ final int maxRegionX = maxChunkX >> REGION_SHIFT; ++ final int maxRegionZ = maxChunkZ >> REGION_SHIFT; ++ ++ for (int currRegionZ = minRegionZ; currRegionZ <= maxRegionZ; ++currRegionZ) { ++ final int minZ = currRegionZ == minRegionZ ? minChunkZ & REGION_MASK : 0; ++ final int maxZ = currRegionZ == maxRegionZ ? maxChunkZ & REGION_MASK : REGION_MASK; ++ ++ for (int currRegionX = minRegionX; currRegionX <= maxRegionX; ++currRegionX) { ++ final ChunkSlicesRegion region = this.getRegion(currRegionX, currRegionZ); ++ ++ if (region == null) { ++ continue; ++ } ++ ++ final int minX = currRegionX == minRegionX ? minChunkX & REGION_MASK : 0; ++ final int maxX = currRegionX == maxRegionX ? 
maxChunkX & REGION_MASK : REGION_MASK; ++ ++ for (int currZ = minZ; currZ <= maxZ; ++currZ) { ++ for (int currX = minX; currX <= maxX; ++currX) { ++ final ChunkEntitySlices chunk = region.get(currX | (currZ << REGION_SHIFT)); ++ if (chunk == null || !chunk.status.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ continue; ++ } ++ ++ chunk.getEntities(clazz, except, box, into, predicate); ++ } ++ } ++ } ++ } ++ } ++ ++ public void entitySectionLoad(final int chunkX, final int chunkZ, final ChunkEntitySlices slices) { ++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot load in entity section off-main"); ++ synchronized (this) { ++ final ChunkEntitySlices curr = this.getChunk(chunkX, chunkZ); ++ if (curr != null) { ++ this.removeChunk(chunkX, chunkZ); ++ ++ curr.mergeInto(slices); ++ ++ this.addChunk(chunkX, chunkZ, slices); ++ } else { ++ this.addChunk(chunkX, chunkZ, slices); ++ } ++ } ++ } ++ ++ public void entitySectionUnload(final int chunkX, final int chunkZ) { ++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot unload entity section off-main"); ++ this.removeChunk(chunkX, chunkZ); ++ } ++ ++ public ChunkEntitySlices getChunk(final int chunkX, final int chunkZ) { ++ final ChunkSlicesRegion region = this.getRegion(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT); ++ if (region == null) { ++ return null; ++ } ++ ++ return region.get((chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT)); ++ } ++ ++ public ChunkEntitySlices getOrCreateChunk(final int chunkX, final int chunkZ) { ++ final ChunkSlicesRegion region = this.getRegion(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT); ++ ChunkEntitySlices ret; ++ if (region == null || (ret = region.get((chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT))) == null) { ++ // loadInEntityChunk will call addChunk for us ++ return this.world.chunkTaskScheduler.chunkHolderManager.getOrCreateEntityChunk(chunkX, chunkZ, true); ++ } ++ ++ return ret; ++ } ++ ++ public ChunkSlicesRegion getRegion(final int regionX, final int regionZ) { ++ final long key = CoordinateUtils.getChunkKey(regionX, regionZ); ++ final long attempt = this.stateLock.tryOptimisticRead(); ++ if (attempt != 0L) { ++ try { ++ final ChunkSlicesRegion ret = this.regions.get(key); ++ ++ if (this.stateLock.validate(attempt)) { ++ return ret; ++ } ++ } catch (final Error error) { ++ throw error; ++ } catch (final Throwable thr) { ++ // ignore ++ } ++ } ++ ++ this.stateLock.readLock(); ++ try { ++ return this.regions.get(key); ++ } finally { ++ this.stateLock.tryUnlockRead(); ++ } ++ } ++ ++ private synchronized void removeChunk(final int chunkX, final int chunkZ) { ++ final long key = CoordinateUtils.getChunkKey(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT); ++ final int relIndex = (chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT); ++ ++ final ChunkSlicesRegion region = this.regions.get(key); ++ final int remaining = region.remove(relIndex); ++ ++ if (remaining == 0) { ++ this.stateLock.writeLock(); ++ try { ++ this.regions.remove(key); ++ } finally { ++ this.stateLock.tryUnlockWrite(); ++ } ++ } ++ } ++ ++ public synchronized void addChunk(final int chunkX, final int chunkZ, final ChunkEntitySlices slices) { ++ final long key = CoordinateUtils.getChunkKey(chunkX >> REGION_SHIFT, chunkZ >> REGION_SHIFT); ++ final int relIndex = (chunkX & REGION_MASK) | ((chunkZ & REGION_MASK) << REGION_SHIFT); ++ ++ ChunkSlicesRegion region = this.regions.get(key); ++ if (region != null) { ++ region.add(relIndex, slices); ++ } else { ++ 
region = new ChunkSlicesRegion(); ++ region.add(relIndex, slices); ++ this.stateLock.writeLock(); ++ try { ++ this.regions.put(key, region); ++ } finally { ++ this.stateLock.tryUnlockWrite(); ++ } ++ } ++ } ++ ++ public static final class ChunkSlicesRegion { ++ ++ protected final ChunkEntitySlices[] slices = new ChunkEntitySlices[REGION_SIZE * REGION_SIZE]; ++ protected int sliceCount; ++ ++ public ChunkEntitySlices get(final int index) { ++ return this.slices[index]; ++ } ++ ++ public int remove(final int index) { ++ final ChunkEntitySlices slices = this.slices[index]; ++ if (slices == null) { ++ throw new IllegalStateException(); ++ } ++ ++ this.slices[index] = null; ++ ++ return --this.sliceCount; ++ } ++ ++ public void add(final int index, final ChunkEntitySlices slices) { ++ final ChunkEntitySlices curr = this.slices[index]; ++ if (curr != null) { ++ throw new IllegalStateException(); ++ } ++ ++ this.slices[index] = slices; ++ ++ ++this.sliceCount; ++ } ++ } ++ ++ private final class EntityCallback implements EntityInLevelCallback { ++ ++ public final Entity entity; ++ ++ public EntityCallback(final Entity entity) { ++ this.entity = entity; ++ } ++ ++ @Override ++ public void onMove() { ++ final Entity entity = this.entity; ++ final Visibility oldVisibility = getEntityStatus(entity); ++ final ChunkEntitySlices newSlices = EntityLookup.this.moveEntity(this.entity); ++ if (newSlices == null) { ++ // no new section, so didn't change sections ++ return; ++ } ++ final Visibility newVisibility = getEntityStatus(entity); ++ ++ EntityLookup.this.entityStatusChange(entity, newSlices, oldVisibility, newVisibility, true, false, false); ++ } ++ ++ @Override ++ public void onRemove(final Entity.RemovalReason reason) { ++ final Entity entity = this.entity; ++ TickThread.ensureTickThread(entity, "Cannot remove entity off-main"); // Paper - rewrite chunk system ++ final Visibility tickingState = EntityLookup.getEntityStatus(entity); ++ ++ EntityLookup.this.removeEntity(entity); ++ ++ EntityLookup.this.entityStatusChange(entity, null, tickingState, Visibility.HIDDEN, false, false, reason.shouldDestroy()); ++ ++ this.entity.setLevelCallback(NoOpCallback.INSTANCE); ++ } ++ } ++ ++ private static final class NoOpCallback implements EntityInLevelCallback { ++ ++ public static final NoOpCallback INSTANCE = new NoOpCallback(); ++ ++ @Override ++ public void onMove() {} ++ ++ @Override ++ public void onRemove(final Entity.RemovalReason reason) {} ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java +new file mode 100644 +index 0000000000000000000000000000000000000000..a08cde4eefe879adcee7c4118bc38f98c5097ed0 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java +@@ -0,0 +1,1328 @@ ++package io.papermc.paper.chunk.system.io; ++ ++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue; ++import ca.spottedleaf.concurrentutil.executor.Cancellable; ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedQueueExecutorThread; ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadedTaskQueue; ++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; ++import com.mojang.logging.LogUtils; ++import io.papermc.paper.util.CoordinateUtils; ++import io.papermc.paper.util.TickThread; ++import it.unimi.dsi.fastutil.HashCommon; ++import 
net.minecraft.nbt.CompoundTag; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.world.level.ChunkPos; ++import net.minecraft.world.level.chunk.storage.RegionFile; ++import net.minecraft.world.level.chunk.storage.RegionFileStorage; ++import org.slf4j.Logger; ++import java.io.IOException; ++import java.lang.invoke.VarHandle; ++import java.util.concurrent.CompletableFuture; ++import java.util.concurrent.CompletionException; ++import java.util.concurrent.ConcurrentHashMap; ++import java.util.concurrent.atomic.AtomicInteger; ++import java.util.function.BiConsumer; ++import java.util.function.BiFunction; ++import java.util.function.Consumer; ++import java.util.function.Function; ++ ++/** ++ * Prioritised RegionFile I/O executor, responsible for all RegionFile access. ++ *

<p> ++ * All functions provided are MT-Safe, however certain ordering constraints are recommended: ++ * </p> ++ * <li> ++ * Chunk saves may not occur for unloaded chunks. ++ * </li> ++ * <li> ++ * Tasks must be scheduled on the chunk scheduler thread. ++ * </li> ++ * By following these constraints, no chunk data loss should occur with the exception of underlying I/O problems.
    ++ */ ++public final class RegionFileIOThread extends PrioritisedQueueExecutorThread { ++ ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ ++ /** ++ * The kinds of region files controlled by the region file thread. Add more when needed, and ensure ++ * getControllerFor is updated. ++ */ ++ public static enum RegionFileType { ++ CHUNK_DATA, ++ POI_DATA, ++ ENTITY_DATA; ++ } ++ ++ protected static final RegionFileType[] CACHED_REGIONFILE_TYPES = RegionFileType.values(); ++ ++ private ChunkDataController getControllerFor(final ServerLevel world, final RegionFileType type) { ++ switch (type) { ++ case CHUNK_DATA: ++ return world.chunkDataControllerNew; ++ case POI_DATA: ++ return world.poiDataControllerNew; ++ case ENTITY_DATA: ++ return world.entityDataControllerNew; ++ default: ++ throw new IllegalStateException("Unknown controller type " + type); ++ } ++ } ++ ++ /** ++ * Collects regionfile data for a certain chunk. ++ */ ++ public static final class RegionFileData { ++ ++ private final boolean[] hasResult = new boolean[CACHED_REGIONFILE_TYPES.length]; ++ private final CompoundTag[] data = new CompoundTag[CACHED_REGIONFILE_TYPES.length]; ++ private final Throwable[] throwables = new Throwable[CACHED_REGIONFILE_TYPES.length]; ++ ++ /** ++ * Sets the result associated with the specified regionfile type. Note that ++ * results can only be set once per regionfile type. ++ * ++ * @param type The regionfile type. ++ * @param data The result to set. ++ */ ++ public void setData(final RegionFileType type, final CompoundTag data) { ++ final int index = type.ordinal(); ++ ++ if (this.hasResult[index]) { ++ throw new IllegalArgumentException("Result already exists for type " + type); ++ } ++ this.hasResult[index] = true; ++ this.data[index] = data; ++ } ++ ++ /** ++ * Sets the result associated with the specified regionfile type. Note that ++ * results can only be set once per regionfile type. ++ * ++ * @param type The regionfile type. ++ * @param throwable The result to set. ++ */ ++ public void setThrowable(final RegionFileType type, final Throwable throwable) { ++ final int index = type.ordinal(); ++ ++ if (this.hasResult[index]) { ++ throw new IllegalArgumentException("Result already exists for type " + type); ++ } ++ this.hasResult[index] = true; ++ this.throwables[index] = throwable; ++ } ++ ++ /** ++ * Returns whether there is a result for the specified regionfile type. ++ * ++ * @param type Specified regionfile type. ++ * ++ * @return Whether a result exists for {@code type}. ++ */ ++ public boolean hasResult(final RegionFileType type) { ++ return this.hasResult[type.ordinal()]; ++ } ++ ++ /** ++ * Returns the data result for the regionfile type. ++ * ++ * @param type Specified regionfile type. ++ * ++ * @throws IllegalArgumentException If the result has not been set for {@code type}. ++ * @return The data result for the specified type. If the result is a {@code Throwable}, ++ * then returns {@code null}. ++ */ ++ public CompoundTag getData(final RegionFileType type) { ++ final int index = type.ordinal(); ++ ++ if (!this.hasResult[index]) { ++ throw new IllegalArgumentException("Result does not exist for type " + type); ++ } ++ ++ return this.data[index]; ++ } ++ ++ /** ++ * Returns the throwable result for the regionfile type. ++ * ++ * @param type Specified regionfile type. ++ * ++ * @throws IllegalArgumentException If the result has not been set for {@code type}. ++ * @return The throwable result for the specified type. 
If the result is an {@code CompoundTag}, ++ * then returns {@code null}. ++ */ ++ public Throwable getThrowable(final RegionFileType type) { ++ final int index = type.ordinal(); ++ ++ if (!this.hasResult[index]) { ++ throw new IllegalArgumentException("Result does not exist for type " + type); ++ } ++ ++ return this.throwables[index]; ++ } ++ } ++ ++ private static final Object INIT_LOCK = new Object(); ++ ++ static RegionFileIOThread[] threads; ++ ++ /* needs to be consistent given a set of parameters */ ++ static RegionFileIOThread selectThread(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) { ++ if (threads == null) { ++ throw new IllegalStateException("Threads not initialised"); ++ } ++ ++ final int regionX = chunkX >> 5; ++ final int regionZ = chunkZ >> 5; ++ final int typeOffset = type.ordinal(); ++ ++ return threads[(System.identityHashCode(world) + regionX + regionZ + typeOffset) % threads.length]; ++ } ++ ++ /** ++ * Shuts down the I/O executor(s). Watis for all tasks to complete if specified. ++ * Tasks queued during this call might not be accepted, and tasks queued after will not be accepted. ++ * ++ * @param wait Whether to wait until all tasks have completed. ++ */ ++ public static void close(final boolean wait) { ++ for (int i = 0, len = threads.length; i < len; ++i) { ++ threads[i].close(false, true); ++ } ++ if (wait) { ++ RegionFileIOThread.flush(); ++ } ++ } ++ ++ public static long[] getExecutedTasks() { ++ final long[] ret = new long[threads.length]; ++ for (int i = 0, len = threads.length; i < len; ++i) { ++ ret[i] = threads[i].getTotalTasksExecuted(); ++ } ++ ++ return ret; ++ } ++ ++ public static long[] getTasksScheduled() { ++ final long[] ret = new long[threads.length]; ++ for (int i = 0, len = threads.length; i < len; ++i) { ++ ret[i] = threads[i].getTotalTasksScheduled(); ++ } ++ return ret; ++ } ++ ++ public static void flush() { ++ for (int i = 0, len = threads.length; i < len; ++i) { ++ threads[i].waitUntilAllExecuted(); ++ } ++ } ++ ++ public static void partialFlush(final int totalTasksRemaining) { ++ long failures = 1L; // start out at 0.25ms ++ ++ for (;;) { ++ final long[] executed = getExecutedTasks(); ++ final long[] scheduled = getTasksScheduled(); ++ ++ long sum = 0; ++ for (int i = 0; i < executed.length; ++i) { ++ sum += scheduled[i] - executed[i]; ++ } ++ ++ if (sum <= totalTasksRemaining) { ++ break; ++ } ++ ++ failures = ConcurrentUtil.linearLongBackoff(failures, 250_000L, 5_000_000L); // 500us, 5ms ++ } ++ } ++ ++ /** ++ * Inits the executor with the specified number of threads. ++ * ++ * @param threads Specified number of threads. 
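++ * ++ * For example, a minimal bootstrap call might look like this (the thread count shown here is arbitrary): ++ * <pre>{@code ++ * RegionFileIOThread.init(2); // must be called exactly once, before any region file access is scheduled ++ * }</pre>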
++ */ ++ public static void init(final int threads) { ++ synchronized (INIT_LOCK) { ++ if (RegionFileIOThread.threads != null) { ++ throw new IllegalStateException("Already initialised threads"); ++ } ++ ++ RegionFileIOThread.threads = new RegionFileIOThread[threads]; ++ ++ for (int i = 0; i < threads; ++i) { ++ RegionFileIOThread.threads[i] = new RegionFileIOThread(i); ++ RegionFileIOThread.threads[i].start(); ++ } ++ } ++ } ++ ++ private RegionFileIOThread(final int threadNumber) { ++ super(new PrioritisedThreadedTaskQueue(), (int)(1.0e6)); // 1.0ms spinwait time ++ this.setName("RegionFile I/O Thread #" + threadNumber); ++ this.setPriority(Thread.NORM_PRIORITY - 2); // we keep priority close to normal because threads can wait on us ++ this.setUncaughtExceptionHandler((final Thread thread, final Throwable thr) -> { ++ LOGGER.error("Uncaught exception thrown from I/O thread, report this! Thread: " + thread.getName(), thr); ++ }); ++ } ++ ++ /** ++ * Returns whether the current thread is a regionfile I/O executor. ++ * @return Whether the current thread is a regionfile I/O executor. ++ */ ++ public static boolean isRegionFileThread() { ++ return Thread.currentThread() instanceof RegionFileIOThread; ++ } ++ ++ /** ++ * Returns the priority associated with blocking I/O based on the current thread. The goal is to avoid ++ * dumb plugins from taking away priority from threads we consider crucial. ++ * @return The priroity to use with blocking I/O on the current thread. ++ */ ++ public static PrioritisedExecutor.Priority getIOBlockingPriorityForCurrentThread() { ++ if (TickThread.isTickThread()) { ++ return PrioritisedExecutor.Priority.BLOCKING; ++ } ++ return PrioritisedExecutor.Priority.HIGHEST; ++ } ++ ++ /** ++ * Returns the current {@code CompoundTag} pending for write for the specified chunk & regionfile type. ++ * Note that this does not copy the result, so do not modify the result returned. ++ * ++ * @param world Specified world. ++ * @param chunkX Specified chunk x. ++ * @param chunkZ Specified chunk z. ++ * @param type Specified regionfile type. ++ * ++ * @return The compound tag associated for the specified chunk. {@code null} if no write was pending, or if {@code null} is the write pending. ++ */ ++ public static CompoundTag getPendingWrite(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) { ++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type); ++ return thread.getPendingWriteInternal(world, chunkX, chunkZ, type); ++ } ++ ++ CompoundTag getPendingWriteInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) { ++ final ChunkDataController taskController = this.getControllerFor(world, type); ++ final ChunkDataTask task = taskController.tasks.get(Long.valueOf(CoordinateUtils.getChunkKey(chunkX, chunkZ))); ++ ++ if (task == null) { ++ return null; ++ } ++ ++ final CompoundTag ret = task.inProgressWrite; ++ ++ return ret == ChunkDataTask.NOTHING_TO_WRITE ? null : ret; ++ } ++ ++ /** ++ * Returns the priority for the specified regionfile type for the specified chunk. ++ * @param world Specified world. ++ * @param chunkX Specified chunk x. ++ * @param chunkZ Specified chunk z. ++ * @param type Specified regionfile type. 
++ * @return The priority for the chunk ++ */ ++ public static PrioritisedExecutor.Priority getPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) { ++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type); ++ return thread.getPriorityInternal(world, chunkX, chunkZ, type); ++ } ++ ++ PrioritisedExecutor.Priority getPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type) { ++ final ChunkDataController taskController = this.getControllerFor(world, type); ++ final ChunkDataTask task = taskController.tasks.get(Long.valueOf(CoordinateUtils.getChunkKey(chunkX, chunkZ))); ++ ++ if (task == null) { ++ return PrioritisedExecutor.Priority.COMPLETING; ++ } ++ ++ return task.prioritisedTask.getPriority(); ++ } ++ ++ /** ++ * Sets the priority for all regionfile types for the specified chunk. Note that great care should ++ * be taken using this method, as there can be multiple tasks tied to the same chunk that want different ++ * priorities. ++ * ++ * @param world Specified world. ++ * @param chunkX Specified chunk x. ++ * @param chunkZ Specified chunk z. ++ * @param priority New priority. ++ * ++ * @see #raisePriority(ServerLevel, int, int, Priority) ++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority) ++ * @see #lowerPriority(ServerLevel, int, int, Priority) ++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority) ++ */ ++ public static void setPriority(final ServerLevel world, final int chunkX, final int chunkZ, ++ final PrioritisedExecutor.Priority priority) { ++ for (final RegionFileType type : CACHED_REGIONFILE_TYPES) { ++ RegionFileIOThread.setPriority(world, chunkX, chunkZ, type, priority); ++ } ++ } ++ ++ /** ++ * Sets the priority for the specified regionfile type for the specified chunk. Note that great care should ++ * be taken using this method, as there can be multiple tasks tied to the same chunk that want different ++ * priorities. ++ * ++ * @param world Specified world. ++ * @param chunkX Specified chunk x. ++ * @param chunkZ Specified chunk z. ++ * @param type Specified regionfile type. ++ * @param priority New priority. ++ * ++ * @see #raisePriority(ServerLevel, int, int, Priority) ++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority) ++ * @see #lowerPriority(ServerLevel, int, int, Priority) ++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority) ++ */ ++ public static void setPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type, ++ final PrioritisedExecutor.Priority priority) { ++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type); ++ thread.setPriorityInternal(world, chunkX, chunkZ, type, priority); ++ } ++ ++ void setPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type, ++ final PrioritisedExecutor.Priority priority) { ++ final ChunkDataController taskController = this.getControllerFor(world, type); ++ final ChunkDataTask task = taskController.tasks.get(Long.valueOf(CoordinateUtils.getChunkKey(chunkX, chunkZ))); ++ ++ if (task != null) { ++ task.prioritisedTask.setPriority(priority); ++ } ++ } ++ ++ /** ++ * Raises the priority for all regionfile types for the specified chunk. ++ * ++ * @param world Specified world. ++ * @param chunkX Specified chunk x. ++ * @param chunkZ Specified chunk z. ++ * @param priority New priority. 
++ * ++ * @see #setPriority(ServerLevel, int, int, Priority) ++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority) ++ * @see #lowerPriority(ServerLevel, int, int, Priority) ++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority) ++ */ ++ public static void raisePriority(final ServerLevel world, final int chunkX, final int chunkZ, ++ final PrioritisedExecutor.Priority priority) { ++ for (final RegionFileType type : CACHED_REGIONFILE_TYPES) { ++ RegionFileIOThread.raisePriority(world, chunkX, chunkZ, type, priority); ++ } ++ } ++ ++ /** ++ * Raises the priority for the specified regionfile type for the specified chunk. ++ * ++ * @param world Specified world. ++ * @param chunkX Specified chunk x. ++ * @param chunkZ Specified chunk z. ++ * @param type Specified regionfile type. ++ * @param priority New priority. ++ * ++ * @see #setPriority(ServerLevel, int, int, Priority) ++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority) ++ * @see #lowerPriority(ServerLevel, int, int, Priority) ++ * @see #lowerPriority(ServerLevel, int, int, RegionFileType, Priority) ++ */ ++ public static void raisePriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type, ++ final PrioritisedExecutor.Priority priority) { ++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type); ++ thread.raisePriorityInternal(world, chunkX, chunkZ, type, priority); ++ } ++ ++ void raisePriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type, ++ final PrioritisedExecutor.Priority priority) { ++ final ChunkDataController taskController = this.getControllerFor(world, type); ++ final ChunkDataTask task = taskController.tasks.get(Long.valueOf(CoordinateUtils.getChunkKey(chunkX, chunkZ))); ++ ++ if (task != null) { ++ task.prioritisedTask.raisePriority(priority); ++ } ++ } ++ ++ /** ++ * Lowers the priority for all regionfile types for the specified chunk. ++ * ++ * @param world Specified world. ++ * @param chunkX Specified chunk x. ++ * @param chunkZ Specified chunk z. ++ * @param priority New priority. ++ * ++ * @see #raisePriority(ServerLevel, int, int, Priority) ++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority) ++ * @see #setPriority(ServerLevel, int, int, Priority) ++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority) ++ */ ++ public static void lowerPriority(final ServerLevel world, final int chunkX, final int chunkZ, ++ final PrioritisedExecutor.Priority priority) { ++ for (final RegionFileType type : CACHED_REGIONFILE_TYPES) { ++ RegionFileIOThread.lowerPriority(world, chunkX, chunkZ, type, priority); ++ } ++ } ++ ++ /** ++ * Lowers the priority for the specified regionfile type for the specified chunk. ++ * ++ * @param world Specified world. ++ * @param chunkX Specified chunk x. ++ * @param chunkZ Specified chunk z. ++ * @param type Specified regionfile type. ++ * @param priority New priority. 
++ * ++ * @see #raisePriority(ServerLevel, int, int, Priority) ++ * @see #raisePriority(ServerLevel, int, int, RegionFileType, Priority) ++ * @see #setPriority(ServerLevel, int, int, Priority) ++ * @see #setPriority(ServerLevel, int, int, RegionFileType, Priority) ++ */ ++ public static void lowerPriority(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type, ++ final PrioritisedExecutor.Priority priority) { ++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type); ++ thread.lowerPriorityInternal(world, chunkX, chunkZ, type, priority); ++ } ++ ++ void lowerPriorityInternal(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type, ++ final PrioritisedExecutor.Priority priority) { ++ final ChunkDataController taskController = this.getControllerFor(world, type); ++ final ChunkDataTask task = taskController.tasks.get(Long.valueOf(CoordinateUtils.getChunkKey(chunkX, chunkZ))); ++ ++ if (task != null) { ++ task.prioritisedTask.lowerPriority(priority); ++ } ++ } ++ ++ /** ++ * Schedules the chunk data to be written asynchronously. ++ *

<p> ++ * Impl notes: ++ * </p> ++ * <li> ++ * This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means ++ * saves must be scheduled before a chunk is unloaded. ++ * </li> ++ * <li> ++ * Writes may be called concurrently, although only the "later" write will go through. ++ * </li>
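++ * ++ * For example, scheduling a chunk data save might look like this ({@code world}, {@code chunkX}, {@code chunkZ} and {@code nbt} are assumed to be in scope): ++ * <pre>{@code ++ * RegionFileIOThread.scheduleSave(world, chunkX, chunkZ, nbt, RegionFileType.CHUNK_DATA); ++ * }</pre>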
  • ++ * ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param data Chunk's data ++ * @param type The regionfile type to write to. ++ * ++ * @throws IllegalStateException If the file io thread has shutdown. ++ */ ++ public static void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data, ++ final RegionFileType type) { ++ RegionFileIOThread.scheduleSave(world, chunkX, chunkZ, data, type, PrioritisedExecutor.Priority.NORMAL); ++ } ++ ++ /** ++ * Schedules the chunk data to be written asynchronously. ++ *

<p> ++ * Impl notes: ++ * </p> ++ * <li> ++ * This function presumes a chunk load for the coordinates is not called during this function (anytime after is OK). This means ++ * saves must be scheduled before a chunk is unloaded. ++ * </li> ++ * <li> ++ * Writes may be called concurrently, although only the "later" write will go through. ++ * </li>
  • ++ * ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param data Chunk's data ++ * @param type The regionfile type to write to. ++ * @param priority The minimum priority to schedule at. ++ * ++ * @throws IllegalStateException If the file io thread has shutdown. ++ */ ++ public static void scheduleSave(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data, ++ final RegionFileType type, final PrioritisedExecutor.Priority priority) { ++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type); ++ thread.scheduleSaveInternal(world, chunkX, chunkZ, data, type, priority); ++ } ++ ++ void scheduleSaveInternal(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data, ++ final RegionFileType type, final PrioritisedExecutor.Priority priority) { ++ final ChunkDataController taskController = this.getControllerFor(world, type); ++ ++ final boolean[] created = new boolean[1]; ++ final ChunkCoordinate key = new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ final ChunkDataTask task = taskController.tasks.compute(key, (final ChunkCoordinate keyInMap, final ChunkDataTask taskRunning) -> { ++ if (taskRunning == null || taskRunning.failedWrite) { ++ // no task is scheduled or the previous write failed - meaning we need to overwrite it ++ ++ // create task ++ final ChunkDataTask newTask = new ChunkDataTask(world, chunkX, chunkZ, taskController, RegionFileIOThread.this, priority); ++ newTask.inProgressWrite = data; ++ created[0] = true; ++ ++ return newTask; ++ } ++ ++ taskRunning.inProgressWrite = data; ++ ++ return taskRunning; ++ }); ++ ++ if (created[0]) { ++ task.prioritisedTask.queue(); ++ } else { ++ task.prioritisedTask.raisePriority(priority); ++ } ++ } ++ ++ /** ++ * Schedules a load to be executed asynchronously. This task will load all regionfile types, and then call ++ * {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)} ++ * for single load. ++ *

<p> ++ * Impl notes: ++ * </p> ++ * <li> ++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may ++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of ++ * data is undefined behaviour, and can cause deadlock. ++ * </li>
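++ * ++ * For example, a bulk asynchronous load might look like this ({@code world}, {@code chunkX} and {@code chunkZ} are assumed to be in scope): ++ * <pre>{@code ++ * RegionFileIOThread.loadAllChunkData(world, chunkX, chunkZ, (regionFileData) -> { ++ * final CompoundTag chunkTag = regionFileData.getData(RegionFileType.CHUNK_DATA); // may be null if nothing is on disk ++ * }, false); ++ * }</pre>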
  • ++ * ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param onComplete Consumer to execute once this task has completed ++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost ++ * of this call. ++ * ++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data. ++ * ++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean) ++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority) ++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...) ++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...) ++ */ ++ public static Cancellable loadAllChunkData(final ServerLevel world, final int chunkX, final int chunkZ, ++ final Consumer onComplete, final boolean intendingToBlock) { ++ return RegionFileIOThread.loadAllChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL); ++ } ++ ++ /** ++ * Schedules a load to be executed asynchronously. This task will load all regionfile types, and then call ++ * {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)} ++ * for single load. ++ *

<p> ++ * Impl notes: ++ * </p> ++ * <li> ++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may ++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of ++ * data is undefined behaviour, and can cause deadlock. ++ * </li>
  • ++ * ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param onComplete Consumer to execute once this task has completed ++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost ++ * of this call. ++ * @param priority The minimum priority to load the data at. ++ * ++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data. ++ * ++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean) ++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority) ++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...) ++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...) ++ */ ++ public static Cancellable loadAllChunkData(final ServerLevel world, final int chunkX, final int chunkZ, ++ final Consumer onComplete, final boolean intendingToBlock, ++ final PrioritisedExecutor.Priority priority) { ++ return RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, priority, CACHED_REGIONFILE_TYPES); ++ } ++ ++ /** ++ * Schedules a load to be executed asynchronously. This task will load data for the specified regionfile type(s), and ++ * then call {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean)} ++ * for single load. ++ *

<p> ++ * Impl notes: ++ * </p> ++ * <li> ++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may ++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of ++ * data is undefined behaviour, and can cause deadlock. ++ * </li>
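++ * ++ * For example, loading only chunk and POI data might look like this (surrounding variables are assumed to be in scope): ++ * <pre>{@code ++ * RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, (regionFileData) -> { ++ * final CompoundTag poiTag = regionFileData.getData(RegionFileType.POI_DATA); ++ * }, false, RegionFileType.CHUNK_DATA, RegionFileType.POI_DATA); ++ * }</pre>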
  • ++ * ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param onComplete Consumer to execute once this task has completed ++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost ++ * of this call. ++ * @param types The regionfile type(s) to load. ++ * ++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data. ++ * ++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean) ++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority) ++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean) ++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority) ++ */ ++ public static Cancellable loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ, ++ final Consumer onComplete, final boolean intendingToBlock, ++ final RegionFileType... types) { ++ return RegionFileIOThread.loadChunkData(world, chunkX, chunkZ, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL, types); ++ } ++ ++ /** ++ * Schedules a load to be executed asynchronously. This task will load data for the specified regionfile type(s), and ++ * then call {@code onComplete}. This is a bulk load operation, see {@link #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority)} ++ * for single load. ++ *

<p> ++ * Impl notes: ++ * </p> ++ * <li> ++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may ++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of ++ * data is undefined behaviour, and can cause deadlock. ++ * </li>
  • ++ * ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param onComplete Consumer to execute once this task has completed ++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost ++ * of this call. ++ * @param types The regionfile type(s) to load. ++ * @param priority The minimum priority to load the data at. ++ * ++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data. ++ * ++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean) ++ * @see #loadDataAsync(ServerLevel, int, int, RegionFileType, BiConsumer, boolean, Priority) ++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean) ++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority) ++ */ ++ public static Cancellable loadChunkData(final ServerLevel world, final int chunkX, final int chunkZ, ++ final Consumer onComplete, final boolean intendingToBlock, ++ final PrioritisedExecutor.Priority priority, final RegionFileType... types) { ++ if (types == null) { ++ throw new NullPointerException("Types cannot be null"); ++ } ++ if (types.length == 0) { ++ throw new IllegalArgumentException("Types cannot be empty"); ++ } ++ ++ final RegionFileData ret = new RegionFileData(); ++ ++ final Cancellable[] reads = new CancellableRead[types.length]; ++ final AtomicInteger completions = new AtomicInteger(); ++ final int expectedCompletions = types.length; ++ ++ for (int i = 0; i < expectedCompletions; ++i) { ++ final RegionFileType type = types[i]; ++ reads[i] = RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type, ++ (final CompoundTag data, final Throwable throwable) -> { ++ if (throwable != null) { ++ ret.setThrowable(type, throwable); ++ } else { ++ ret.setData(type, data); ++ } ++ ++ if (completions.incrementAndGet() == expectedCompletions) { ++ onComplete.accept(ret); ++ } ++ }, intendingToBlock, priority); ++ } ++ ++ return new CancellableReads(reads); ++ } ++ ++ /** ++ * Schedules a load to be executed asynchronously. This task will load the specified regionfile type, and then call ++ * {@code onComplete}. ++ *

<p> ++ * Impl notes: ++ * </p> ++ * <li> ++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may ++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of ++ * data is undefined behaviour, and can cause deadlock. ++ * </li>
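++ * ++ * For example, a single-type asynchronous load might look like this (surrounding variables are assumed to be in scope): ++ * <pre>{@code ++ * final Cancellable read = RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, RegionFileType.ENTITY_DATA, ++ * (tag, throwable) -> { ++ * // tag is null if nothing was on disk or if the read failed ++ * }, false); ++ * }</pre>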
  • ++ * ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param onComplete Consumer to execute once this task has completed ++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost ++ * of this call. ++ * ++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data. ++ * ++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...) ++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...) ++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean) ++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority) ++ */ ++ public static Cancellable loadDataAsync(final ServerLevel world, final int chunkX, final int chunkZ, ++ final RegionFileType type, final BiConsumer onComplete, ++ final boolean intendingToBlock) { ++ return RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type, onComplete, intendingToBlock, PrioritisedExecutor.Priority.NORMAL); ++ } ++ ++ /** ++ * Schedules a load to be executed asynchronously. This task will load the specified regionfile type, and then call ++ * {@code onComplete}. ++ *

<p> ++ * Impl notes: ++ * </p> ++ * <li> ++ * The {@code onComplete} parameter may be completed during the execution of this function synchronously or it may ++ * be completed asynchronously on this file io thread. Interacting with the file IO thread in the completion of ++ * data is undefined behaviour, and can cause deadlock. ++ * </li>
  • ++ * ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param onComplete Consumer to execute once this task has completed ++ * @param intendingToBlock Whether the caller is intending to block on completion. This only affects the cost ++ * of this call. ++ * @param priority Minimum priority to load the data at. ++ * ++ * @return The {@link Cancellable} for this chunk load. Cancelling it will not affect other loads for the same chunk data. ++ * ++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, RegionFileType...) ++ * @see #loadChunkData(ServerLevel, int, int, Consumer, boolean, Priority, RegionFileType...) ++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean) ++ * @see #loadAllChunkData(ServerLevel, int, int, Consumer, boolean, Priority) ++ */ ++ public static Cancellable loadDataAsync(final ServerLevel world, final int chunkX, final int chunkZ, ++ final RegionFileType type, final BiConsumer onComplete, ++ final boolean intendingToBlock, final PrioritisedExecutor.Priority priority) { ++ final RegionFileIOThread thread = RegionFileIOThread.selectThread(world, chunkX, chunkZ, type); ++ return thread.loadDataAsyncInternal(world, chunkX, chunkZ, type, onComplete, intendingToBlock, priority); ++ } ++ ++ private static Boolean doesRegionFileExist(final int chunkX, final int chunkZ, final boolean intendingToBlock, ++ final ChunkDataController taskController) { ++ final ChunkPos chunkPos = new ChunkPos(chunkX, chunkZ); ++ if (intendingToBlock) { ++ return taskController.computeForRegionFile(chunkX, chunkZ, true, (final RegionFile file) -> { ++ if (file == null) { // null if no regionfile exists ++ return Boolean.FALSE; ++ } ++ ++ return file.hasChunk(chunkPos) ? Boolean.TRUE : Boolean.FALSE; ++ }); ++ } else { ++ return taskController.computeForRegionFileIfLoaded(chunkX, chunkZ, (final RegionFile file) -> { ++ if (file == null) { // null if not loaded ++ return Boolean.TRUE; ++ } ++ ++ return file.hasChunk(chunkPos) ? 
Boolean.TRUE : Boolean.FALSE; ++ }); ++ } ++ } ++ ++ Cancellable loadDataAsyncInternal(final ServerLevel world, final int chunkX, final int chunkZ, ++ final RegionFileType type, final BiConsumer onComplete, ++ final boolean intendingToBlock, final PrioritisedExecutor.Priority priority) { ++ final ChunkDataController taskController = this.getControllerFor(world, type); ++ ++ final ImmediateCallbackCompletion callbackInfo = new ImmediateCallbackCompletion(); ++ ++ final ChunkCoordinate key = new ChunkCoordinate(CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ final BiFunction compute = (final ChunkCoordinate keyInMap, final ChunkDataTask running) -> { ++ if (running == null) { ++ // not scheduled ++ ++ if (callbackInfo.regionFileCalculation == null) { ++ // caller will compute this outside of compute(), to avoid holding the bin lock ++ callbackInfo.needsRegionFileTest = true; ++ return null; ++ } ++ ++ if (callbackInfo.regionFileCalculation == Boolean.FALSE) { ++ // not on disk ++ callbackInfo.data = null; ++ callbackInfo.throwable = null; ++ callbackInfo.completeNow = true; ++ return null; ++ } ++ ++ // set up task ++ final ChunkDataTask newTask = new ChunkDataTask( ++ world, chunkX, chunkZ, taskController, RegionFileIOThread.this, priority ++ ); ++ newTask.inProgressRead = new RegionFileIOThread.InProgressRead(); ++ newTask.inProgressRead.waiters.add(onComplete); ++ ++ callbackInfo.tasksNeedsScheduling = true; ++ return newTask; ++ } ++ ++ final CompoundTag pendingWrite = running.inProgressWrite; ++ ++ if (pendingWrite == ChunkDataTask.NOTHING_TO_WRITE) { ++ // need to add to waiters here, because the regionfile thread will use compute() to lock and check for cancellations ++ if (!running.inProgressRead.addToWaiters(onComplete)) { ++ callbackInfo.data = running.inProgressRead.value; ++ callbackInfo.throwable = running.inProgressRead.throwable; ++ callbackInfo.completeNow = true; ++ } ++ return running; ++ } ++ // using the result sync here - don't bump priority ++ ++ // at this stage we have to use the in progress write's data to avoid an order issue ++ callbackInfo.data = pendingWrite; ++ callbackInfo.throwable = null; ++ callbackInfo.completeNow = true; ++ return running; ++ }; ++ ++ ChunkDataTask curr = taskController.tasks.get(key); ++ if (curr == null) { ++ callbackInfo.regionFileCalculation = doesRegionFileExist(chunkX, chunkZ, intendingToBlock, taskController); ++ } ++ ChunkDataTask ret = taskController.tasks.compute(key, compute); ++ if (callbackInfo.needsRegionFileTest) { ++ // curr isn't null but when we went into compute() it was ++ callbackInfo.regionFileCalculation = doesRegionFileExist(chunkX, chunkZ, intendingToBlock, taskController); ++ // now it should be fine ++ ret = taskController.tasks.compute(key, compute); ++ } ++ ++ // needs to be scheduled ++ if (callbackInfo.tasksNeedsScheduling) { ++ ret.prioritisedTask.queue(); ++ } else if (callbackInfo.completeNow) { ++ try { ++ onComplete.accept(callbackInfo.data, callbackInfo.throwable); ++ } catch (final ThreadDeath thr) { ++ throw thr; ++ } catch (final Throwable thr) { ++ LOGGER.error("Callback " + ConcurrentUtil.genericToString(onComplete) + " synchronously failed to handle chunk data for task " + ret.toString(), thr); ++ } ++ } else { ++ // we're waiting on a task we didn't schedule, so raise its priority to what we want ++ ret.prioritisedTask.raisePriority(priority); ++ } ++ ++ return new CancellableRead(onComplete, ret); ++ } ++ ++ /** ++ * Schedules a load task to be executed asynchronously, and blocks on that task. 
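++ * ++ * For example, a blocking load of chunk data might look like this ({@code world}, {@code chunkX} and {@code chunkZ} are assumed to be in scope; the call may throw {@code IOException}): ++ * <pre>{@code ++ * final CompoundTag chunkTag = RegionFileIOThread.loadData(world, chunkX, chunkZ, RegionFileType.CHUNK_DATA, ++ * RegionFileIOThread.getIOBlockingPriorityForCurrentThread()); ++ * }</pre>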
++ * ++ * @param world Chunk's world ++ * @param chunkX Chunk's x coordinate ++ * @param chunkZ Chunk's z coordinate ++ * @param type Regionfile type ++ * @param priority Minimum priority to load the data at. ++ * ++ * @return The chunk data for the chunk. Note that a {@code null} result means the chunk or regionfile does not exist on disk. ++ * ++ * @throws IOException If the load fails for any reason ++ */ ++ public static CompoundTag loadData(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileType type, ++ final PrioritisedExecutor.Priority priority) throws IOException { ++ final CompletableFuture ret = new CompletableFuture<>(); ++ ++ RegionFileIOThread.loadDataAsync(world, chunkX, chunkZ, type, (final CompoundTag compound, final Throwable thr) -> { ++ if (thr != null) { ++ ret.completeExceptionally(thr); ++ } else { ++ ret.complete(compound); ++ } ++ }, true, priority); ++ ++ try { ++ return ret.join(); ++ } catch (final CompletionException ex) { ++ throw new IOException(ex); ++ } ++ } ++ ++ private static final class ImmediateCallbackCompletion { ++ ++ public CompoundTag data; ++ public Throwable throwable; ++ public boolean completeNow; ++ public boolean tasksNeedsScheduling; ++ public boolean needsRegionFileTest; ++ public Boolean regionFileCalculation; ++ ++ } ++ ++ static final class CancellableRead implements Cancellable { ++ ++ private BiConsumer callback; ++ private RegionFileIOThread.ChunkDataTask task; ++ ++ CancellableRead(final BiConsumer callback, final RegionFileIOThread.ChunkDataTask task) { ++ this.callback = callback; ++ this.task = task; ++ } ++ ++ @Override ++ public boolean cancel() { ++ final BiConsumer callback = this.callback; ++ final RegionFileIOThread.ChunkDataTask task = this.task; ++ ++ if (callback == null || task == null) { ++ return false; ++ } ++ ++ this.callback = null; ++ this.task = null; ++ ++ final RegionFileIOThread.InProgressRead read = task.inProgressRead; ++ ++ // read can be null if no read was scheduled (i.e no regionfile existed or chunk in regionfile didn't) ++ return (read != null && read.waiters.remove(callback)); ++ } ++ } ++ ++ static final class CancellableReads implements Cancellable { ++ ++ private Cancellable[] reads; ++ ++ protected static final VarHandle READS_HANDLE = ConcurrentUtil.getVarHandle(CancellableReads.class, "reads", Cancellable[].class); ++ ++ CancellableReads(final Cancellable[] reads) { ++ this.reads = reads; ++ } ++ ++ @Override ++ public boolean cancel() { ++ final Cancellable[] reads = (Cancellable[])READS_HANDLE.getAndSet((CancellableReads)this, (Cancellable[])null); ++ ++ if (reads == null) { ++ return false; ++ } ++ ++ boolean ret = false; ++ ++ for (final Cancellable read : reads) { ++ ret |= read.cancel(); ++ } ++ ++ return ret; ++ } ++ } ++ ++ static final class InProgressRead { ++ ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ ++ CompoundTag value; ++ Throwable throwable; ++ final MultiThreadedQueue> waiters = new MultiThreadedQueue<>(); ++ ++ // rets false if already completed (callback not invoked), true if callback was added ++ boolean addToWaiters(final BiConsumer callback) { ++ return this.waiters.add(callback); ++ } ++ ++ void complete(final RegionFileIOThread.ChunkDataTask task, final CompoundTag value, final Throwable throwable) { ++ this.value = value; ++ this.throwable = throwable; ++ ++ BiConsumer consumer; ++ while ((consumer = this.waiters.pollOrBlockAdds()) != null) { ++ try { ++ consumer.accept(value, throwable); ++ } catch (final ThreadDeath 
thr) { ++ throw thr; ++ } catch (final Throwable thr) { ++ LOGGER.error("Callback " + ConcurrentUtil.genericToString(consumer) + " failed to handle chunk data for task " + task.toString(), thr); ++ } ++ } ++ } ++ } ++ ++ /** ++ * Class exists to replace {@link Long} usages as keys inside non-fastutil hashtables. The hash for some Long {@code x} ++ * is defined as {@code (x >>> 32) ^ x}. Chunk keys as long values are defined as {@code ((chunkX & 0xFFFFFFFFL) | (chunkZ << 32))}, ++ * which means the hashcode as a Long value will be {@code chunkX ^ chunkZ}. Given that most chunks are created within a radius arounds players, ++ * this will lead to many hash collisions. So, this class uses a better hashing algorithm so that usage of ++ * non-fastutil collections is not degraded. ++ */ ++ public static final class ChunkCoordinate implements Comparable { ++ ++ public final long key; ++ ++ public ChunkCoordinate(final long key) { ++ this.key = key; ++ } ++ ++ @Override ++ public int hashCode() { ++ return (int)HashCommon.mix(this.key); ++ } ++ ++ @Override ++ public boolean equals(final Object obj) { ++ if (this == obj) { ++ return true; ++ } ++ ++ if (!(obj instanceof ChunkCoordinate)) { ++ return false; ++ } ++ ++ final ChunkCoordinate other = (ChunkCoordinate)obj; ++ ++ return this.key == other.key; ++ } ++ ++ // This class is intended for HashMap/ConcurrentHashMap usage, which do treeify bin nodes if the chain ++ // is too large. So we should implement compareTo to help. ++ @Override ++ public int compareTo(final RegionFileIOThread.ChunkCoordinate other) { ++ return Long.compare(this.key, other.key); ++ } ++ ++ @Override ++ public String toString() { ++ return new ChunkPos(this.key).toString(); ++ } ++ } ++ ++ public static abstract class ChunkDataController { ++ ++ // ConcurrentHashMap synchronizes per chain, so reduce the chance of task's hashes colliding. 
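++ // For example, the chunk keys for (3, 5) and (5, 3) collide under Long's hash (which reduces to chunkX ^ chunkZ), ++ // whereas ChunkCoordinate's mixed hash spreads such nearby keys across different bins.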
++ protected final ConcurrentHashMap tasks = new ConcurrentHashMap<>(8192, 0.10f); ++ ++ public final RegionFileType type; ++ ++ public ChunkDataController(final RegionFileType type) { ++ this.type = type; ++ } ++ ++ public abstract RegionFileStorage getCache(); ++ ++ public abstract void writeData(final int chunkX, final int chunkZ, final CompoundTag compound) throws IOException; ++ ++ public abstract CompoundTag readData(final int chunkX, final int chunkZ) throws IOException; ++ ++ public boolean hasTasks() { ++ return !this.tasks.isEmpty(); ++ } ++ ++ public T computeForRegionFile(final int chunkX, final int chunkZ, final boolean existingOnly, final Function function) { ++ final RegionFileStorage cache = this.getCache(); ++ final RegionFile regionFile; ++ synchronized (cache) { ++ try { ++ regionFile = cache.getRegionFile(new ChunkPos(chunkX, chunkZ), existingOnly, true); ++ } catch (final IOException ex) { ++ throw new RuntimeException(ex); ++ } ++ } ++ ++ try { ++ return function.apply(regionFile); ++ } finally { ++ if (regionFile != null) { ++ regionFile.fileLock.unlock(); ++ } ++ } ++ } ++ ++ public T computeForRegionFileIfLoaded(final int chunkX, final int chunkZ, final Function function) { ++ final RegionFileStorage cache = this.getCache(); ++ final RegionFile regionFile; ++ ++ synchronized (cache) { ++ regionFile = cache.getRegionFileIfLoaded(new ChunkPos(chunkX, chunkZ)); ++ if (regionFile != null) { ++ regionFile.fileLock.lock(); ++ } ++ } ++ ++ try { ++ return function.apply(regionFile); ++ } finally { ++ if (regionFile != null) { ++ regionFile.fileLock.unlock(); ++ } ++ } ++ } ++ } ++ ++ static final class ChunkDataTask implements Runnable { ++ ++ protected static final CompoundTag NOTHING_TO_WRITE = new CompoundTag(); ++ ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ ++ RegionFileIOThread.InProgressRead inProgressRead; ++ volatile CompoundTag inProgressWrite = NOTHING_TO_WRITE; // only needs to be acquire/release ++ ++ boolean failedWrite; ++ ++ final ServerLevel world; ++ final int chunkX; ++ final int chunkZ; ++ final RegionFileIOThread.ChunkDataController taskController; ++ ++ final PrioritisedExecutor.PrioritisedTask prioritisedTask; ++ ++ /* ++ * IO thread will perform reads before writes for a given chunk x and z ++ * ++ * How reads/writes are scheduled: ++ * ++ * If read is scheduled while scheduling write, take no special action and just schedule write ++ * If read is scheduled while scheduling read and no write is scheduled, chain the read task ++ * ++ * ++ * If write is scheduled while scheduling read, use the pending write data and ret immediately (so no read is scheduled) ++ * If write is scheduled while scheduling write (ignore read in progress), overwrite the write in progress data ++ * ++ * This allows the reads and writes to act as if they occur synchronously to the thread scheduling them, however ++ * it fails to properly propagate write failures thanks to writes overwriting each other ++ */ ++ ++ public ChunkDataTask(final ServerLevel world, final int chunkX, final int chunkZ, final RegionFileIOThread.ChunkDataController taskController, ++ final PrioritisedExecutor executor, final PrioritisedExecutor.Priority priority) { ++ this.world = world; ++ this.chunkX = chunkX; ++ this.chunkZ = chunkZ; ++ this.taskController = taskController; ++ this.prioritisedTask = executor.createTask(this, priority); ++ } ++ ++ @Override ++ public String toString() { ++ return "Task for world: '" + this.world.getWorld().getName() + "' at (" + this.chunkX + 
"," + this.chunkZ + ++ ") type: " + this.taskController.type.name() + ", hash: " + this.hashCode(); ++ } ++ ++ @Override ++ public void run() { ++ final RegionFileIOThread.InProgressRead read = this.inProgressRead; ++ final ChunkCoordinate chunkKey = new ChunkCoordinate(CoordinateUtils.getChunkKey(this.chunkX, this.chunkZ)); ++ ++ if (read != null) { ++ final boolean[] canRead = new boolean[] { true }; ++ ++ if (read.waiters.isEmpty()) { ++ // cancelled read? go to task controller to confirm ++ final ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> { ++ if (valueInMap == null) { ++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!"); ++ } ++ if (valueInMap != ChunkDataTask.this) { ++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!"); ++ } ++ ++ if (!read.waiters.isEmpty()) { // as per usual IntelliJ is unable to figure out that there are concurrent accesses. ++ return valueInMap; ++ } else { ++ canRead[0] = false; ++ } ++ ++ return valueInMap.inProgressWrite == NOTHING_TO_WRITE ? null : valueInMap; ++ }); ++ ++ if (inMap == null) { ++ // read is cancelled - and no write pending, so we're done ++ return; ++ } ++ // if there is a write in progress, we don't actually have to worry about waiters gaining new entries - ++ // the readers will just use the in progress write, so the value in canRead is good to use without ++ // further synchronisation. ++ } ++ ++ if (canRead[0]) { ++ CompoundTag compound = null; ++ Throwable throwable = null; ++ ++ try { ++ compound = this.taskController.readData(this.chunkX, this.chunkZ); ++ } catch (final ThreadDeath thr) { ++ throw thr; ++ } catch (final Throwable thr) { ++ throwable = thr; ++ LOGGER.error("Failed to read chunk data for task: " + this.toString(), thr); ++ } ++ read.complete(this, compound, throwable); ++ } ++ } ++ ++ CompoundTag write = this.inProgressWrite; ++ ++ if (write == NOTHING_TO_WRITE) { ++ final ChunkDataTask inMap = this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> { ++ if (valueInMap == null) { ++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!"); ++ } ++ if (valueInMap != ChunkDataTask.this) { ++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!"); ++ } ++ return valueInMap.inProgressWrite == NOTHING_TO_WRITE ? 
null : valueInMap; ++ }); ++ ++ if (inMap == null) { ++ return; // set the task value to null, indicating we're done ++ } // else: inProgressWrite changed, so now we have something to write ++ } ++ ++ for (;;) { ++ write = this.inProgressWrite; ++ final CompoundTag dataWritten = write; ++ ++ boolean failedWrite = false; ++ ++ try { ++ this.taskController.writeData(this.chunkX, this.chunkZ, write); ++ } catch (final ThreadDeath thr) { ++ throw thr; ++ } catch (final Throwable thr) { ++ if (thr instanceof RegionFileStorage.RegionFileSizeException) { ++ final int maxSize = RegionFile.MAX_CHUNK_SIZE / (1024 * 1024); ++ LOGGER.error("Chunk at (" + this.chunkX + "," + this.chunkZ + ") in '" + this.world.getWorld().getName() + "' exceeds max size of " + maxSize + "MiB, it has been deleted from disk."); ++ } else { ++ failedWrite = thr instanceof IOException; ++ LOGGER.error("Failed to write chunk data for task: " + this.toString(), thr); ++ } ++ } ++ ++ final boolean finalFailWrite = failedWrite; ++ final boolean[] done = new boolean[] { false }; ++ ++ this.taskController.tasks.compute(chunkKey, (final ChunkCoordinate keyInMap, final ChunkDataTask valueInMap) -> { ++ if (valueInMap == null) { ++ throw new IllegalStateException("Write completed concurrently, expected this task: " + ChunkDataTask.this.toString() + ", report this!"); ++ } ++ if (valueInMap != ChunkDataTask.this) { ++ throw new IllegalStateException("Chunk task mismatch, expected this task: " + ChunkDataTask.this.toString() + ", got: " + valueInMap.toString() + ", report this!"); ++ } ++ if (valueInMap.inProgressWrite == dataWritten) { ++ valueInMap.failedWrite = finalFailWrite; ++ done[0] = true; ++ // keep the data in map if we failed the write so we can try to prevent data loss ++ return finalFailWrite ? 
valueInMap : null; ++ } ++ // different data than expected, means we need to retry write ++ return valueInMap; ++ }); ++ ++ if (done[0]) { ++ return; ++ } ++ ++ // fetch & write new data ++ continue; ++ } ++ } ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java +new file mode 100644 +index 0000000000000000000000000000000000000000..0b7a2b0ead4f3bc07bfd9a38c2b7cf024bd140c6 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java +@@ -0,0 +1,280 @@ ++package io.papermc.paper.chunk.system.light; ++ ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.starlight.common.light.BlockStarLightEngine; ++import ca.spottedleaf.starlight.common.light.SkyStarLightEngine; ++import ca.spottedleaf.starlight.common.light.StarLightInterface; ++import io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler; ++import io.papermc.paper.util.CoordinateUtils; ++import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap; ++import it.unimi.dsi.fastutil.shorts.ShortCollection; ++import it.unimi.dsi.fastutil.shorts.ShortOpenHashSet; ++import net.minecraft.core.BlockPos; ++import net.minecraft.core.SectionPos; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.world.level.ChunkPos; ++import java.util.ArrayList; ++import java.util.HashSet; ++import java.util.List; ++import java.util.Set; ++import java.util.concurrent.CompletableFuture; ++import java.util.function.BooleanSupplier; ++ ++public final class LightQueue { ++ ++ protected final Long2ObjectOpenHashMap chunkTasks = new Long2ObjectOpenHashMap<>(); ++ protected final StarLightInterface manager; ++ protected final ServerLevel world; ++ ++ public LightQueue(final StarLightInterface manager) { ++ this.manager = manager; ++ this.world = ((ServerLevel)manager.getWorld()); ++ } ++ ++ public void lowerPriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) { ++ final ChunkTasks task; ++ synchronized (this) { ++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ } ++ if (task != null) { ++ task.lowerPriority(priority); ++ } ++ } ++ ++ public void setPriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) { ++ final ChunkTasks task; ++ synchronized (this) { ++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ } ++ if (task != null) { ++ task.setPriority(priority); ++ } ++ } ++ ++ public void raisePriority(final int chunkX, final int chunkZ, final PrioritisedExecutor.Priority priority) { ++ final ChunkTasks task; ++ synchronized (this) { ++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ } ++ if (task != null) { ++ task.raisePriority(priority); ++ } ++ } ++ ++ public PrioritisedExecutor.Priority getPriority(final int chunkX, final int chunkZ) { ++ final ChunkTasks task; ++ synchronized (this) { ++ task = this.chunkTasks.get(CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ } ++ if (task != null) { ++ return task.getPriority(); ++ } ++ ++ return PrioritisedExecutor.Priority.COMPLETING; ++ } ++ ++ public boolean isEmpty() { ++ synchronized (this) { ++ return this.chunkTasks.isEmpty(); ++ } ++ } ++ ++ public CompletableFuture queueBlockChange(final BlockPos pos) { ++ final ChunkTasks tasks; ++ synchronized (this) { ++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> { ++ return 
new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this); ++ }); ++ tasks.changedPositions.add(pos.immutable()); ++ } ++ ++ tasks.schedule(); ++ ++ return tasks.onComplete; ++ } ++ ++ public CompletableFuture queueSectionChange(final SectionPos pos, final boolean newEmptyValue) { ++ final ChunkTasks tasks; ++ synchronized (this) { ++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> { ++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this); ++ }); ++ ++ if (tasks.changedSectionSet == null) { ++ tasks.changedSectionSet = new Boolean[this.manager.maxSection - this.manager.minSection + 1]; ++ } ++ tasks.changedSectionSet[pos.getY() - this.manager.minSection] = Boolean.valueOf(newEmptyValue); ++ } ++ ++ tasks.schedule(); ++ ++ return tasks.onComplete; ++ } ++ ++ public CompletableFuture queueChunkLightTask(final ChunkPos pos, final BooleanSupplier lightTask, final PrioritisedExecutor.Priority priority) { ++ final ChunkTasks tasks; ++ synchronized (this) { ++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> { ++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this, priority); ++ }); ++ if (tasks.lightTasks == null) { ++ tasks.lightTasks = new ArrayList<>(); ++ } ++ tasks.lightTasks.add(lightTask); ++ } ++ ++ tasks.schedule(); ++ ++ return tasks.onComplete; ++ } ++ ++ public CompletableFuture queueChunkSkylightEdgeCheck(final SectionPos pos, final ShortCollection sections) { ++ final ChunkTasks tasks; ++ synchronized (this) { ++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> { ++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this); ++ }); ++ ++ ShortOpenHashSet queuedEdges = tasks.queuedEdgeChecksSky; ++ if (queuedEdges == null) { ++ queuedEdges = tasks.queuedEdgeChecksSky = new ShortOpenHashSet(); ++ } ++ queuedEdges.addAll(sections); ++ } ++ ++ tasks.schedule(); ++ ++ return tasks.onComplete; ++ } ++ ++ public CompletableFuture queueChunkBlocklightEdgeCheck(final SectionPos pos, final ShortCollection sections) { ++ final ChunkTasks tasks; ++ ++ synchronized (this) { ++ tasks = this.chunkTasks.computeIfAbsent(CoordinateUtils.getChunkKey(pos), (final long keyInMap) -> { ++ return new ChunkTasks(keyInMap, LightQueue.this.manager, LightQueue.this); ++ }); ++ ++ ShortOpenHashSet queuedEdges = tasks.queuedEdgeChecksBlock; ++ if (queuedEdges == null) { ++ queuedEdges = tasks.queuedEdgeChecksBlock = new ShortOpenHashSet(); ++ } ++ queuedEdges.addAll(sections); ++ } ++ ++ tasks.schedule(); ++ ++ return tasks.onComplete; ++ } ++ ++ public void removeChunk(final ChunkPos pos) { ++ final ChunkTasks tasks; ++ synchronized (this) { ++ tasks = this.chunkTasks.remove(CoordinateUtils.getChunkKey(pos)); ++ } ++ if (tasks != null && tasks.cancel()) { ++ tasks.onComplete.complete(null); ++ } ++ } ++ ++ protected static final class ChunkTasks implements Runnable { ++ ++ final Set changedPositions = new HashSet<>(); ++ Boolean[] changedSectionSet; ++ ShortOpenHashSet queuedEdgeChecksSky; ++ ShortOpenHashSet queuedEdgeChecksBlock; ++ List lightTasks; ++ ++ final CompletableFuture onComplete = new CompletableFuture<>(); ++ ++ public final long chunkCoordinate; ++ private final StarLightInterface lightEngine; ++ private final LightQueue queue; ++ private final PrioritisedExecutor.PrioritisedTask task; ++ ++ public ChunkTasks(final long chunkCoordinate, final StarLightInterface lightEngine, final 
LightQueue queue) { ++ this(chunkCoordinate, lightEngine, queue, PrioritisedExecutor.Priority.NORMAL); ++ } ++ ++ public ChunkTasks(final long chunkCoordinate, final StarLightInterface lightEngine, final LightQueue queue, ++ final PrioritisedExecutor.Priority priority) { ++ this.chunkCoordinate = chunkCoordinate; ++ this.lightEngine = lightEngine; ++ this.queue = queue; ++ this.task = queue.world.chunkTaskScheduler.lightExecutor.createTask(this, priority); ++ } ++ ++ public void schedule() { ++ this.task.queue(); ++ } ++ ++ public boolean cancel() { ++ return this.task.cancel(); ++ } ++ ++ public PrioritisedExecutor.Priority getPriority() { ++ return this.task.getPriority(); ++ } ++ ++ public void lowerPriority(final PrioritisedExecutor.Priority priority) { ++ this.task.lowerPriority(priority); ++ } ++ ++ public void setPriority(final PrioritisedExecutor.Priority priority) { ++ this.task.setPriority(priority); ++ } ++ ++ public void raisePriority(final PrioritisedExecutor.Priority priority) { ++ this.task.raisePriority(priority); ++ } ++ ++ @Override ++ public void run() { ++ final SkyStarLightEngine skyEngine = this.lightEngine.getSkyLightEngine(); ++ final BlockStarLightEngine blockEngine = this.lightEngine.getBlockLightEngine(); ++ try { ++ synchronized (this.queue) { ++ this.queue.chunkTasks.remove(this.chunkCoordinate); ++ } ++ ++ boolean litChunk = false; ++ if (this.lightTasks != null) { ++ for (final BooleanSupplier run : this.lightTasks) { ++ if (run.getAsBoolean()) { ++ litChunk = true; ++ break; ++ } ++ } ++ } ++ ++ final long coordinate = this.chunkCoordinate; ++ final int chunkX = CoordinateUtils.getChunkX(coordinate); ++ final int chunkZ = CoordinateUtils.getChunkZ(coordinate); ++ ++ final Set positions = this.changedPositions; ++ final Boolean[] sectionChanges = this.changedSectionSet; ++ ++ if (!litChunk) { ++ if (skyEngine != null && (!positions.isEmpty() || sectionChanges != null)) { ++ skyEngine.blocksChangedInChunk(this.lightEngine.getLightAccess(), chunkX, chunkZ, positions, sectionChanges); ++ } ++ if (blockEngine != null && (!positions.isEmpty() || sectionChanges != null)) { ++ blockEngine.blocksChangedInChunk(this.lightEngine.getLightAccess(), chunkX, chunkZ, positions, sectionChanges); ++ } ++ ++ if (skyEngine != null && this.queuedEdgeChecksSky != null) { ++ skyEngine.checkChunkEdges(this.lightEngine.getLightAccess(), chunkX, chunkZ, this.queuedEdgeChecksSky); ++ } ++ if (blockEngine != null && this.queuedEdgeChecksBlock != null) { ++ blockEngine.checkChunkEdges(this.lightEngine.getLightAccess(), chunkX, chunkZ, this.queuedEdgeChecksBlock); ++ } ++ } ++ ++ this.onComplete.complete(null); ++ } finally { ++ this.lightEngine.releaseSkyLightEngine(skyEngine); ++ this.lightEngine.releaseBlockLightEngine(blockEngine); ++ } ++ } ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java b/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java +new file mode 100644 +index 0000000000000000000000000000000000000000..d72041aa814ff179e6e29a45dcd359a91d426d47 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/poi/PoiChunk.java +@@ -0,0 +1,213 @@ ++package io.papermc.paper.chunk.system.poi; ++ ++import com.mojang.logging.LogUtils; ++import com.mojang.serialization.Codec; ++import com.mojang.serialization.DataResult; ++import io.papermc.paper.util.CoordinateUtils; ++import io.papermc.paper.util.TickThread; ++import io.papermc.paper.util.WorldUtil; ++import net.minecraft.SharedConstants; ++import net.minecraft.nbt.CompoundTag; 
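++// Note: PoiChunk keeps one PoiSection per chunk section in sections[sectionY - minSection] and is loaded
++// lazily through load(), which hands the chunk to the world's PoiManager (loadInPoiChunk). Dirty state is
++// reported back through PoiManager#setDirty with the section key, e.g. (illustrative only):
++//   poiManager.setDirty(CoordinateUtils.getChunkSectionKey(chunkX, sectionY, chunkZ));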
++import net.minecraft.nbt.NbtOps; ++import net.minecraft.nbt.Tag; ++import net.minecraft.resources.RegistryOps; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.world.entity.ai.village.poi.PoiManager; ++import net.minecraft.world.entity.ai.village.poi.PoiSection; ++import org.slf4j.Logger; ++ ++import java.util.Optional; ++ ++public final class PoiChunk { ++ ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ ++ public final ServerLevel world; ++ public final int chunkX; ++ public final int chunkZ; ++ public final int minSection; ++ public final int maxSection; ++ ++ protected final PoiSection[] sections; ++ ++ private boolean isDirty; ++ private boolean loaded; ++ ++ public PoiChunk(final ServerLevel world, final int chunkX, final int chunkZ, final int minSection, final int maxSection) { ++ this(world, chunkX, chunkZ, minSection, maxSection, new PoiSection[maxSection - minSection + 1]); ++ } ++ ++ public PoiChunk(final ServerLevel world, final int chunkX, final int chunkZ, final int minSection, final int maxSection, final PoiSection[] sections) { ++ this.world = world; ++ this.chunkX = chunkX; ++ this.chunkZ = chunkZ; ++ this.minSection = minSection; ++ this.maxSection = maxSection; ++ this.sections = sections; ++ if (this.sections.length != (maxSection - minSection + 1)) { ++ throw new IllegalStateException("Incorrect length used, expected " + (maxSection - minSection + 1) + ", got " + this.sections.length); ++ } ++ } ++ ++ public void load() { ++ TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Loading in poi chunk off-main"); ++ if (this.loaded) { ++ return; ++ } ++ this.loaded = true; ++ this.world.chunkSource.getPoiManager().loadInPoiChunk(this); ++ } ++ ++ public boolean isLoaded() { ++ return this.loaded; ++ } ++ ++ public boolean isEmpty() { ++ for (final PoiSection section : this.sections) { ++ if (section != null && !section.isEmpty()) { ++ return false; ++ } ++ } ++ ++ return true; ++ } ++ ++ public PoiSection getOrCreateSection(final int chunkY) { ++ if (chunkY >= this.minSection && chunkY <= this.maxSection) { ++ final int idx = chunkY - this.minSection; ++ final PoiSection ret = this.sections[idx]; ++ if (ret != null) { ++ return ret; ++ } ++ ++ final PoiManager poiManager = this.world.getPoiManager(); ++ final long key = CoordinateUtils.getChunkSectionKey(this.chunkX, chunkY, this.chunkZ); ++ ++ return this.sections[idx] = new PoiSection(() -> { ++ poiManager.setDirty(key); ++ }); ++ } ++ throw new IllegalArgumentException("chunkY is out of bounds, chunkY: " + chunkY + " outside [" + this.minSection + "," + this.maxSection + "]"); ++ } ++ ++ public PoiSection getSection(final int chunkY) { ++ if (chunkY >= this.minSection && chunkY <= this.maxSection) { ++ return this.sections[chunkY - this.minSection]; ++ } ++ return null; ++ } ++ ++ public Optional getSectionForVanilla(final int chunkY) { ++ if (chunkY >= this.minSection && chunkY <= this.maxSection) { ++ final PoiSection ret = this.sections[chunkY - this.minSection]; ++ return ret == null ? 
Optional.empty() : ret.noAllocateOptional; ++ } ++ return Optional.empty(); ++ } ++ ++ public boolean isDirty() { ++ return this.isDirty; ++ } ++ ++ public void setDirty(final boolean dirty) { ++ this.isDirty = dirty; ++ } ++ ++ // returns null if empty ++ public CompoundTag save() { ++ final RegistryOps registryOps = RegistryOps.create(NbtOps.INSTANCE, world.getPoiManager().registryAccess); ++ ++ final CompoundTag ret = new CompoundTag(); ++ final CompoundTag sections = new CompoundTag(); ++ ret.put("Sections", sections); ++ ++ ret.putInt("DataVersion", SharedConstants.getCurrentVersion().getDataVersion().getVersion()); ++ ++ final ServerLevel world = this.world; ++ final PoiManager poiManager = world.getPoiManager(); ++ final int chunkX = this.chunkX; ++ final int chunkZ = this.chunkZ; ++ ++ for (int sectionY = this.minSection; sectionY <= this.maxSection; ++sectionY) { ++ final PoiSection chunk = this.sections[sectionY - this.minSection]; ++ if (chunk == null || chunk.isEmpty()) { ++ continue; ++ } ++ ++ final long key = CoordinateUtils.getChunkSectionKey(chunkX, sectionY, chunkZ); ++ // codecs are honestly such a fucking disaster. What the fuck is this trash? ++ final Codec codec = PoiSection.codec(() -> { ++ poiManager.setDirty(key); ++ }); ++ ++ final DataResult serializedResult = codec.encodeStart(registryOps, chunk); ++ final int finalSectionY = sectionY; ++ final Tag serialized = serializedResult.resultOrPartial((final String description) -> { ++ LOGGER.error("Failed to serialize poi chunk for world: " + world.getWorld().getName() + ", chunk: (" + chunkX + "," + finalSectionY + "," + chunkZ + "); description: " + description); ++ }).orElse(null); ++ if (serialized == null) { ++ // failed, should be logged from the resultOrPartial ++ continue; ++ } ++ ++ sections.put(Integer.toString(sectionY), serialized); ++ } ++ ++ return sections.isEmpty() ? null : ret; ++ } ++ ++ public static PoiChunk empty(final ServerLevel world, final int chunkX, final int chunkZ) { ++ final PoiChunk ret = new PoiChunk(world, chunkX, chunkZ, WorldUtil.getMinSection(world), WorldUtil.getMaxSection(world)); ++ ret.loaded = true; ++ return ret; ++ } ++ ++ public static PoiChunk parse(final ServerLevel world, final int chunkX, final int chunkZ, final CompoundTag data) { ++ final PoiChunk ret = empty(world, chunkX, chunkZ); ++ ++ final RegistryOps registryOps = RegistryOps.create(NbtOps.INSTANCE, world.getPoiManager().registryAccess); ++ ++ final CompoundTag sections = data.getCompound("Sections"); ++ ++ if (sections.isEmpty()) { ++ // nothing to parse ++ return ret; ++ } ++ ++ final PoiManager poiManager = world.getPoiManager(); ++ ++ boolean readAnything = false; ++ ++ for (int sectionY = ret.minSection; sectionY <= ret.maxSection; ++sectionY) { ++ final String key = Integer.toString(sectionY); ++ if (!sections.contains(key)) { ++ continue; ++ } ++ ++ final long coordinateKey = CoordinateUtils.getChunkSectionKey(chunkX, sectionY, chunkZ); ++ // codecs are honestly such a fucking disaster. What the fuck is this trash? 
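++ // Each entry under "Sections" is keyed by the section Y written as a string and is decoded through the
++ // PoiSection codec below; decode failures are logged via resultOrPartial and skipped, as are sections
++ // that decode to an empty PoiSection. Illustrative on-disk shape (not exhaustive):
++ //   { DataVersion: <int>, Sections: { "-4": {...}, "0": {...}, ... } }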
++ final Codec codec = PoiSection.codec(() -> { ++ poiManager.setDirty(coordinateKey); ++ }); ++ ++ final CompoundTag section = sections.getCompound(key); ++ final DataResult deserializeResult = codec.parse(registryOps, section); ++ final int finalSectionY = sectionY; ++ final PoiSection deserialized = deserializeResult.resultOrPartial((final String description) -> { ++ LOGGER.error("Failed to deserialize poi chunk for world: " + world.getWorld().getName() + ", chunk: (" + chunkX + "," + finalSectionY + "," + chunkZ + "); description: " + description); ++ }).orElse(null); ++ ++ if (deserialized == null || deserialized.isEmpty()) { ++ // completely empty, no point in storing this ++ continue; ++ } ++ ++ readAnything = true; ++ ret.sections[sectionY - ret.minSection] = deserialized; ++ } ++ ++ ret.loaded = !readAnything; // Set loaded to false if we read anything to ensure proper callbacks to PoiManager are made on #load ++ ++ return ret; ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java +new file mode 100644 +index 0000000000000000000000000000000000000000..fb42d776f15f735fb59e972e00e2b512c23a8387 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java +@@ -0,0 +1,121 @@ ++package io.papermc.paper.chunk.system.scheduling; ++ ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; ++import net.minecraft.server.level.ChunkMap; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.world.level.chunk.ChunkAccess; ++import net.minecraft.world.level.chunk.ChunkStatus; ++import net.minecraft.world.level.chunk.ImposterProtoChunk; ++import net.minecraft.world.level.chunk.LevelChunk; ++import net.minecraft.world.level.chunk.ProtoChunk; ++import java.lang.invoke.VarHandle; ++ ++public final class ChunkFullTask extends ChunkProgressionTask implements Runnable { ++ ++ protected final NewChunkHolder chunkHolder; ++ protected final ChunkAccess fromChunk; ++ protected final PrioritisedExecutor.PrioritisedTask convertToFullTask; ++ ++ public ChunkFullTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ, ++ final NewChunkHolder chunkHolder, final ChunkAccess fromChunk, final PrioritisedExecutor.Priority priority) { ++ super(scheduler, world, chunkX, chunkZ); ++ this.chunkHolder = chunkHolder; ++ this.fromChunk = fromChunk; ++ this.convertToFullTask = scheduler.createChunkTask(chunkX, chunkZ, this, priority); ++ } ++ ++ @Override ++ public ChunkStatus getTargetStatus() { ++ return ChunkStatus.FULL; ++ } ++ ++ @Override ++ public void run() { ++ // See Vanilla protoChunkToFullChunk for what this function should be doing ++ final LevelChunk chunk; ++ try { ++ if (this.fromChunk instanceof ImposterProtoChunk wrappedFull) { ++ chunk = wrappedFull.getWrapped(); ++ } else { ++ final ServerLevel world = this.world; ++ final ProtoChunk protoChunk = (ProtoChunk)this.fromChunk; ++ chunk = new LevelChunk(this.world, protoChunk, (final LevelChunk unused) -> { ++ ChunkMap.postLoadProtoChunk(world, protoChunk.getEntities()); ++ }); ++ } ++ ++ chunk.setChunkHolder(this.scheduler.chunkHolderManager.getChunkHolder(this.chunkX, this.chunkZ)); // replaces setFullStatus ++ chunk.runPostLoad(); ++ // Unlike Vanilla, we load the entity chunk here, as we load the NBT in empty status (unlike Vanilla) ++ // This brings entity 
addition back in line with older versions of the game ++ // Since we load the NBT in the empty status, this will never block for I/O ++ this.world.chunkTaskScheduler.chunkHolderManager.getOrCreateEntityChunk(this.chunkX, this.chunkZ, false); ++ ++ // we don't need the entitiesInLevel trash, this system doesn't double run callbacks ++ chunk.setLoaded(true); ++ chunk.registerAllBlockEntitiesAfterLevelLoad(); ++ chunk.registerTickContainerInLevel(this.world); ++ } catch (final Throwable throwable) { ++ this.complete(null, throwable); ++ ++ if (throwable instanceof ThreadDeath) { ++ throw (ThreadDeath)throwable; ++ } ++ return; ++ } ++ this.complete(chunk, null); ++ } ++ ++ protected volatile boolean scheduled; ++ protected static final VarHandle SCHEDULED_HANDLE = ConcurrentUtil.getVarHandle(ChunkFullTask.class, "scheduled", boolean.class); ++ ++ @Override ++ public boolean isScheduled() { ++ return this.scheduled; ++ } ++ ++ @Override ++ public void schedule() { ++ if ((boolean)SCHEDULED_HANDLE.getAndSet((ChunkFullTask)this, true)) { ++ throw new IllegalStateException("Cannot double call schedule()"); ++ } ++ this.convertToFullTask.queue(); ++ } ++ ++ @Override ++ public void cancel() { ++ if (this.convertToFullTask.cancel()) { ++ this.complete(null, null); ++ } ++ } ++ ++ @Override ++ public PrioritisedExecutor.Priority getPriority() { ++ return this.convertToFullTask.getPriority(); ++ } ++ ++ @Override ++ public void lowerPriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ this.convertToFullTask.lowerPriority(priority); ++ } ++ ++ @Override ++ public void setPriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ this.convertToFullTask.setPriority(priority); ++ } ++ ++ @Override ++ public void raisePriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ this.convertToFullTask.raisePriority(priority); ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java +new file mode 100644 +index 0000000000000000000000000000000000000000..748cc48c6c42c694d1c9b685e96fbe6d8337d3f3 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java +@@ -0,0 +1,1211 @@ ++package io.papermc.paper.chunk.system.scheduling; ++ ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.concurrentutil.map.SWMRLong2ObjectHashTable; ++import co.aikar.timings.Timing; ++import com.google.common.collect.ImmutableList; ++import com.google.gson.JsonArray; ++import com.google.gson.JsonObject; ++import com.mojang.logging.LogUtils; ++import io.papermc.paper.chunk.system.io.RegionFileIOThread; ++import io.papermc.paper.chunk.system.poi.PoiChunk; ++import io.papermc.paper.util.CoordinateUtils; ++import io.papermc.paper.util.TickThread; ++import io.papermc.paper.util.misc.Delayed8WayDistancePropagator2D; ++import io.papermc.paper.world.ChunkEntitySlices; ++import it.unimi.dsi.fastutil.longs.Long2IntLinkedOpenHashMap; ++import it.unimi.dsi.fastutil.longs.Long2IntMap; ++import 
it.unimi.dsi.fastutil.longs.Long2IntOpenHashMap; ++import it.unimi.dsi.fastutil.longs.Long2ObjectMap; ++import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap; ++import it.unimi.dsi.fastutil.longs.LongArrayList; ++import it.unimi.dsi.fastutil.longs.LongIterator; ++import it.unimi.dsi.fastutil.objects.ObjectRBTreeSet; ++import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet; ++import net.minecraft.nbt.CompoundTag; ++import io.papermc.paper.chunk.system.ChunkSystem; ++import net.minecraft.server.MinecraftServer; ++import net.minecraft.server.level.ChunkHolder; ++import net.minecraft.server.level.ChunkMap; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.server.level.Ticket; ++import net.minecraft.server.level.TicketType; ++import net.minecraft.util.SortedArraySet; ++import net.minecraft.util.Unit; ++import net.minecraft.world.level.ChunkPos; ++import net.minecraft.world.level.chunk.ChunkAccess; ++import net.minecraft.world.level.chunk.ChunkStatus; ++import org.bukkit.plugin.Plugin; ++import org.slf4j.Logger; ++import java.io.IOException; ++import java.text.DecimalFormat; ++import java.util.ArrayDeque; ++import java.util.ArrayList; ++import java.util.Collection; ++import java.util.Collections; ++import java.util.Iterator; ++import java.util.List; ++import java.util.Objects; ++import java.util.concurrent.TimeUnit; ++import java.util.concurrent.atomic.AtomicBoolean; ++import java.util.concurrent.atomic.AtomicReference; ++import java.util.concurrent.locks.LockSupport; ++import java.util.concurrent.locks.ReentrantLock; ++import java.util.function.Predicate; ++ ++public final class ChunkHolderManager { ++ ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ ++ public static final int FULL_LOADED_TICKET_LEVEL = 33; ++ public static final int BLOCK_TICKING_TICKET_LEVEL = 32; ++ public static final int ENTITY_TICKING_TICKET_LEVEL = 31; ++ public static final int MAX_TICKET_LEVEL = ChunkMap.MAX_CHUNK_DISTANCE; // inclusive ++ ++ private static final long NO_TIMEOUT_MARKER = -1L; ++ ++ final ReentrantLock ticketLock = new ReentrantLock(); ++ ++ private final SWMRLong2ObjectHashTable chunkHolders = new SWMRLong2ObjectHashTable<>(16384, 0.25f); ++ private final Long2ObjectOpenHashMap>> tickets = new Long2ObjectOpenHashMap<>(8192, 0.25f); ++ // what a disaster of a name ++ // this is a map of removal tick to a map of chunks and the number of tickets a chunk has that are to expire that tick ++ private final Long2ObjectOpenHashMap removeTickToChunkExpireTicketCount = new Long2ObjectOpenHashMap<>(); ++ private final ServerLevel world; ++ private final ChunkTaskScheduler taskScheduler; ++ private long currentTick; ++ ++ private final ArrayDeque pendingFullLoadUpdate = new ArrayDeque<>(); ++ private final ObjectRBTreeSet autoSaveQueue = new ObjectRBTreeSet<>((final NewChunkHolder c1, final NewChunkHolder c2) -> { ++ if (c1 == c2) { ++ return 0; ++ } ++ ++ final int saveTickCompare = Long.compare(c1.lastAutoSave, c2.lastAutoSave); ++ ++ if (saveTickCompare != 0) { ++ return saveTickCompare; ++ } ++ ++ final long coord1 = CoordinateUtils.getChunkKey(c1.chunkX, c1.chunkZ); ++ final long coord2 = CoordinateUtils.getChunkKey(c2.chunkX, c2.chunkZ); ++ ++ if (coord1 == coord2) { ++ throw new IllegalStateException("Duplicate chunkholder in auto save queue"); ++ } ++ ++ return Long.compare(coord1, coord2); ++ }); ++ ++ public ChunkHolderManager(final ServerLevel world, final ChunkTaskScheduler taskScheduler) { ++ this.world = world; ++ this.taskScheduler = 
taskScheduler; ++ } ++ ++ private long statusUpgradeId; ++ ++ long getNextStatusUpgradeId() { ++ return ++this.statusUpgradeId; ++ } ++ ++ public List getOldChunkHolders() { ++ final List holders = this.getChunkHolders(); ++ final List ret = new ArrayList<>(holders.size()); ++ for (final NewChunkHolder holder : holders) { ++ ret.add(holder.vanillaChunkHolder); ++ } ++ return ret; ++ } ++ ++ public List getChunkHolders() { ++ final List ret = new ArrayList<>(this.chunkHolders.size()); ++ this.chunkHolders.forEachValue(ret::add); ++ return ret; ++ } ++ ++ public int size() { ++ return this.chunkHolders.size(); ++ } ++ ++ public void close(final boolean save, final boolean halt) { ++ TickThread.ensureTickThread("Closing world off-main"); ++ if (halt) { ++ LOGGER.info("Waiting 60s for chunk system to halt for world '" + this.world.getWorld().getName() + "'"); ++ if (!this.taskScheduler.halt(true, TimeUnit.SECONDS.toNanos(60L))) { ++ LOGGER.warn("Failed to halt world generation/loading tasks for world '" + this.world.getWorld().getName() + "'"); ++ } else { ++ LOGGER.info("Halted chunk system for world '" + this.world.getWorld().getName() + "'"); ++ } ++ } ++ ++ if (save) { ++ this.saveAllChunks(true, true, true); ++ } ++ ++ if (this.world.chunkDataControllerNew.hasTasks() || this.world.entityDataControllerNew.hasTasks() || this.world.poiDataControllerNew.hasTasks()) { ++ RegionFileIOThread.flush(); ++ } ++ ++ // kill regionfile cache ++ try { ++ this.world.chunkDataControllerNew.getCache().close(); ++ } catch (final IOException ex) { ++ LOGGER.error("Failed to close chunk regionfile cache for world '" + this.world.getWorld().getName() + "'", ex); ++ } ++ try { ++ this.world.entityDataControllerNew.getCache().close(); ++ } catch (final IOException ex) { ++ LOGGER.error("Failed to close entity regionfile cache for world '" + this.world.getWorld().getName() + "'", ex); ++ } ++ try { ++ this.world.poiDataControllerNew.getCache().close(); ++ } catch (final IOException ex) { ++ LOGGER.error("Failed to close poi regionfile cache for world '" + this.world.getWorld().getName() + "'", ex); ++ } ++ } ++ ++ void ensureInAutosave(final NewChunkHolder holder) { ++ if (!this.autoSaveQueue.contains(holder)) { ++ holder.lastAutoSave = MinecraftServer.currentTick; ++ this.autoSaveQueue.add(holder); ++ } ++ } ++ ++ public void autoSave() { ++ final List reschedule = new ArrayList<>(); ++ final long currentTick = MinecraftServer.currentTickLong; ++ final long maxSaveTime = currentTick - this.world.paperConfig().chunks.autoSaveInterval.value(); ++ for (int autoSaved = 0; autoSaved < this.world.paperConfig().chunks.maxAutoSaveChunksPerTick && !this.autoSaveQueue.isEmpty();) { ++ final NewChunkHolder holder = this.autoSaveQueue.first(); ++ ++ if (holder.lastAutoSave > maxSaveTime) { ++ break; ++ } ++ ++ this.autoSaveQueue.remove(holder); ++ ++ holder.lastAutoSave = currentTick; ++ if (holder.save(false, false) != null) { ++ ++autoSaved; ++ } ++ ++ if (holder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ reschedule.add(holder); ++ } ++ } ++ ++ for (final NewChunkHolder holder : reschedule) { ++ if (holder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ this.autoSaveQueue.add(holder); ++ } ++ } ++ } ++ ++ public void saveAllChunks(final boolean flush, final boolean shutdown, final boolean logProgress) { ++ final List holders = this.getChunkHolders(); ++ ++ if (logProgress) { ++ LOGGER.info("Saving all chunkholders for world '" + this.world.getWorld().getName() + "'"); ++ } 
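++ // The loop below walks a snapshot of every chunk holder, counting saved block, entity and poi chunks
++ // separately. When flushing is requested, RegionFileIOThread.partialFlush is invoked roughly every
++ // flushInterval (50) successful saves, presumably to keep the pending IO queue bounded, and progress
++ // is logged about every 10 seconds while logProgress is set.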
++ ++ final DecimalFormat format = new DecimalFormat("#0.00"); ++ ++ int saved = 0; ++ ++ long start = System.nanoTime(); ++ long lastLog = start; ++ boolean needsFlush = false; ++ final int flushInterval = 50; ++ ++ int savedChunk = 0; ++ int savedEntity = 0; ++ int savedPoi = 0; ++ ++ for (int i = 0, len = holders.size(); i < len; ++i) { ++ final NewChunkHolder holder = holders.get(i); ++ try { ++ final NewChunkHolder.SaveStat saveStat = holder.save(shutdown, false); ++ if (saveStat != null) { ++ ++saved; ++ needsFlush = flush; ++ if (saveStat.savedChunk()) { ++ ++savedChunk; ++ } ++ if (saveStat.savedEntityChunk()) { ++ ++savedEntity; ++ } ++ if (saveStat.savedPoiChunk()) { ++ ++savedPoi; ++ } ++ } ++ } catch (final ThreadDeath thr) { ++ throw thr; ++ } catch (final Throwable thr) { ++ LOGGER.error("Failed to save chunk (" + holder.chunkX + "," + holder.chunkZ + ") in world '" + this.world.getWorld().getName() + "'", thr); ++ } ++ if (needsFlush && (saved % flushInterval) == 0) { ++ needsFlush = false; ++ RegionFileIOThread.partialFlush(flushInterval / 2); ++ } ++ if (logProgress) { ++ final long currTime = System.nanoTime(); ++ if ((currTime - lastLog) > TimeUnit.SECONDS.toNanos(10L)) { ++ lastLog = currTime; ++ LOGGER.info("Saved " + saved + " chunks (" + format.format((double)(i+1)/(double)len * 100.0) + "%) in world '" + this.world.getWorld().getName() + "'"); ++ } ++ } ++ } ++ if (flush) { ++ RegionFileIOThread.flush(); ++ if (this.world.paperConfig().chunks.flushRegionsOnSave) { ++ try { ++ this.world.chunkSource.chunkMap.regionFileCache.flush(); ++ } catch (IOException ex) { ++ LOGGER.error("Exception when flushing regions in world {}", this.world.getWorld().getName(), ex); ++ } ++ } ++ } ++ if (logProgress) { ++ LOGGER.info("Saved " + savedChunk + " block chunks, " + savedEntity + " entity chunks, " + savedPoi + " poi chunks in world '" + this.world.getWorld().getName() + "' in " + format.format(1.0E-9 * (System.nanoTime() - start)) + "s"); ++ } ++ } ++ ++ protected final Long2IntLinkedOpenHashMap ticketLevelUpdates = new Long2IntLinkedOpenHashMap() { ++ @Override ++ protected void rehash(final int newN) { ++ // no downsizing allowed ++ if (newN < this.n) { ++ return; ++ } ++ super.rehash(newN); ++ } ++ }; ++ ++ protected final Delayed8WayDistancePropagator2D ticketLevelPropagator = new Delayed8WayDistancePropagator2D( ++ (final long coordinate, final byte oldLevel, final byte newLevel) -> { ++ ChunkHolderManager.this.ticketLevelUpdates.putAndMoveToLast(coordinate, convertBetweenTicketLevels(newLevel)); ++ } ++ ); ++ // function for converting between ticket levels and propagator levels and vice versa ++ // the problem is the ticket level propagator will propagate from a set source down to zero, whereas mojang expects ++ // levels to propagate from a set value up to a maximum value. so we need to convert the levels we put into the propagator ++ // and the levels we get out of the propagator ++ ++ public static int convertBetweenTicketLevels(final int level) { ++ return ChunkMap.MAX_CHUNK_DISTANCE - level + 1; ++ } ++ ++ public boolean hasTickets() { ++ this.ticketLock.lock(); ++ try { ++ return !this.tickets.isEmpty(); ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ } ++ ++ public String getTicketDebugString(final long coordinate) { ++ this.ticketLock.lock(); ++ try { ++ final SortedArraySet> tickets = this.tickets.get(coordinate); ++ ++ return tickets != null ? 
tickets.first().toString() : "no_ticket"; ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ } ++ ++ public Long2ObjectOpenHashMap>> getTicketsCopy() { ++ this.ticketLock.lock(); ++ try { ++ return this.tickets.clone(); ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ } ++ ++ public Collection getPluginChunkTickets(int x, int z) { ++ ImmutableList.Builder ret; ++ this.ticketLock.lock(); ++ try { ++ SortedArraySet> tickets = this.tickets.get(ChunkPos.asLong(x, z)); ++ ++ if (tickets == null) { ++ return Collections.emptyList(); ++ } ++ ++ ret = ImmutableList.builder(); ++ for (Ticket ticket : tickets) { ++ if (ticket.getType() == TicketType.PLUGIN_TICKET) { ++ ret.add((Plugin)ticket.key); ++ } ++ } ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ ++ return ret.build(); ++ } ++ ++ protected final int getPropagatedTicketLevel(final long coordinate) { ++ return convertBetweenTicketLevels(this.ticketLevelPropagator.getLevel(coordinate)); ++ } ++ ++ protected final void updateTicketLevel(final long coordinate, final int ticketLevel) { ++ if (ticketLevel > ChunkMap.MAX_CHUNK_DISTANCE) { ++ this.ticketLevelPropagator.removeSource(coordinate); ++ } else { ++ this.ticketLevelPropagator.setSource(coordinate, convertBetweenTicketLevels(ticketLevel)); ++ } ++ } ++ ++ private static int getTicketLevelAt(SortedArraySet> tickets) { ++ return !tickets.isEmpty() ? tickets.first().getTicketLevel() : MAX_TICKET_LEVEL + 1; ++ } ++ ++ public boolean addTicketAtLevel(final TicketType type, final ChunkPos chunkPos, final int level, ++ final T identifier) { ++ return this.addTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkPos), level, identifier); ++ } ++ ++ public boolean addTicketAtLevel(final TicketType type, final int chunkX, final int chunkZ, final int level, ++ final T identifier) { ++ return this.addTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkX, chunkZ), level, identifier); ++ } ++ ++ // supposed to return true if the ticket was added and did not replace another ++ // but, we always return false if the ticket cannot be added ++ public boolean addTicketAtLevel(final TicketType type, final long chunk, final int level, final T identifier) { ++ final long removeDelay = Math.max(0, type.timeout); ++ if (level > MAX_TICKET_LEVEL) { ++ return false; ++ } ++ ++ this.ticketLock.lock(); ++ try { ++ final long removeTick = removeDelay == 0 ? 
NO_TIMEOUT_MARKER : this.currentTick + removeDelay; ++ final Ticket ticket = new Ticket<>(type, level, identifier, removeTick); ++ ++ final SortedArraySet> ticketsAtChunk = this.tickets.computeIfAbsent(chunk, (final long keyInMap) -> { ++ return SortedArraySet.create(4); ++ }); ++ ++ final int levelBefore = getTicketLevelAt(ticketsAtChunk); ++ final Ticket current = (Ticket)ticketsAtChunk.replace(ticket); ++ final int levelAfter = getTicketLevelAt(ticketsAtChunk); ++ ++ if (current != ticket) { ++ final long oldRemovalTick = current.removalTick; ++ if (removeTick != oldRemovalTick) { ++ if (oldRemovalTick != NO_TIMEOUT_MARKER) { ++ final Long2IntOpenHashMap removeCounts = this.removeTickToChunkExpireTicketCount.get(oldRemovalTick); ++ final int prevCount = removeCounts.addTo(chunk, -1); ++ ++ if (prevCount == 1) { ++ removeCounts.remove(chunk); ++ if (removeCounts.isEmpty()) { ++ this.removeTickToChunkExpireTicketCount.remove(oldRemovalTick); ++ } ++ } ++ } ++ if (removeTick != NO_TIMEOUT_MARKER) { ++ this.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> { ++ return new Long2IntOpenHashMap(); ++ }).addTo(chunk, 1); ++ } ++ } ++ } else { ++ if (removeTick != NO_TIMEOUT_MARKER) { ++ this.removeTickToChunkExpireTicketCount.computeIfAbsent(removeTick, (final long keyInMap) -> { ++ return new Long2IntOpenHashMap(); ++ }).addTo(chunk, 1); ++ } ++ } ++ ++ if (levelBefore != levelAfter) { ++ this.updateTicketLevel(chunk, levelAfter); ++ } ++ ++ return current == ticket; ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ } ++ ++ public boolean removeTicketAtLevel(final TicketType type, final ChunkPos chunkPos, final int level, final T identifier) { ++ return this.removeTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkPos), level, identifier); ++ } ++ ++ public boolean removeTicketAtLevel(final TicketType type, final int chunkX, final int chunkZ, final int level, final T identifier) { ++ return this.removeTicketAtLevel(type, CoordinateUtils.getChunkKey(chunkX, chunkZ), level, identifier); ++ } ++ ++ public boolean removeTicketAtLevel(final TicketType type, final long chunk, final int level, final T identifier) { ++ if (level > MAX_TICKET_LEVEL) { ++ return false; ++ } ++ ++ this.ticketLock.lock(); ++ try { ++ final SortedArraySet> ticketsAtChunk = this.tickets.get(chunk); ++ if (ticketsAtChunk == null) { ++ return false; ++ } ++ ++ final int oldLevel = getTicketLevelAt(ticketsAtChunk); ++ final Ticket ticket = (Ticket)ticketsAtChunk.removeAndGet(new Ticket<>(type, level, identifier, -2L)); ++ ++ if (ticket == null) { ++ return false; ++ } ++ ++ if (ticketsAtChunk.isEmpty()) { ++ this.tickets.remove(chunk); ++ } ++ ++ final int newLevel = getTicketLevelAt(ticketsAtChunk); ++ ++ final long removeTick = ticket.removalTick; ++ if (removeTick != NO_TIMEOUT_MARKER) { ++ final Long2IntOpenHashMap removeCounts = this.removeTickToChunkExpireTicketCount.get(removeTick); ++ final int currCount = removeCounts.addTo(chunk, -1); ++ ++ if (currCount == 1) { ++ removeCounts.remove(chunk); ++ if (removeCounts.isEmpty()) { ++ this.removeTickToChunkExpireTicketCount.remove(removeTick); ++ } ++ } ++ } ++ ++ if (oldLevel != newLevel) { ++ this.updateTicketLevel(chunk, newLevel); ++ } ++ ++ return true; ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ } ++ ++ // atomic with respect to all add/remove/addandremove ticket calls for the given chunk ++ public void addAndRemoveTickets(final long chunk, final TicketType addType, final int addLevel, final T addIdentifier, ++ final 
TicketType removeType, final int removeLevel, final V removeIdentifier) { ++ this.ticketLock.lock(); ++ try { ++ this.addTicketAtLevel(addType, chunk, addLevel, addIdentifier); ++ this.removeTicketAtLevel(removeType, chunk, removeLevel, removeIdentifier); ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ } ++ ++ public void removeAllTicketsFor(final TicketType ticketType, final int ticketLevel, final T ticketIdentifier) { ++ if (ticketLevel > MAX_TICKET_LEVEL) { ++ return; ++ } ++ ++ this.ticketLock.lock(); ++ try { ++ for (final LongIterator iterator = new LongArrayList(this.tickets.keySet()).longIterator(); iterator.hasNext();) { ++ final long chunk = iterator.nextLong(); ++ ++ this.removeTicketAtLevel(ticketType, chunk, ticketLevel, ticketIdentifier); ++ } ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ } ++ ++ public void tick() { ++ TickThread.ensureTickThread("Cannot tick ticket manager off-main"); ++ ++ this.ticketLock.lock(); ++ try { ++ final long tick = ++this.currentTick; ++ ++ final Long2IntOpenHashMap toRemove = this.removeTickToChunkExpireTicketCount.remove(tick); ++ ++ if (toRemove == null) { ++ return; ++ } ++ ++ final Predicate> expireNow = (final Ticket ticket) -> { ++ return ticket.removalTick == tick; ++ }; ++ ++ for (final LongIterator iterator = toRemove.keySet().longIterator(); iterator.hasNext();) { ++ final long chunk = iterator.nextLong(); ++ ++ final SortedArraySet> tickets = this.tickets.get(chunk); ++ tickets.removeIf(expireNow); ++ if (tickets.isEmpty()) { ++ this.tickets.remove(chunk); ++ this.ticketLevelPropagator.removeSource(chunk); ++ } else { ++ this.ticketLevelPropagator.setSource(chunk, convertBetweenTicketLevels(tickets.first().getTicketLevel())); ++ } ++ } ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ ++ this.processTicketUpdates(); ++ } ++ ++ public NewChunkHolder getChunkHolder(final int chunkX, final int chunkZ) { ++ return this.chunkHolders.get(CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ } ++ ++ public NewChunkHolder getChunkHolder(final long position) { ++ return this.chunkHolders.get(position); ++ } ++ ++ public void raisePriority(final int x, final int z, final PrioritisedExecutor.Priority priority) { ++ final NewChunkHolder chunkHolder = this.getChunkHolder(x, z); ++ if (chunkHolder != null) { ++ chunkHolder.raisePriority(priority); ++ } ++ } ++ ++ public void setPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) { ++ final NewChunkHolder chunkHolder = this.getChunkHolder(x, z); ++ if (chunkHolder != null) { ++ chunkHolder.setPriority(priority); ++ } ++ } ++ ++ public void lowerPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) { ++ final NewChunkHolder chunkHolder = this.getChunkHolder(x, z); ++ if (chunkHolder != null) { ++ chunkHolder.lowerPriority(priority); ++ } ++ } ++ ++ private NewChunkHolder createChunkHolder(final long position) { ++ final NewChunkHolder ret = new NewChunkHolder(this.world, CoordinateUtils.getChunkX(position), CoordinateUtils.getChunkZ(position), this.taskScheduler); ++ ++ ChunkSystem.onChunkHolderCreate(this.world, ret.vanillaChunkHolder); ++ ret.vanillaChunkHolder.onChunkAdd(); ++ ++ return ret; ++ } ++ ++ // because this function creates the chunk holder without a ticket, it is the caller's responsibility to ensure ++ // the chunk holder eventually unloads. 
this should only be used to avoid using processTicketUpdates to create chunkholders, ++ // as processTicketUpdates may call plugin logic; in every other case a ticket is appropriate ++ private NewChunkHolder getOrCreateChunkHolder(final int chunkX, final int chunkZ) { ++ return this.getOrCreateChunkHolder(CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ } ++ ++ private NewChunkHolder getOrCreateChunkHolder(final long position) { ++ if (!this.ticketLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Must hold ticket level update lock!"); ++ } ++ if (!this.taskScheduler.schedulingLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Must hold scheduler lock!!"); ++ } ++ ++ // we could just acquire these locks, but... ++ // must own the locks because the caller needs to ensure that no unload can occur AFTER this function returns ++ ++ NewChunkHolder current = this.chunkHolders.get(position); ++ if (current != null) { ++ return current; ++ } ++ ++ current = this.createChunkHolder(position); ++ this.chunkHolders.put(position, current); ++ ++ return current; ++ } ++ ++ private long entityLoadCounter; ++ ++ public ChunkEntitySlices getOrCreateEntityChunk(final int chunkX, final int chunkZ, final boolean transientChunk) { ++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot create entity chunk off-main"); ++ ChunkEntitySlices ret; ++ ++ NewChunkHolder current = this.getChunkHolder(chunkX, chunkZ); ++ if (current != null && (ret = current.getEntityChunk()) != null && (transientChunk || !ret.isTransient())) { ++ return ret; ++ } ++ ++ final AtomicBoolean isCompleted = new AtomicBoolean(); ++ final Thread waiter = Thread.currentThread(); ++ final Long entityLoadId; ++ NewChunkHolder.GenericDataLoadTaskCallback loadTask = null; ++ this.ticketLock.lock(); ++ try { ++ entityLoadId = Long.valueOf(this.entityLoadCounter++); ++ this.addTicketAtLevel(TicketType.ENTITY_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, entityLoadId); ++ this.taskScheduler.schedulingLock.lock(); ++ try { ++ current = this.getOrCreateChunkHolder(chunkX, chunkZ); ++ if ((ret = current.getEntityChunk()) != null && (transientChunk || !ret.isTransient())) { ++ this.removeTicketAtLevel(TicketType.ENTITY_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, entityLoadId); ++ return ret; ++ } ++ ++ if (current.isEntityChunkNBTLoaded()) { ++ isCompleted.setPlain(true); ++ } else { ++ loadTask = current.getOrLoadEntityData((final GenericDataLoadTask.TaskResult result) -> { ++ if (!transientChunk) { ++ isCompleted.set(true); ++ LockSupport.unpark(waiter); ++ } ++ }); ++ final ChunkLoadTask.EntityDataLoadTask entityLoad = current.getEntityDataLoadTask(); ++ ++ if (entityLoad != null && !transientChunk) { ++ entityLoad.raisePriority(PrioritisedExecutor.Priority.BLOCKING); ++ } ++ } ++ } finally { ++ this.taskScheduler.schedulingLock.unlock(); ++ } ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ ++ if (loadTask != null) { ++ loadTask.schedule(); ++ } ++ ++ if (!transientChunk) { ++ // Note: no need to busy wait on the chunk queue, entity load will complete off-main ++ boolean interrupted = false; ++ while (!isCompleted.get()) { ++ interrupted |= Thread.interrupted(); ++ LockSupport.park(); ++ } ++ ++ if (interrupted) { ++ Thread.currentThread().interrupt(); ++ } ++ } ++ ++ // now that the entity data is loaded, we can load it into the world ++ ++ ret = current.loadInEntityChunk(transientChunk); ++ ++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ); ++ this.addAndRemoveTickets(chunkKey, ++ 
TicketType.UNKNOWN, MAX_TICKET_LEVEL, new ChunkPos(chunkX, chunkZ), ++ TicketType.ENTITY_LOAD, MAX_TICKET_LEVEL, entityLoadId ++ ); ++ ++ return ret; ++ } ++ ++ public PoiChunk getPoiChunkIfLoaded(final int chunkX, final int chunkZ, final boolean checkLoadInCallback) { ++ final NewChunkHolder holder = this.getChunkHolder(chunkX, chunkZ); ++ if (holder != null) { ++ final PoiChunk ret = holder.getPoiChunk(); ++ return ret == null || (checkLoadInCallback && !ret.isLoaded()) ? null : ret; ++ } ++ return null; ++ } ++ ++ private long poiLoadCounter; ++ ++ public PoiChunk loadPoiChunk(final int chunkX, final int chunkZ) { ++ TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Cannot create poi chunk off-main"); ++ PoiChunk ret; ++ ++ NewChunkHolder current = this.getChunkHolder(chunkX, chunkZ); ++ if (current != null && (ret = current.getPoiChunk()) != null) { ++ if (!ret.isLoaded()) { ++ ret.load(); ++ } ++ return ret; ++ } ++ ++ final AtomicReference completed = new AtomicReference<>(); ++ final AtomicBoolean isCompleted = new AtomicBoolean(); ++ final Thread waiter = Thread.currentThread(); ++ final Long poiLoadId; ++ NewChunkHolder.GenericDataLoadTaskCallback loadTask = null; ++ this.ticketLock.lock(); ++ try { ++ poiLoadId = Long.valueOf(this.poiLoadCounter++); ++ this.addTicketAtLevel(TicketType.POI_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, poiLoadId); ++ this.taskScheduler.schedulingLock.lock(); ++ try { ++ current = this.getOrCreateChunkHolder(chunkX, chunkZ); ++ if (current.isPoiChunkLoaded()) { ++ this.removeTicketAtLevel(TicketType.POI_LOAD, chunkX, chunkZ, MAX_TICKET_LEVEL, poiLoadId); ++ return current.getPoiChunk(); ++ } ++ ++ loadTask = current.getOrLoadPoiData((final GenericDataLoadTask.TaskResult result) -> { ++ completed.setPlain(result.left()); ++ isCompleted.set(true); ++ LockSupport.unpark(waiter); ++ }); ++ final ChunkLoadTask.PoiDataLoadTask poiLoad = current.getPoiDataLoadTask(); ++ ++ if (poiLoad != null) { ++ poiLoad.raisePriority(PrioritisedExecutor.Priority.BLOCKING); ++ } ++ } finally { ++ this.taskScheduler.schedulingLock.unlock(); ++ } ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ ++ if (loadTask != null) { ++ loadTask.schedule(); ++ } ++ ++ // Note: no need to busy wait on the chunk queue, poi load will complete off-main ++ ++ boolean interrupted = false; ++ while (!isCompleted.get()) { ++ interrupted |= Thread.interrupted(); ++ LockSupport.park(); ++ } ++ ++ if (interrupted) { ++ Thread.currentThread().interrupt(); ++ } ++ ++ ret = completed.getPlain(); ++ ++ ret.load(); ++ ++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ); ++ this.addAndRemoveTickets(chunkKey, ++ TicketType.UNKNOWN, MAX_TICKET_LEVEL, new ChunkPos(chunkX, chunkZ), ++ TicketType.POI_LOAD, MAX_TICKET_LEVEL, poiLoadId ++ ); ++ ++ return ret; ++ } ++ ++ void addChangedStatuses(final List changedFullStatus) { ++ if (changedFullStatus.isEmpty()) { ++ return; ++ } ++ if (!TickThread.isTickThread()) { ++ this.taskScheduler.scheduleChunkTask(() -> { ++ final ArrayDeque pendingFullLoadUpdate = ChunkHolderManager.this.pendingFullLoadUpdate; ++ for (int i = 0, len = changedFullStatus.size(); i < len; ++i) { ++ pendingFullLoadUpdate.add(changedFullStatus.get(i)); ++ } ++ ++ ChunkHolderManager.this.processPendingFullUpdate(); ++ }, PrioritisedExecutor.Priority.HIGHEST); ++ } else { ++ final ArrayDeque pendingFullLoadUpdate = this.pendingFullLoadUpdate; ++ for (int i = 0, len = changedFullStatus.size(); i < len; ++i) { ++ pendingFullLoadUpdate.add(changedFullStatus.get(i)); ++ } ++ 
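++ // Already on a tick thread: holders are only queued here and are drained later by
++ // processPendingFullUpdate (e.g. from processTicketUpdates), presumably to avoid re-entrant
++ // full status handling from this call site.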
} ++ } ++ ++ final ReferenceLinkedOpenHashSet unloadQueue = new ReferenceLinkedOpenHashSet<>(); ++ ++ private void removeChunkHolder(final NewChunkHolder holder) { ++ holder.killed = true; ++ holder.vanillaChunkHolder.onChunkRemove(); ++ this.autoSaveQueue.remove(holder); ++ ChunkSystem.onChunkHolderDelete(this.world, holder.vanillaChunkHolder); ++ this.chunkHolders.remove(CoordinateUtils.getChunkKey(holder.chunkX, holder.chunkZ)); ++ } ++ ++ // note: never call while inside the chunk system, this will absolutely break everything ++ public void processUnloads() { ++ TickThread.ensureTickThread("Cannot unload chunks off-main"); ++ ++ if (BLOCK_TICKET_UPDATES.get() == Boolean.TRUE) { ++ throw new IllegalStateException("Cannot unload chunks recursively"); ++ } ++ if (this.ticketLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Cannot hold ticket update lock while calling processUnloads"); ++ } ++ if (this.taskScheduler.schedulingLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Cannot hold scheduling lock while calling processUnloads"); ++ } ++ ++ final List unloadQueue; ++ final List scheduleList = new ArrayList<>(); ++ this.ticketLock.lock(); ++ try { ++ this.taskScheduler.schedulingLock.lock(); ++ try { ++ if (this.unloadQueue.isEmpty()) { ++ return; ++ } ++ // in order to ensure all chunks in the unload queue do not have a pending ticket level update, ++ // process them now ++ this.processTicketUpdates(false, false, scheduleList); ++ unloadQueue = new ArrayList<>((int)(this.unloadQueue.size() * 0.05) + 1); ++ ++ final int unloadCount = Math.max(50, (int)(this.unloadQueue.size() * 0.05)); ++ for (int i = 0; i < unloadCount && !this.unloadQueue.isEmpty(); ++i) { ++ final NewChunkHolder chunkHolder = this.unloadQueue.removeFirst(); ++ if (chunkHolder.isSafeToUnload() != null) { ++ LOGGER.error("Chunkholder " + chunkHolder + " is not safe to unload but is inside the unload queue?"); ++ continue; ++ } ++ final NewChunkHolder.UnloadState state = chunkHolder.unloadStage1(); ++ if (state == null) { ++ // can unload immediately ++ this.removeChunkHolder(chunkHolder); ++ continue; ++ } ++ unloadQueue.add(state); ++ } ++ } finally { ++ this.taskScheduler.schedulingLock.unlock(); ++ } ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ // schedule tasks, we can't let processTicketUpdates do this because we call it holding the schedule lock ++ for (int i = 0, len = scheduleList.size(); i < len; ++i) { ++ scheduleList.get(i).schedule(); ++ } ++ ++ final List toRemove = new ArrayList<>(unloadQueue.size()); ++ ++ final Boolean before = this.blockTicketUpdates(); ++ try { ++ for (int i = 0, len = unloadQueue.size(); i < len; ++i) { ++ final NewChunkHolder.UnloadState state = unloadQueue.get(i); ++ final NewChunkHolder holder = state.holder(); ++ ++ holder.unloadStage2(state); ++ toRemove.add(holder); ++ } ++ } finally { ++ this.unblockTicketUpdates(before); ++ } ++ ++ this.ticketLock.lock(); ++ try { ++ this.taskScheduler.schedulingLock.lock(); ++ try { ++ for (int i = 0, len = toRemove.size(); i < len; ++i) { ++ final NewChunkHolder holder = toRemove.get(i); ++ ++ if (holder.unloadStage3()) { ++ this.removeChunkHolder(holder); ++ } else { ++ // add cooldown so the next unload check is not immediately next tick ++ this.addTicketAtLevel(TicketType.UNLOAD_COOLDOWN, holder.chunkX, holder.chunkZ, MAX_TICKET_LEVEL, Unit.INSTANCE); ++ } ++ } ++ } finally { ++ this.taskScheduler.schedulingLock.unlock(); ++ } ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ } ++ ++ private 
final ThreadLocal BLOCK_TICKET_UPDATES = ThreadLocal.withInitial(() -> { ++ return Boolean.FALSE; ++ }); ++ ++ public Boolean blockTicketUpdates() { ++ final Boolean ret = BLOCK_TICKET_UPDATES.get(); ++ BLOCK_TICKET_UPDATES.set(Boolean.TRUE); ++ return ret; ++ } ++ ++ public void unblockTicketUpdates(final Boolean before) { ++ BLOCK_TICKET_UPDATES.set(before); ++ } ++ ++ public boolean processTicketUpdates() { ++ return this.processTicketUpdates(true, true, null); ++ } ++ ++ private static final ThreadLocal> CURRENT_TICKET_UPDATE_SCHEDULING = new ThreadLocal<>(); ++ ++ static List getCurrentTicketUpdateScheduling() { ++ return CURRENT_TICKET_UPDATE_SCHEDULING.get(); ++ } ++ ++ private boolean processTicketUpdates(final boolean checkLocks, final boolean processFullUpdates, List scheduledTasks) { ++ TickThread.ensureTickThread("Cannot process ticket levels off-main"); ++ if (BLOCK_TICKET_UPDATES.get() == Boolean.TRUE) { ++ throw new IllegalStateException("Cannot update ticket level while unloading chunks or updating entity manager"); ++ } ++ if (checkLocks && this.ticketLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Illegal recursive processTicketUpdates!"); ++ } ++ if (checkLocks && this.taskScheduler.schedulingLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Cannot update ticket levels from a scheduler context!"); ++ } ++ ++ List changedFullStatus = null; ++ ++ final boolean isTickThread = TickThread.isTickThread(); ++ ++ boolean ret = false; ++ final boolean canProcessFullUpdates = processFullUpdates & isTickThread; ++ final boolean canProcessScheduling = scheduledTasks == null; ++ ++ this.ticketLock.lock(); ++ try { ++ final boolean levelsUpdated = this.ticketLevelPropagator.propagateUpdates(); ++ if (levelsUpdated) { ++ // Unlike CB, ticket level updates cannot happen recursively. Thank god. ++ if (!this.ticketLevelUpdates.isEmpty()) { ++ ret = true; ++ ++ // first the necessary chunkholders must be created, so just update the ticket levels ++ for (final Iterator iterator = this.ticketLevelUpdates.long2IntEntrySet().fastIterator(); iterator.hasNext();) { ++ final Long2IntMap.Entry entry = iterator.next(); ++ final long key = entry.getLongKey(); ++ final int newLevel = entry.getIntValue(); ++ ++ NewChunkHolder current = this.chunkHolders.get(key); ++ if (current == null && newLevel > MAX_TICKET_LEVEL) { ++ // not loaded and it shouldn't be loaded! ++ iterator.remove(); ++ continue; ++ } ++ ++ final int currentLevel = current == null ? 
MAX_TICKET_LEVEL + 1 : current.getCurrentTicketLevel(); ++ if (currentLevel == newLevel) { ++ // nothing to do ++ iterator.remove(); ++ continue; ++ } ++ ++ if (current == null) { ++ // must create ++ current = this.createChunkHolder(key); ++ this.chunkHolders.put(key, current); ++ current.updateTicketLevel(newLevel); ++ } else { ++ current.updateTicketLevel(newLevel); ++ } ++ } ++ ++ if (scheduledTasks == null) { ++ scheduledTasks = new ArrayList<>(); ++ } ++ changedFullStatus = new ArrayList<>(); ++ ++ // allow the chunkholders to process ticket level updates without needing to acquire the schedule lock every time ++ final List prev = CURRENT_TICKET_UPDATE_SCHEDULING.get(); ++ CURRENT_TICKET_UPDATE_SCHEDULING.set(scheduledTasks); ++ try { ++ this.taskScheduler.schedulingLock.lock(); ++ try { ++ for (final Iterator iterator = this.ticketLevelUpdates.long2IntEntrySet().fastIterator(); iterator.hasNext();) { ++ final Long2IntMap.Entry entry = iterator.next(); ++ final long key = entry.getLongKey(); ++ final NewChunkHolder current = this.chunkHolders.get(key); ++ ++ if (current == null) { ++ throw new IllegalStateException("Expected chunk holder to be created"); ++ } ++ ++ current.processTicketLevelUpdate(scheduledTasks, changedFullStatus); ++ } ++ } finally { ++ this.taskScheduler.schedulingLock.unlock(); ++ } ++ } finally { ++ CURRENT_TICKET_UPDATE_SCHEDULING.set(prev); ++ } ++ ++ this.ticketLevelUpdates.clear(); ++ } ++ } ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ ++ if (changedFullStatus != null) { ++ this.addChangedStatuses(changedFullStatus); ++ } ++ ++ if (canProcessScheduling && scheduledTasks != null) { ++ for (int i = 0, len = scheduledTasks.size(); i < len; ++i) { ++ scheduledTasks.get(i).schedule(); ++ } ++ } ++ ++ if (canProcessFullUpdates) { ++ ret |= this.processPendingFullUpdate(); ++ } ++ ++ return ret; ++ } ++ ++ // only call on tick thread ++ protected final boolean processPendingFullUpdate() { ++ final ArrayDeque pendingFullLoadUpdate = this.pendingFullLoadUpdate; ++ ++ boolean ret = false; ++ ++ List changedFullStatus = new ArrayList<>(); ++ ++ NewChunkHolder holder; ++ while ((holder = pendingFullLoadUpdate.poll()) != null) { ++ ret |= holder.handleFullStatusChange(changedFullStatus); ++ ++ if (!changedFullStatus.isEmpty()) { ++ for (int i = 0, len = changedFullStatus.size(); i < len; ++i) { ++ pendingFullLoadUpdate.add(changedFullStatus.get(i)); ++ } ++ changedFullStatus.clear(); ++ } ++ } ++ ++ return ret; ++ } ++ ++ public JsonObject getDebugJsonForWatchdog() { ++ // try and detect any potential deadlock that would require us to read unlocked ++ try { ++ if (this.ticketLock.tryLock(10, TimeUnit.SECONDS)) { ++ try { ++ if (this.taskScheduler.schedulingLock.tryLock(10, TimeUnit.SECONDS)) { ++ try { ++ return this.getDebugJsonNoLock(); ++ } finally { ++ this.taskScheduler.schedulingLock.unlock(); ++ } ++ } ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ } ++ } catch (final InterruptedException ignore) {} ++ ++ LOGGER.error("Failed to acquire ticket and scheduling lock before timeout for world " + this.world.getWorld().getName()); ++ ++ // because we read without locks, it may throw exceptions for fastutil maps ++ // so just try until it works... 
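++        // bounded retry: attempt the unlocked read up to 1000 times, yielding and parking ~10us between
++        // attempts; if every attempt fails, log the last exception and return null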
++ Throwable lastException = null; ++ for (int count = 0;count < 1000;++count) { ++ try { ++ return this.getDebugJsonNoLock(); ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable thr) { ++ lastException = thr; ++ Thread.yield(); ++ LockSupport.parkNanos(10_000L); ++ } ++ } ++ ++ // failed, return ++ LOGGER.error("Failed to retrieve debug json for watchdog thread without locking", lastException); ++ return null; ++ } ++ ++ private JsonObject getDebugJsonNoLock() { ++ final JsonObject ret = new JsonObject(); ++ ret.addProperty("current_tick", Long.valueOf(this.currentTick)); ++ ++ final JsonArray unloadQueue = new JsonArray(); ++ ret.add("unload_queue", unloadQueue); ++ for (final NewChunkHolder holder : this.unloadQueue) { ++ final JsonObject coordinate = new JsonObject(); ++ unloadQueue.add(coordinate); ++ ++ coordinate.addProperty("chunkX", Integer.valueOf(holder.chunkX)); ++ coordinate.addProperty("chunkZ", Integer.valueOf(holder.chunkZ)); ++ } ++ ++ final JsonArray holders = new JsonArray(); ++ ret.add("chunkholders", holders); ++ ++ for (final NewChunkHolder holder : this.getChunkHolders()) { ++ holders.add(holder.getDebugJson()); ++ } ++ ++ final JsonArray removeTickToChunkExpireTicketCount = new JsonArray(); ++ ret.add("remove_tick_to_chunk_expire_ticket_count", removeTickToChunkExpireTicketCount); ++ ++ for (final Long2ObjectMap.Entry tickEntry : this.removeTickToChunkExpireTicketCount.long2ObjectEntrySet()) { ++ final long tick = tickEntry.getLongKey(); ++ final Long2IntOpenHashMap coordinateToCount = tickEntry.getValue(); ++ ++ final JsonObject tickJson = new JsonObject(); ++ removeTickToChunkExpireTicketCount.add(tickJson); ++ ++ tickJson.addProperty("tick", Long.valueOf(tick)); ++ ++ final JsonArray tickEntries = new JsonArray(); ++ tickJson.add("entries", tickEntries); ++ ++ for (final Long2IntMap.Entry entry : coordinateToCount.long2IntEntrySet()) { ++ final long coordinate = entry.getLongKey(); ++ final int count = entry.getIntValue(); ++ ++ final JsonObject entryJson = new JsonObject(); ++ tickEntries.add(entryJson); ++ ++ entryJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate))); ++ entryJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate))); ++ entryJson.addProperty("count", Integer.valueOf(count)); ++ } ++ } ++ ++ final JsonArray allTicketsJson = new JsonArray(); ++ ret.add("tickets", allTicketsJson); ++ ++ for (final Long2ObjectMap.Entry>> coordinateTickets : this.tickets.long2ObjectEntrySet()) { ++ final long coordinate = coordinateTickets.getLongKey(); ++ final SortedArraySet> tickets = coordinateTickets.getValue(); ++ ++ final JsonObject coordinateJson = new JsonObject(); ++ allTicketsJson.add(coordinateJson); ++ ++ coordinateJson.addProperty("chunkX", Long.valueOf(CoordinateUtils.getChunkX(coordinate))); ++ coordinateJson.addProperty("chunkZ", Long.valueOf(CoordinateUtils.getChunkZ(coordinate))); ++ ++ final JsonArray ticketsSerialized = new JsonArray(); ++ coordinateJson.add("tickets", ticketsSerialized); ++ ++ for (final Ticket ticket : tickets) { ++ final JsonObject ticketSerialized = new JsonObject(); ++ ticketsSerialized.add(ticketSerialized); ++ ++ ticketSerialized.addProperty("type", ticket.getType().toString()); ++ ticketSerialized.addProperty("level", Integer.valueOf(ticket.getTicketLevel())); ++ ticketSerialized.addProperty("identifier", Objects.toString(ticket.key)); ++ ticketSerialized.addProperty("remove_tick", Long.valueOf(ticket.removalTick)); ++ } ++ } ++ ++ return ret; 
++ } ++ ++ public JsonObject getDebugJson() { ++ final List scheduleList = new ArrayList<>(); ++ try { ++ final JsonObject ret; ++ this.ticketLock.lock(); ++ try { ++ this.taskScheduler.schedulingLock.lock(); ++ try { ++ this.processTicketUpdates(false, false, scheduleList); ++ ret = this.getDebugJsonNoLock(); ++ } finally { ++ this.taskScheduler.schedulingLock.unlock(); ++ } ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ return ret; ++ } finally { ++ // schedule tasks, we can't let processTicketUpdates do this because we call it holding the schedule lock ++ for (int i = 0, len = scheduleList.size(); i < len; ++i) { ++ scheduleList.get(i).schedule(); ++ } ++ } ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java +new file mode 100644 +index 0000000000000000000000000000000000000000..53ddd7e9ac05e6a9eb809f329796e6d4f6bb2ab1 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLightTask.java +@@ -0,0 +1,181 @@ ++package io.papermc.paper.chunk.system.scheduling; ++ ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.starlight.common.light.StarLightEngine; ++import ca.spottedleaf.starlight.common.light.StarLightInterface; ++import io.papermc.paper.chunk.system.light.LightQueue; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.world.level.ChunkPos; ++import net.minecraft.world.level.chunk.ChunkAccess; ++import net.minecraft.world.level.chunk.ChunkStatus; ++import net.minecraft.world.level.chunk.ProtoChunk; ++import org.apache.logging.log4j.LogManager; ++import org.apache.logging.log4j.Logger; ++import java.util.function.BooleanSupplier; ++ ++public final class ChunkLightTask extends ChunkProgressionTask { ++ ++ private static final Logger LOGGER = LogManager.getLogger(); ++ ++ protected final ChunkAccess fromChunk; ++ ++ private final LightTaskPriorityHolder priorityHolder; ++ ++ public ChunkLightTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ, ++ final ChunkAccess chunk, final PrioritisedExecutor.Priority priority) { ++ super(scheduler, world, chunkX, chunkZ); ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ this.priorityHolder = new LightTaskPriorityHolder(priority, this); ++ this.fromChunk = chunk; ++ } ++ ++ @Override ++ public boolean isScheduled() { ++ return this.priorityHolder.isScheduled(); ++ } ++ ++ @Override ++ public ChunkStatus getTargetStatus() { ++ return ChunkStatus.LIGHT; ++ } ++ ++ @Override ++ public void schedule() { ++ this.priorityHolder.schedule(); ++ } ++ ++ @Override ++ public void cancel() { ++ this.priorityHolder.cancel(); ++ } ++ ++ @Override ++ public PrioritisedExecutor.Priority getPriority() { ++ return this.priorityHolder.getPriority(); ++ } ++ ++ @Override ++ public void lowerPriority(final PrioritisedExecutor.Priority priority) { ++ this.priorityHolder.raisePriority(priority); ++ } ++ ++ @Override ++ public void setPriority(final PrioritisedExecutor.Priority priority) { ++ this.priorityHolder.setPriority(priority); ++ } ++ ++ @Override ++ public void raisePriority(final PrioritisedExecutor.Priority priority) { ++ this.priorityHolder.raisePriority(priority); ++ } ++ ++ private static final class LightTaskPriorityHolder extends PriorityHolder { ++ ++ protected final ChunkLightTask task; ++ ++ 
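++        // Connects the generic PriorityHolder scheduling hooks to Starlight's LightQueue: scheduling and
++        // all priority changes for the owning ChunkLightTask are forwarded to the light queue entry for this chunk.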
protected LightTaskPriorityHolder(final PrioritisedExecutor.Priority priority, final ChunkLightTask task) { ++ super(priority); ++ this.task = task; ++ } ++ ++ @Override ++ protected void cancelScheduled() { ++ final ChunkLightTask task = this.task; ++ task.complete(null, null); ++ } ++ ++ @Override ++ protected PrioritisedExecutor.Priority getScheduledPriority() { ++ final ChunkLightTask task = this.task; ++ return task.world.getChunkSource().getLightEngine().theLightEngine.lightQueue.getPriority(task.chunkX, task.chunkZ); ++ } ++ ++ @Override ++ protected void scheduleTask(final PrioritisedExecutor.Priority priority) { ++ final ChunkLightTask task = this.task; ++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine; ++ final LightQueue lightQueue = starLightInterface.lightQueue; ++ lightQueue.queueChunkLightTask(new ChunkPos(task.chunkX, task.chunkZ), new LightTask(starLightInterface, task), priority); ++ lightQueue.setPriority(task.chunkX, task.chunkZ, priority); ++ } ++ ++ @Override ++ protected void lowerPriorityScheduled(final PrioritisedExecutor.Priority priority) { ++ final ChunkLightTask task = this.task; ++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine; ++ final LightQueue lightQueue = starLightInterface.lightQueue; ++ lightQueue.lowerPriority(task.chunkX, task.chunkZ, priority); ++ } ++ ++ @Override ++ protected void setPriorityScheduled(final PrioritisedExecutor.Priority priority) { ++ final ChunkLightTask task = this.task; ++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine; ++ final LightQueue lightQueue = starLightInterface.lightQueue; ++ lightQueue.setPriority(task.chunkX, task.chunkZ, priority); ++ } ++ ++ @Override ++ protected void raisePriorityScheduled(final PrioritisedExecutor.Priority priority) { ++ final ChunkLightTask task = this.task; ++ final StarLightInterface starLightInterface = task.world.getChunkSource().getLightEngine().theLightEngine; ++ final LightQueue lightQueue = starLightInterface.lightQueue; ++ lightQueue.raisePriority(task.chunkX, task.chunkZ, priority); ++ } ++ } ++ ++ private static final class LightTask implements BooleanSupplier { ++ ++ protected final StarLightInterface lightEngine; ++ protected final ChunkLightTask task; ++ ++ public LightTask(final StarLightInterface lightEngine, final ChunkLightTask task) { ++ this.lightEngine = lightEngine; ++ this.task = task; ++ } ++ ++ @Override ++ public boolean getAsBoolean() { ++ final ChunkLightTask task = this.task; ++ // executed on light thread ++ if (!task.priorityHolder.markExecuting()) { ++ // cancelled ++ return false; ++ } ++ ++ try { ++ final Boolean[] emptySections = StarLightEngine.getEmptySectionsForChunk(task.fromChunk); ++ ++ if (task.fromChunk.isLightCorrect() && task.fromChunk.getStatus().isOrAfter(ChunkStatus.LIGHT)) { ++ this.lightEngine.forceLoadInChunk(task.fromChunk, emptySections); ++ this.lightEngine.checkChunkEdges(task.chunkX, task.chunkZ); ++ } else { ++ task.fromChunk.setLightCorrect(false); ++ this.lightEngine.lightChunk(task.fromChunk, emptySections); ++ task.fromChunk.setLightCorrect(true); ++ } ++ // we need to advance status ++ if (task.fromChunk instanceof ProtoChunk chunk && chunk.getStatus() == ChunkStatus.LIGHT.getParent()) { ++ chunk.setStatus(ChunkStatus.LIGHT); ++ } ++ } catch (final Throwable thr) { ++ if (!(thr instanceof ThreadDeath)) { ++ LOGGER.fatal("Failed to light chunk " + 
task.fromChunk.getPos().toString() + " in world '" + this.lightEngine.getWorld().getWorld().getName() + "'", thr); ++ } ++ ++ task.complete(null, thr); ++ ++ if (thr instanceof ThreadDeath) { ++ throw (ThreadDeath)thr; ++ } ++ ++ return true; ++ } ++ ++ task.complete(task.fromChunk, null); ++ return true; ++ } ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java +new file mode 100644 +index 0000000000000000000000000000000000000000..be6f3f6a57668a9bd50d0ea5f2dd2335355b69d6 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java +@@ -0,0 +1,499 @@ ++package io.papermc.paper.chunk.system.scheduling; ++ ++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue; ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; ++import ca.spottedleaf.dataconverter.minecraft.MCDataConverter; ++import ca.spottedleaf.dataconverter.minecraft.datatypes.MCTypeRegistry; ++import com.mojang.logging.LogUtils; ++import io.papermc.paper.chunk.system.io.RegionFileIOThread; ++import io.papermc.paper.chunk.system.poi.PoiChunk; ++import net.minecraft.SharedConstants; ++import net.minecraft.core.registries.Registries; ++import net.minecraft.nbt.CompoundTag; ++import net.minecraft.server.level.ChunkMap; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.world.level.ChunkPos; ++import net.minecraft.world.level.chunk.ChunkAccess; ++import net.minecraft.world.level.chunk.ChunkStatus; ++import net.minecraft.world.level.chunk.ProtoChunk; ++import net.minecraft.world.level.chunk.UpgradeData; ++import net.minecraft.world.level.chunk.storage.ChunkSerializer; ++import net.minecraft.world.level.chunk.storage.EntityStorage; ++import net.minecraft.world.level.levelgen.blending.BlendingData; ++import org.slf4j.Logger; ++import java.lang.invoke.VarHandle; ++import java.util.Map; ++import java.util.concurrent.atomic.AtomicInteger; ++import java.util.function.Consumer; ++ ++public final class ChunkLoadTask extends ChunkProgressionTask { ++ ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ ++ private final NewChunkHolder chunkHolder; ++ private final ChunkDataLoadTask loadTask; ++ ++ private boolean cancelled; ++ private NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask; ++ private NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask; ++ ++ protected ChunkLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ, ++ final NewChunkHolder chunkHolder, final PrioritisedExecutor.Priority priority) { ++ super(scheduler, world, chunkX, chunkZ); ++ this.chunkHolder = chunkHolder; ++ this.loadTask = new ChunkDataLoadTask(scheduler, world, chunkX, chunkZ, priority); ++ this.loadTask.addCallback((final GenericDataLoadTask.TaskResult result) -> { ++ ChunkLoadTask.this.complete(result == null ? null : result.left(), result == null ? 
null : result.right()); ++ }); ++ } ++ ++ @Override ++ public ChunkStatus getTargetStatus() { ++ return ChunkStatus.EMPTY; ++ } ++ ++ private boolean scheduled; ++ ++ @Override ++ public boolean isScheduled() { ++ return this.scheduled; ++ } ++ ++ @Override ++ public void schedule() { ++ final NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask; ++ final NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask; ++ ++ final AtomicInteger count = new AtomicInteger(); ++ final Consumer> scheduleLoadTask = (final GenericDataLoadTask.TaskResult result) -> { ++ if (count.decrementAndGet() == 0) { ++ ChunkLoadTask.this.loadTask.schedule(false); ++ } ++ }; ++ ++ // NOTE: it is IMPOSSIBLE for getOrLoadEntityData/getOrLoadPoiData to complete synchronously, because ++ // they must schedule a task to off main or to on main to complete ++ this.scheduler.schedulingLock.lock(); ++ try { ++ if (this.scheduled) { ++ throw new IllegalStateException("schedule() called twice"); ++ } ++ this.scheduled = true; ++ if (this.cancelled) { ++ return; ++ } ++ if (!this.chunkHolder.isEntityChunkNBTLoaded()) { ++ entityLoadTask = this.chunkHolder.getOrLoadEntityData((Consumer)scheduleLoadTask); ++ count.setPlain(count.getPlain() + 1); ++ } else { ++ entityLoadTask = null; ++ } ++ ++ if (!this.chunkHolder.isPoiChunkLoaded()) { ++ poiLoadTask = this.chunkHolder.getOrLoadPoiData((Consumer)scheduleLoadTask); ++ count.setPlain(count.getPlain() + 1); ++ } else { ++ poiLoadTask = null; ++ } ++ ++ this.entityLoadTask = entityLoadTask; ++ this.poiLoadTask = poiLoadTask; ++ } finally { ++ this.scheduler.schedulingLock.unlock(); ++ } ++ ++ if (entityLoadTask != null) { ++ entityLoadTask.schedule(); ++ } ++ ++ if (poiLoadTask != null) { ++ poiLoadTask.schedule(); ++ } ++ ++ if (entityLoadTask == null && poiLoadTask == null) { ++ // no need to wait on those, we can schedule now ++ this.loadTask.schedule(false); ++ } ++ } ++ ++ @Override ++ public void cancel() { ++ // must be before load task access, so we can synchronise with the writes to the fields ++ this.scheduler.schedulingLock.lock(); ++ try { ++ this.cancelled = true; ++ } finally { ++ this.scheduler.schedulingLock.unlock(); ++ } ++ ++ /* ++ Note: The entityLoadTask/poiLoadTask do not complete when cancelled, ++ but this is fine because if they are successfully cancelled then ++ we will successfully cancel the load task, which will complete when cancelled ++ */ ++ ++ if (this.entityLoadTask != null) { ++ this.entityLoadTask.cancel(); ++ } ++ if (this.poiLoadTask != null) { ++ this.poiLoadTask.cancel(); ++ } ++ this.loadTask.cancel(); ++ } ++ ++ @Override ++ public PrioritisedExecutor.Priority getPriority() { ++ return this.loadTask.getPriority(); ++ } ++ ++ @Override ++ public void lowerPriority(final PrioritisedExecutor.Priority priority) { ++ final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask(); ++ if (entityLoad != null) { ++ entityLoad.lowerPriority(priority); ++ } ++ ++ final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask(); ++ ++ if (poiLoad != null) { ++ poiLoad.lowerPriority(priority); ++ } ++ ++ this.loadTask.lowerPriority(priority); ++ } ++ ++ @Override ++ public void setPriority(final PrioritisedExecutor.Priority priority) { ++ final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask(); ++ if (entityLoad != null) { ++ entityLoad.setPriority(priority); ++ } ++ ++ final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask(); ++ ++ if (poiLoad != null) { ++ poiLoad.setPriority(priority); ++ } 
++ ++ this.loadTask.setPriority(priority); ++ } ++ ++ @Override ++ public void raisePriority(final PrioritisedExecutor.Priority priority) { ++ final EntityDataLoadTask entityLoad = this.chunkHolder.getEntityDataLoadTask(); ++ if (entityLoad != null) { ++ entityLoad.raisePriority(priority); ++ } ++ ++ final PoiDataLoadTask poiLoad = this.chunkHolder.getPoiDataLoadTask(); ++ ++ if (poiLoad != null) { ++ poiLoad.raisePriority(priority); ++ } ++ ++ this.loadTask.raisePriority(priority); ++ } ++ ++ protected static abstract class CallbackDataLoadTask extends GenericDataLoadTask { ++ ++ private TaskResult result; ++ private final MultiThreadedQueue>> waiters = new MultiThreadedQueue<>(); ++ ++ protected volatile boolean completed; ++ protected static final VarHandle COMPLETED_HANDLE = ConcurrentUtil.getVarHandle(CallbackDataLoadTask.class, "completed", boolean.class); ++ ++ protected CallbackDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, ++ final int chunkZ, final RegionFileIOThread.RegionFileType type, ++ final PrioritisedExecutor.Priority priority) { ++ super(scheduler, world, chunkX, chunkZ, type, priority); ++ } ++ ++ public void addCallback(final Consumer> consumer) { ++ if (!this.waiters.add(consumer)) { ++ try { ++ consumer.accept(this.result); ++ } catch (final Throwable throwable) { ++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of( ++ "Consumer", ChunkTaskScheduler.stringIfNull(consumer), ++ "Completed throwable", ChunkTaskScheduler.stringIfNull(this.result.right()) ++ ), throwable); ++ if (throwable instanceof ThreadDeath) { ++ throw (ThreadDeath)throwable; ++ } ++ } ++ } ++ } ++ ++ @Override ++ protected void onComplete(final TaskResult result) { ++ if ((boolean)COMPLETED_HANDLE.getAndSet((CallbackDataLoadTask)this, (boolean)true)) { ++ throw new IllegalStateException("Already completed"); ++ } ++ this.result = result; ++ Consumer> consumer; ++ while ((consumer = this.waiters.pollOrBlockAdds()) != null) { ++ try { ++ consumer.accept(result); ++ } catch (final Throwable throwable) { ++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of( ++ "Consumer", ChunkTaskScheduler.stringIfNull(consumer), ++ "Completed throwable", ChunkTaskScheduler.stringIfNull(result.right()) ++ ), throwable); ++ if (throwable instanceof ThreadDeath) { ++ throw (ThreadDeath)throwable; ++ } ++ return; ++ } ++ } ++ } ++ } ++ ++ public final class ChunkDataLoadTask extends CallbackDataLoadTask { ++ protected ChunkDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, ++ final int chunkZ, final PrioritisedExecutor.Priority priority) { ++ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.CHUNK_DATA, priority); ++ } ++ ++ @Override ++ protected boolean hasOffMain() { ++ return true; ++ } ++ ++ @Override ++ protected boolean hasOnMain() { ++ return true; ++ } ++ ++ @Override ++ protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) { ++ return this.scheduler.loadExecutor.createTask(run, priority); ++ } ++ ++ @Override ++ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) { ++ return this.scheduler.createChunkTask(this.chunkX, this.chunkZ, run, priority); ++ } ++ ++ @Override ++ protected TaskResult completeOnMainOffMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) { ++ if (data != 
null) { ++ return null; ++ } ++ ++ final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk(); ++ if (poiChunk == null) { ++ LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString()); ++ } else if (!poiChunk.isLoaded()) { ++ // need to call poiChunk.load() on main ++ return null; ++ } ++ ++ return new TaskResult<>(this.getEmptyChunk(), null); ++ } ++ ++ @Override ++ protected TaskResult runOffMain(final CompoundTag data, final Throwable throwable) { ++ if (throwable != null) { ++ LOGGER.error("Failed to load chunk data for task: " + this.toString() + ", chunk data will be lost", throwable); ++ return new TaskResult<>(null, null); ++ } ++ ++ if (data == null) { ++ return new TaskResult<>(null, null); ++ } ++ ++ // need to convert data, and then deserialize it ++ ++ try { ++ final ChunkPos chunkPos = new ChunkPos(this.chunkX, this.chunkZ); ++ final ChunkMap chunkMap = this.world.getChunkSource().chunkMap; ++ // run converters ++ // note: upgradeChunkTag copies the data already ++ final CompoundTag converted = chunkMap.upgradeChunkTag( ++ this.world.getTypeKey(), chunkMap.overworldDataStorage, data, chunkMap.generator.getTypeNameForDataFixer(), ++ chunkPos, this.world ++ ); ++ // deserialize ++ final ChunkSerializer.InProgressChunkHolder chunkHolder = ChunkSerializer.loadChunk( ++ this.world, chunkMap.getPoiManager(), chunkPos, converted, true ++ ); ++ ++ return new TaskResult<>(chunkHolder, null); ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable thr2) { ++ LOGGER.error("Failed to parse chunk data for task: " + this.toString() + ", chunk data will be lost", thr2); ++ return new TaskResult<>(null, thr2); ++ } ++ } ++ ++ private ProtoChunk getEmptyChunk() { ++ return new ProtoChunk( ++ new ChunkPos(this.chunkX, this.chunkZ), UpgradeData.EMPTY, this.world, ++ this.world.registryAccess().registryOrThrow(Registries.BIOME), (BlendingData)null ++ ); ++ } ++ ++ @Override ++ protected TaskResult runOnMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) { ++ final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk(); ++ if (poiChunk == null) { ++ LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString()); ++ } else { ++ poiChunk.load(); ++ } ++ ++ if (data == null || data.protoChunk == null) { ++ // throwable could be non-null, but the off-main task will print its exceptions - so we don't need to care, ++ // it's handled already ++ ++ return new TaskResult<>(this.getEmptyChunk(), null); ++ } ++ ++ // have tasks to run (at this point, it's just the POI consistency checking) ++ try { ++ if (data.tasks != null) { ++ for (int i = 0, len = data.tasks.size(); i < len; ++i) { ++ data.tasks.poll().run(); ++ } ++ } ++ ++ return new TaskResult<>(data.protoChunk, null); ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable thr2) { ++ LOGGER.error("Failed to parse main tasks for task " + this.toString() + ", chunk data will be lost", thr2); ++ return new TaskResult<>(this.getEmptyChunk(), null); ++ } ++ } ++ } ++ ++ public static final class PoiDataLoadTask extends CallbackDataLoadTask { ++ public PoiDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, ++ final int chunkZ, final PrioritisedExecutor.Priority priority) { ++ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.POI_DATA, priority); ++ } ++ ++ @Override ++ protected boolean hasOffMain() { ++ return true; ++ } ++ ++ 
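++        // POI data is converted and parsed entirely off-main; there is no on-main completion step, so the
++        // on-main hooks below are unsupported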
@Override ++ protected boolean hasOnMain() { ++ return false; ++ } ++ ++ @Override ++ protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) { ++ return this.scheduler.loadExecutor.createTask(run, priority); ++ } ++ ++ @Override ++ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) { ++ throw new UnsupportedOperationException(); ++ } ++ ++ @Override ++ protected TaskResult completeOnMainOffMain(final PoiChunk data, final Throwable throwable) { ++ throw new UnsupportedOperationException(); ++ } ++ ++ @Override ++ protected TaskResult runOffMain(CompoundTag data, final Throwable throwable) { ++ if (throwable != null) { ++ LOGGER.error("Failed to load poi data for task: " + this.toString() + ", poi data will be lost", throwable); ++ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null); ++ } ++ ++ if (data == null || data.isEmpty()) { ++ // nothing to do ++ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null); ++ } ++ ++ try { ++ data = data.copy(); // coming from the I/O thread, so we need to copy ++ // run converters ++ final int dataVersion = !data.contains(SharedConstants.DATA_VERSION_TAG, 99) ? 1945 : data.getInt(SharedConstants.DATA_VERSION_TAG); ++ final CompoundTag converted = MCDataConverter.convertTag( ++ MCTypeRegistry.POI_CHUNK, data, dataVersion, SharedConstants.getCurrentVersion().getDataVersion().getVersion() ++ ); ++ ++ // now we need to parse it ++ return new TaskResult<>(PoiChunk.parse(this.world, this.chunkX, this.chunkZ, converted), null); ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable thr2) { ++ LOGGER.error("Failed to run parse poi data for task: " + this.toString() + ", poi data will be lost", thr2); ++ return new TaskResult<>(PoiChunk.empty(this.world, this.chunkX, this.chunkZ), null); ++ } ++ } ++ ++ @Override ++ protected TaskResult runOnMain(final PoiChunk data, final Throwable throwable) { ++ throw new UnsupportedOperationException(); ++ } ++ } ++ ++ public static final class EntityDataLoadTask extends CallbackDataLoadTask { ++ ++ public EntityDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, ++ final int chunkZ, final PrioritisedExecutor.Priority priority) { ++ super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.ENTITY_DATA, priority); ++ } ++ ++ @Override ++ protected boolean hasOffMain() { ++ return true; ++ } ++ ++ @Override ++ protected boolean hasOnMain() { ++ return false; ++ } ++ ++ @Override ++ protected PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority) { ++ return this.scheduler.loadExecutor.createTask(run, priority); ++ } ++ ++ @Override ++ protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) { ++ throw new UnsupportedOperationException(); ++ } ++ ++ @Override ++ protected TaskResult completeOnMainOffMain(final CompoundTag data, final Throwable throwable) { ++ throw new UnsupportedOperationException(); ++ } ++ ++ @Override ++ protected TaskResult runOffMain(final CompoundTag data, final Throwable throwable) { ++ if (throwable != null) { ++ LOGGER.error("Failed to load entity data for task: " + this.toString() + ", entity data will be lost", throwable); ++ return new TaskResult<>(null, null); ++ } ++ ++ if (data == null || 
data.isEmpty()) { ++ // nothing to do ++ return new TaskResult<>(null, null); ++ } ++ ++ try { ++ // note: data comes from the I/O thread, so we need to copy it ++ return new TaskResult<>(EntityStorage.upgradeChunkTag(data.copy()), null); ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable thr2) { ++ LOGGER.error("Failed to run converters for entity data for task: " + this.toString() + ", entity data will be lost", thr2); ++ return new TaskResult<>(null, thr2); ++ } ++ } ++ ++ @Override ++ protected TaskResult runOnMain(final CompoundTag data, final Throwable throwable) { ++ throw new UnsupportedOperationException(); ++ } ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java +new file mode 100644 +index 0000000000000000000000000000000000000000..322675a470eacbf0e5452f4009c643f2d0b4ce24 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkProgressionTask.java +@@ -0,0 +1,105 @@ ++package io.papermc.paper.chunk.system.scheduling; ++ ++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue; ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.world.level.chunk.ChunkAccess; ++import net.minecraft.world.level.chunk.ChunkStatus; ++import java.lang.invoke.VarHandle; ++import java.util.Map; ++import java.util.function.BiConsumer; ++ ++public abstract class ChunkProgressionTask { ++ ++ private final MultiThreadedQueue> waiters = new MultiThreadedQueue<>(); ++ private ChunkAccess completedChunk; ++ private Throwable completedThrowable; ++ ++ protected final ChunkTaskScheduler scheduler; ++ protected final ServerLevel world; ++ protected final int chunkX; ++ protected final int chunkZ; ++ ++ protected volatile boolean completed; ++ protected static final VarHandle COMPLETED_HANDLE = ConcurrentUtil.getVarHandle(ChunkProgressionTask.class, "completed", boolean.class); ++ ++ protected ChunkProgressionTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ) { ++ this.scheduler = scheduler; ++ this.world = world; ++ this.chunkX = chunkX; ++ this.chunkZ = chunkZ; ++ } ++ ++ // Used only for debug json ++ public abstract boolean isScheduled(); ++ ++ // Note: It is the responsibility of the task to set the chunk's status once it has completed ++ public abstract ChunkStatus getTargetStatus(); ++ ++ /* Only executed once */ ++ /* Implementations must be prepared to handle cases where cancel() is called before schedule() */ ++ public abstract void schedule(); ++ ++ /* May be called multiple times */ ++ public abstract void cancel(); ++ ++ public abstract PrioritisedExecutor.Priority getPriority(); ++ ++ /* Schedule lock is always held for the priority update calls */ ++ ++ public abstract void lowerPriority(final PrioritisedExecutor.Priority priority); ++ ++ public abstract void setPriority(final PrioritisedExecutor.Priority priority); ++ ++ public abstract void raisePriority(final PrioritisedExecutor.Priority priority); ++ ++ public final void onComplete(final BiConsumer onComplete) { ++ if (!this.waiters.add(onComplete)) { ++ try { ++ onComplete.accept(this.completedChunk, this.completedThrowable); ++ } catch (final Throwable throwable) { ++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, 
this.chunkZ, Map.of( ++ "Consumer", ChunkTaskScheduler.stringIfNull(onComplete), ++ "Completed throwable", ChunkTaskScheduler.stringIfNull(this.completedThrowable) ++ ), throwable); ++ if (throwable instanceof ThreadDeath) { ++ throw (ThreadDeath)throwable; ++ } ++ } ++ } ++ } ++ ++ protected final void complete(final ChunkAccess chunk, final Throwable throwable) { ++ try { ++ this.complete0(chunk, throwable); ++ } catch (final Throwable thr2) { ++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of( ++ "Completed throwable", ChunkTaskScheduler.stringIfNull(throwable) ++ ), thr2); ++ if (thr2 instanceof ThreadDeath) { ++ throw (ThreadDeath)thr2; ++ } ++ } ++ } ++ ++ private void complete0(final ChunkAccess chunk, final Throwable throwable) { ++ if ((boolean)COMPLETED_HANDLE.getAndSet((ChunkProgressionTask)this, (boolean)true)) { ++ throw new IllegalStateException("Already completed"); ++ } ++ this.completedChunk = chunk; ++ this.completedThrowable = throwable; ++ ++ BiConsumer consumer; ++ while ((consumer = this.waiters.pollOrBlockAdds()) != null) { ++ consumer.accept(chunk, throwable); ++ } ++ } ++ ++ @Override ++ public String toString() { ++ return "ChunkProgressionTask{class: " + this.getClass().getName() + ", for world: " + this.world.getWorld().getName() + ++ ", chunk: (" + this.chunkX + "," + this.chunkZ + "), hashcode: " + System.identityHashCode(this) + ", priority: " + this.getPriority() + ++ ", status: " + this.getTargetStatus().toString() + ", scheduled: " + this.isScheduled() + "}"; ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java +new file mode 100644 +index 0000000000000000000000000000000000000000..84d6af5c28cd0e81d50701bebe122f462720fbf8 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java +@@ -0,0 +1,775 @@ ++package io.papermc.paper.chunk.system.scheduling; ++ ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool; ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadedTaskQueue; ++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; ++import com.mojang.logging.LogUtils; ++import io.papermc.paper.configuration.GlobalConfiguration; ++import io.papermc.paper.util.CoordinateUtils; ++import io.papermc.paper.util.TickThread; ++import net.minecraft.CrashReport; ++import net.minecraft.CrashReportCategory; ++import net.minecraft.ReportedException; ++import io.papermc.paper.util.MCUtil; ++import net.minecraft.server.MinecraftServer; ++import net.minecraft.server.level.ChunkHolder; ++import net.minecraft.server.level.ChunkMap; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.server.level.TicketType; ++import net.minecraft.world.level.ChunkPos; ++import net.minecraft.world.level.chunk.ChunkAccess; ++import net.minecraft.world.level.chunk.ChunkStatus; ++import net.minecraft.world.level.chunk.LevelChunk; ++import org.bukkit.Bukkit; ++import org.slf4j.Logger; ++import java.io.File; ++import java.util.ArrayDeque; ++import java.util.ArrayList; ++import java.util.Arrays; ++import java.util.Collections; ++import java.util.List; ++import java.util.Map; ++import java.util.Objects; ++import java.util.concurrent.atomic.AtomicBoolean; ++import java.util.concurrent.atomic.AtomicLong; ++import java.util.concurrent.locks.ReentrantLock; 
++import java.util.function.BooleanSupplier; ++import java.util.function.Consumer; ++ ++public final class ChunkTaskScheduler { ++ ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ ++ static int newChunkSystemIOThreads; ++ static int newChunkSystemWorkerThreads; ++ static int newChunkSystemGenParallelism; ++ static int newChunkSystemLoadParallelism; ++ ++ public static ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool workerThreads; ++ ++ private static boolean initialised = false; ++ ++ public static void init(final GlobalConfiguration.ChunkSystem config) { ++ if (initialised) { ++ return; ++ } ++ initialised = true; ++ newChunkSystemIOThreads = config.ioThreads; ++ newChunkSystemWorkerThreads = config.workerThreads; ++ if (newChunkSystemIOThreads < 0) { ++ newChunkSystemIOThreads = 1; ++ } else { ++ newChunkSystemIOThreads = Math.max(1, newChunkSystemIOThreads); ++ } ++ int defaultWorkerThreads = Runtime.getRuntime().availableProcessors() / 2; ++ if (defaultWorkerThreads <= 4) { ++ defaultWorkerThreads = defaultWorkerThreads <= 3 ? 1 : 2; ++ } else { ++ defaultWorkerThreads = defaultWorkerThreads / 2; ++ } ++ defaultWorkerThreads = Integer.getInteger("Paper.WorkerThreadCount", Integer.valueOf(defaultWorkerThreads)); ++ ++ if (newChunkSystemWorkerThreads < 0) { ++ newChunkSystemWorkerThreads = defaultWorkerThreads; ++ } else { ++ newChunkSystemWorkerThreads = Math.max(1, newChunkSystemWorkerThreads); ++ } ++ ++ String newChunkSystemGenParallelism = config.genParallelism; ++ if (newChunkSystemGenParallelism.equalsIgnoreCase("default")) { ++ newChunkSystemGenParallelism = "true"; ++ } ++ boolean useParallelGen; ++ if (newChunkSystemGenParallelism.equalsIgnoreCase("on") || newChunkSystemGenParallelism.equalsIgnoreCase("enabled") ++ || newChunkSystemGenParallelism.equalsIgnoreCase("true")) { ++ useParallelGen = true; ++ } else if (newChunkSystemGenParallelism.equalsIgnoreCase("off") || newChunkSystemGenParallelism.equalsIgnoreCase("disabled") ++ || newChunkSystemGenParallelism.equalsIgnoreCase("false")) { ++ useParallelGen = false; ++ } else { ++ throw new IllegalStateException("Invalid option for gen-parallelism: must be one of [on, off, enabled, disabled, true, false, default]"); ++ } ++ ++ ChunkTaskScheduler.newChunkSystemGenParallelism = useParallelGen ? 
newChunkSystemWorkerThreads : 1; ++ ChunkTaskScheduler.newChunkSystemLoadParallelism = newChunkSystemWorkerThreads; ++ ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.init(newChunkSystemIOThreads); ++ workerThreads = new ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool( ++ "Paper Chunk System Worker Pool", newChunkSystemWorkerThreads, ++ (final Thread thread, final Integer id) -> { ++ thread.setPriority(Thread.NORM_PRIORITY - 2); ++ thread.setName("Tuinity Chunk System Worker #" + id.intValue()); ++ thread.setUncaughtExceptionHandler(io.papermc.paper.chunk.system.scheduling.NewChunkHolder.CHUNKSYSTEM_UNCAUGHT_EXCEPTION_HANDLER); ++ }, (long)(20.0e6)); // 20ms ++ ++ LOGGER.info("Chunk system is using " + newChunkSystemIOThreads + " I/O threads, " + newChunkSystemWorkerThreads + " worker threads, and gen parallelism of " + ChunkTaskScheduler.newChunkSystemGenParallelism + " threads"); ++ } ++ ++ public final ServerLevel world; ++ public final PrioritisedThreadPool workers; ++ public final PrioritisedThreadPool.PrioritisedPoolExecutor lightExecutor; ++ public final PrioritisedThreadPool.PrioritisedPoolExecutor genExecutor; ++ public final PrioritisedThreadPool.PrioritisedPoolExecutor parallelGenExecutor; ++ public final PrioritisedThreadPool.PrioritisedPoolExecutor loadExecutor; ++ ++ private final PrioritisedThreadedTaskQueue mainThreadExecutor = new PrioritisedThreadedTaskQueue(); ++ ++ final ReentrantLock schedulingLock = new ReentrantLock(); ++ public final ChunkHolderManager chunkHolderManager; ++ ++ static { ++ ChunkStatus.EMPTY.writeRadius = 0; ++ ChunkStatus.STRUCTURE_STARTS.writeRadius = 0; ++ ChunkStatus.STRUCTURE_REFERENCES.writeRadius = 0; ++ ChunkStatus.BIOMES.writeRadius = 0; ++ ChunkStatus.NOISE.writeRadius = 0; ++ ChunkStatus.SURFACE.writeRadius = 0; ++ ChunkStatus.CARVERS.writeRadius = 0; ++ ChunkStatus.FEATURES.writeRadius = 1; ++ ChunkStatus.INITIALIZE_LIGHT.writeRadius = 0; ++ ChunkStatus.LIGHT.writeRadius = 2; ++ ChunkStatus.SPAWN.writeRadius = 0; ++ ChunkStatus.FULL.writeRadius = 0; ++ ++ /* ++ It's important that the neighbour read radius is taken into account. If _any_ later status is using some chunk as ++ a neighbour, it must be also safe if that neighbour is being generated. i.e for any status later than FEATURES, ++ for a status to be parallel safe it must not read the block data from its neighbours. ++ */ ++ final List parallelCapableStatus = Arrays.asList( ++ // No-op executor. ++ ChunkStatus.EMPTY, ++ ++ // This is parallel capable, as CB has fixed the concurrency issue with stronghold generations. ++ // Does not touch neighbour chunks. ++ ChunkStatus.STRUCTURE_STARTS, ++ ++ // Surprisingly this is parallel capable. It is simply reading the already-created structure starts ++ // into the structure references for the chunk. So while it reads from it neighbours, its neighbours ++ // will not change, even if executed in parallel. ++ ChunkStatus.STRUCTURE_REFERENCES, ++ ++ // Safe. Mojang runs it in parallel as well. ++ ChunkStatus.BIOMES, ++ ++ // Safe. Mojang runs it in parallel as well. ++ ChunkStatus.NOISE, ++ ++ // Parallel safe. Only touches the target chunk. Biome retrieval is now noise based, which is ++ // completely thread-safe. ++ ChunkStatus.SURFACE, ++ ++ // No global state is modified in the carvers. It only touches the specified chunk. So it is parallel safe. ++ ChunkStatus.CARVERS, ++ ++ // FEATURES is not parallel safe. It writes to neighbours. 
++ ++ // no-op executor ++ ChunkStatus.INITIALIZE_LIGHT ++ ++ // LIGHT is not parallel safe. It also doesn't run on the generation executor, so no point. ++ ++ // Only writes to the specified chunk. State is not read by later statuses. Parallel safe. ++ // Note: it may look unsafe because it writes to a worldgenregion, but the region size is always 0 - ++ // see the task margin. ++ // However, if the neighbouring FEATURES chunk is unloaded, but then fails to load in again (for whatever ++ // reason), then it would write to this chunk - and since this status reads blocks from itself, it's not ++ // safe to execute this in parallel. ++ // SPAWN ++ ++ // FULL is executed on main. ++ ); ++ ++ for (final ChunkStatus status : parallelCapableStatus) { ++ status.isParallelCapable = true; ++ } ++ } ++ ++ public ChunkTaskScheduler(final ServerLevel world, final PrioritisedThreadPool workers) { ++ this.world = world; ++ this.workers = workers; ++ ++ final String worldName = world.getWorld().getName(); ++ this.genExecutor = workers.createExecutor("Chunk single-threaded generation executor for world '" + worldName + "'", 1); ++ // same as genExecutor, as there are race conditions between updating blocks in FEATURE status while lighting chunks ++ this.lightExecutor = this.genExecutor; ++ this.parallelGenExecutor = newChunkSystemGenParallelism <= 1 ? this.genExecutor ++ : workers.createExecutor("Chunk parallel generation executor for world '" + worldName + "'", newChunkSystemGenParallelism); ++ this.loadExecutor = workers.createExecutor("Chunk load executor for world '" + worldName + "'", newChunkSystemLoadParallelism); ++ this.chunkHolderManager = new ChunkHolderManager(world, this); ++ } ++ ++ private final AtomicBoolean failedChunkSystem = new AtomicBoolean(); ++ ++ public static Object stringIfNull(final Object obj) { ++ return obj == null ? "null" : obj; ++ } ++ ++ public void unrecoverableChunkSystemFailure(final int chunkX, final int chunkZ, final Map objectsOfInterest, final Throwable thr) { ++ final NewChunkHolder holder = this.chunkHolderManager.getChunkHolder(chunkX, chunkZ); ++ LOGGER.error("Chunk system error at chunk (" + chunkX + "," + chunkZ + "), holder: " + holder + ", exception:", new Throwable(thr)); ++ ++ if (this.failedChunkSystem.getAndSet(true)) { ++ return; ++ } ++ ++ final ReportedException reportedException = thr instanceof ReportedException ? 
(ReportedException)thr : new ReportedException(new CrashReport("Chunk system error", thr)); ++ ++ CrashReportCategory crashReportCategory = reportedException.getReport().addCategory("Chunk system details"); ++ crashReportCategory.setDetail("Chunk coordinate", new ChunkPos(chunkX, chunkZ).toString()); ++ crashReportCategory.setDetail("ChunkHolder", Objects.toString(holder)); ++ crashReportCategory.setDetail("unrecoverableChunkSystemFailure caller thread", Thread.currentThread().getName()); ++ ++ crashReportCategory = reportedException.getReport().addCategory("Chunk System Objects of Interest"); ++ for (final Map.Entry entry : objectsOfInterest.entrySet()) { ++ if (entry.getValue() instanceof Throwable thrObject) { ++ crashReportCategory.setDetailError(Objects.toString(entry.getKey()), thrObject); ++ } else { ++ crashReportCategory.setDetail(Objects.toString(entry.getKey()), Objects.toString(entry.getValue())); ++ } ++ } ++ ++ final Runnable crash = () -> { ++ throw new RuntimeException("Chunk system crash propagated from unrecoverableChunkSystemFailure", reportedException); ++ }; ++ ++ // this may not be good enough, specifically thanks to stupid ass plugins swallowing exceptions ++ this.scheduleChunkTask(chunkX, chunkZ, crash, PrioritisedExecutor.Priority.BLOCKING); ++ // so, make the main thread pick it up ++ MinecraftServer.chunkSystemCrash = new RuntimeException("Chunk system crash propagated from unrecoverableChunkSystemFailure", reportedException); ++ } ++ ++ public boolean executeMainThreadTask() { ++ TickThread.ensureTickThread("Cannot execute main thread task off-main"); ++ return this.mainThreadExecutor.executeTask(); ++ } ++ ++ public void raisePriority(final int x, final int z, final PrioritisedExecutor.Priority priority) { ++ this.chunkHolderManager.raisePriority(x, z, priority); ++ } ++ ++ public void setPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) { ++ this.chunkHolderManager.setPriority(x, z, priority); ++ } ++ ++ public void lowerPriority(final int x, final int z, final PrioritisedExecutor.Priority priority) { ++ this.chunkHolderManager.lowerPriority(x, z, priority); ++ } ++ ++ private final AtomicLong chunkLoadCounter = new AtomicLong(); ++ ++ public void scheduleTickingState(final int chunkX, final int chunkZ, final ChunkHolder.FullChunkStatus toStatus, ++ final boolean addTicket, final PrioritisedExecutor.Priority priority, ++ final Consumer onComplete) { ++ if (!TickThread.isTickThread()) { ++ this.scheduleChunkTask(chunkX, chunkZ, () -> { ++ ChunkTaskScheduler.this.scheduleTickingState(chunkX, chunkZ, toStatus, addTicket, priority, onComplete); ++ }, priority); ++ return; ++ } ++ if (this.chunkHolderManager.ticketLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Cannot schedule chunk load during ticket level update"); ++ } ++ if (this.schedulingLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Cannot schedule chunk loading recursively"); ++ } ++ ++ if (toStatus == ChunkHolder.FullChunkStatus.INACCESSIBLE) { ++ throw new IllegalArgumentException("Cannot wait for INACCESSIBLE status"); ++ } ++ ++ final int minLevel = 33 - (toStatus.ordinal() - 1); ++ final Long chunkReference = addTicket ? 
Long.valueOf(this.chunkLoadCounter.getAndIncrement()) : null; ++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ); ++ ++ if (addTicket) { ++ this.chunkHolderManager.addTicketAtLevel(TicketType.CHUNK_LOAD, chunkKey, minLevel, chunkReference); ++ this.chunkHolderManager.processTicketUpdates(); ++ } ++ ++ final Consumer loadCallback = (final LevelChunk chunk) -> { ++ try { ++ if (onComplete != null) { ++ onComplete.accept(chunk); ++ } ++ } finally { ++ if (addTicket) { ++ ChunkTaskScheduler.this.chunkHolderManager.addAndRemoveTickets(chunkKey, ++ TicketType.UNKNOWN, minLevel, new ChunkPos(chunkKey), ++ TicketType.CHUNK_LOAD, minLevel, chunkReference ++ ); ++ } ++ } ++ }; ++ ++ final boolean scheduled; ++ final LevelChunk chunk; ++ this.chunkHolderManager.ticketLock.lock(); ++ try { ++ this.schedulingLock.lock(); ++ try { ++ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkKey); ++ if (chunkHolder == null || chunkHolder.getTicketLevel() > minLevel) { ++ scheduled = false; ++ chunk = null; ++ } else { ++ final ChunkHolder.FullChunkStatus currStatus = chunkHolder.getChunkStatus(); ++ if (currStatus.isOrAfter(toStatus)) { ++ scheduled = false; ++ chunk = (LevelChunk)chunkHolder.getCurrentChunk(); ++ } else { ++ scheduled = true; ++ chunk = null; ++ ++ final int radius = toStatus.ordinal() - 1; // 0 -> BORDER, 1 -> TICKING, 2 -> ENTITY_TICKING ++ for (int dz = -radius; dz <= radius; ++dz) { ++ for (int dx = -radius; dx <= radius; ++dx) { ++ final NewChunkHolder neighbour = ++ (dx | dz) == 0 ? chunkHolder : this.chunkHolderManager.getChunkHolder(dx + chunkX, dz + chunkZ); ++ if (neighbour != null) { ++ neighbour.raisePriority(priority); ++ } ++ } ++ } ++ ++ // ticket level should schedule for us ++ chunkHolder.addFullStatusConsumer(toStatus, loadCallback); ++ } ++ } ++ } finally { ++ this.schedulingLock.unlock(); ++ } ++ } finally { ++ this.chunkHolderManager.ticketLock.unlock(); ++ } ++ ++ if (!scheduled) { ++ // couldn't schedule ++ try { ++ loadCallback.accept(chunk); ++ } catch (final ThreadDeath thr) { ++ throw thr; ++ } catch (final Throwable thr) { ++ LOGGER.error("Failed to process chunk full status callback", thr); ++ } ++ } ++ } ++ ++ public void scheduleChunkLoad(final int chunkX, final int chunkZ, final boolean gen, final ChunkStatus toStatus, final boolean addTicket, ++ final PrioritisedExecutor.Priority priority, final Consumer onComplete) { ++ if (gen) { ++ this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete); ++ return; ++ } ++ this.scheduleChunkLoad(chunkX, chunkZ, ChunkStatus.EMPTY, addTicket, priority, (final ChunkAccess chunk) -> { ++ if (chunk == null) { ++ onComplete.accept(null); ++ } else { ++ if (chunk.getStatus().isOrAfter(toStatus)) { ++ this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete); ++ } else { ++ onComplete.accept(null); ++ } ++ } ++ }); ++ } ++ ++ public void scheduleChunkLoad(final int chunkX, final int chunkZ, final ChunkStatus toStatus, final boolean addTicket, ++ final PrioritisedExecutor.Priority priority, final Consumer onComplete) { ++ if (!TickThread.isTickThread()) { ++ this.scheduleChunkTask(chunkX, chunkZ, () -> { ++ ChunkTaskScheduler.this.scheduleChunkLoad(chunkX, chunkZ, toStatus, addTicket, priority, onComplete); ++ }, priority); ++ return; ++ } ++ if (this.chunkHolderManager.ticketLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Cannot schedule chunk load during ticket level update"); ++ } ++ if 
(this.schedulingLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Cannot schedule chunk loading recursively"); ++ } ++ ++ if (toStatus == ChunkStatus.FULL) { ++ this.scheduleTickingState(chunkX, chunkZ, ChunkHolder.FullChunkStatus.BORDER, addTicket, priority, (Consumer)onComplete); ++ return; ++ } ++ ++ final int minLevel = 33 + ChunkStatus.getDistance(toStatus); ++ final Long chunkReference = addTicket ? Long.valueOf(this.chunkLoadCounter.getAndIncrement()) : null; ++ final long chunkKey = CoordinateUtils.getChunkKey(chunkX, chunkZ); ++ ++ if (addTicket) { ++ this.chunkHolderManager.addTicketAtLevel(TicketType.CHUNK_LOAD, chunkKey, minLevel, chunkReference); ++ this.chunkHolderManager.processTicketUpdates(); ++ } ++ ++ final Consumer loadCallback = (final ChunkAccess chunk) -> { ++ try { ++ if (onComplete != null) { ++ onComplete.accept(chunk); ++ } ++ } finally { ++ if (addTicket) { ++ ChunkTaskScheduler.this.chunkHolderManager.addAndRemoveTickets(chunkKey, ++ TicketType.UNKNOWN, minLevel, new ChunkPos(chunkKey), ++ TicketType.CHUNK_LOAD, minLevel, chunkReference ++ ); ++ } ++ } ++ }; ++ ++ final List tasks = new ArrayList<>(); ++ ++ final boolean scheduled; ++ final ChunkAccess chunk; ++ this.chunkHolderManager.ticketLock.lock(); ++ try { ++ this.schedulingLock.lock(); ++ try { ++ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkKey); ++ if (chunkHolder == null || chunkHolder.getTicketLevel() > minLevel) { ++ scheduled = false; ++ chunk = null; ++ } else { ++ final ChunkStatus genStatus = chunkHolder.getCurrentGenStatus(); ++ if (genStatus != null && genStatus.isOrAfter(toStatus)) { ++ scheduled = false; ++ chunk = chunkHolder.getCurrentChunk(); ++ } else { ++ scheduled = true; ++ chunk = null; ++ chunkHolder.raisePriority(priority); ++ ++ if (!chunkHolder.upgradeGenTarget(toStatus)) { ++ this.schedule(chunkX, chunkZ, toStatus, chunkHolder, tasks); ++ } ++ chunkHolder.addStatusConsumer(toStatus, loadCallback); ++ } ++ } ++ } finally { ++ this.schedulingLock.unlock(); ++ } ++ } finally { ++ this.chunkHolderManager.ticketLock.unlock(); ++ } ++ ++ for (int i = 0, len = tasks.size(); i < len; ++i) { ++ tasks.get(i).schedule(); ++ } ++ ++ if (!scheduled) { ++ // couldn't schedule ++ try { ++ loadCallback.accept(chunk); ++ } catch (final ThreadDeath thr) { ++ throw thr; ++ } catch (final Throwable thr) { ++ LOGGER.error("Failed to process chunk status callback", thr); ++ } ++ } ++ } ++ ++ private ChunkProgressionTask createTask(final int chunkX, final int chunkZ, final ChunkAccess chunk, ++ final NewChunkHolder chunkHolder, final List neighbours, ++ final ChunkStatus toStatus, final PrioritisedExecutor.Priority initialPriority) { ++ if (toStatus == ChunkStatus.EMPTY) { ++ return new ChunkLoadTask(this, this.world, chunkX, chunkZ, chunkHolder, initialPriority); ++ } ++ if (toStatus == ChunkStatus.LIGHT) { ++ return new ChunkLightTask(this, this.world, chunkX, chunkZ, chunk, initialPriority); ++ } ++ if (toStatus == ChunkStatus.FULL) { ++ return new ChunkFullTask(this, this.world, chunkX, chunkZ, chunkHolder, chunk, initialPriority); ++ } ++ ++ return new ChunkUpgradeGenericStatusTask(this, this.world, chunkX, chunkZ, chunk, neighbours, toStatus, initialPriority); ++ } ++ ++ ChunkProgressionTask schedule(final int chunkX, final int chunkZ, final ChunkStatus targetStatus, final NewChunkHolder chunkHolder, ++ final List allTasks) { ++ return this.schedule(chunkX, chunkZ, targetStatus, chunkHolder, allTasks, chunkHolder.getEffectivePriority()); ++ } 
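++    // (the schedule overload above forwards with the holder's current effective priority as the minimum priority)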
++ ++ // rets new task scheduled for the _specified_ chunk ++ // note: this must hold the scheduling lock ++ // minPriority is only used to pass the priority through to neighbours, as priority calculation has not yet been done ++ // schedule will ignore the generation target, so it should be checked by the caller to ensure the target is not regressed! ++ private ChunkProgressionTask schedule(final int chunkX, final int chunkZ, final ChunkStatus targetStatus, ++ final NewChunkHolder chunkHolder, final List allTasks, ++ final PrioritisedExecutor.Priority minPriority) { ++ if (!this.schedulingLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Not holding scheduling lock"); ++ } ++ ++ if (chunkHolder.hasGenerationTask()) { ++ chunkHolder.upgradeGenTarget(targetStatus); ++ return null; ++ } ++ ++ final PrioritisedExecutor.Priority requestedPriority = PrioritisedExecutor.Priority.max(minPriority, chunkHolder.getEffectivePriority()); ++ final ChunkStatus currentGenStatus = chunkHolder.getCurrentGenStatus(); ++ final ChunkAccess chunk = chunkHolder.getCurrentChunk(); ++ ++ if (currentGenStatus == null) { ++ // not yet loaded ++ final ChunkProgressionTask task = this.createTask( ++ chunkX, chunkZ, chunk, chunkHolder, Collections.emptyList(), ChunkStatus.EMPTY, requestedPriority ++ ); ++ ++ allTasks.add(task); ++ ++ final List chunkHolderNeighbours = new ArrayList<>(1); ++ chunkHolderNeighbours.add(chunkHolder); ++ ++ chunkHolder.setGenerationTarget(targetStatus); ++ chunkHolder.setGenerationTask(task, ChunkStatus.EMPTY, chunkHolderNeighbours); ++ ++ return task; ++ } ++ ++ if (currentGenStatus.isOrAfter(targetStatus)) { ++ // nothing to do ++ return null; ++ } ++ ++ // we know for sure now that we want to schedule _something_, so set the target ++ chunkHolder.setGenerationTarget(targetStatus); ++ ++ final ChunkStatus chunkRealStatus = chunk.getStatus(); ++ final ChunkStatus toStatus = currentGenStatus.getNextStatus(); ++ ++ // if this chunk has already generated up to or past the specified status, then we don't ++ // need the neighbours AT ALL. ++ final int neighbourReadRadius = chunkRealStatus.isOrAfter(toStatus) ? toStatus.loadRange : toStatus.getRange(); ++ ++ boolean unGeneratedNeighbours = false; ++ ++ // copied from MCUtil.getSpiralOutChunks ++ for (int r = 1; r <= neighbourReadRadius; r++) { ++ int x = -r; ++ int z = r; ++ ++ // Iterates the edge of half of the box; then negates for other half. 
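++ // (x, z) walks the full edge z = r (both corners included) and then the edge x = r strictly between its corners; ++ // the negated pair (-x, -z) below covers the remaining two edges, so each chunk on the ring of radius r is visited exactly once.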
++ while (x <= r && z > -r) { ++ final int radius = Math.max(Math.abs(x), Math.abs(z)); ++ final ChunkStatus requiredNeighbourStatus = ChunkMap.getDependencyStatus(toStatus, radius); ++ ++ unGeneratedNeighbours |= this.checkNeighbour( ++ chunkX + x, chunkZ + z, requiredNeighbourStatus, chunkHolder, allTasks, requestedPriority ++ ); ++ unGeneratedNeighbours |= this.checkNeighbour( ++ chunkX - x, chunkZ - z, requiredNeighbourStatus, chunkHolder, allTasks, requestedPriority ++ ); ++ ++ if (x < r) { ++ x++; ++ } else { ++ z--; ++ } ++ } ++ } ++ ++ if (unGeneratedNeighbours) { ++ // can't schedule, but neighbour completion will schedule for us when they're ALL done ++ ++ // propagate our priority to neighbours ++ chunkHolder.recalculateNeighbourPriorities(); ++ return null; ++ } ++ ++ // need to gather neighbours ++ ++ final List neighbours; ++ final List chunkHolderNeighbours; ++ if (neighbourReadRadius <= 0) { ++ neighbours = new ArrayList<>(1); ++ chunkHolderNeighbours = new ArrayList<>(1); ++ neighbours.add(chunk); ++ chunkHolderNeighbours.add(chunkHolder); ++ } else { ++ // the iteration order is _very_ important, as all generation statuses expect a certain order such that: ++ // chunkAtRelative = neighbours.get(relX + relZ * (2 * radius + 1)) ++ neighbours = new ArrayList<>((2 * neighbourReadRadius + 1) * (2 * neighbourReadRadius + 1)); ++ chunkHolderNeighbours = new ArrayList<>((2 * neighbourReadRadius + 1) * (2 * neighbourReadRadius + 1)); ++ for (int dz = -neighbourReadRadius; dz <= neighbourReadRadius; ++dz) { ++ for (int dx = -neighbourReadRadius; dx <= neighbourReadRadius; ++dx) { ++ final NewChunkHolder holder = (dx | dz) == 0 ? chunkHolder : this.chunkHolderManager.getChunkHolder(dx + chunkX, dz + chunkZ); ++ neighbours.add(holder.getChunkForNeighbourAccess()); ++ chunkHolderNeighbours.add(holder); ++ } ++ } ++ } ++ ++ final ChunkProgressionTask task = this.createTask(chunkX, chunkZ, chunk, chunkHolder, neighbours, toStatus, chunkHolder.getEffectivePriority()); ++ allTasks.add(task); ++ ++ chunkHolder.setGenerationTask(task, toStatus, chunkHolderNeighbours); ++ ++ return task; ++ } ++ ++ // rets true if the neighbour is not at the required status, false otherwise ++ private boolean checkNeighbour(final int chunkX, final int chunkZ, final ChunkStatus requiredStatus, final NewChunkHolder center, ++ final List tasks, final PrioritisedExecutor.Priority minPriority) { ++ final NewChunkHolder chunkHolder = this.chunkHolderManager.getChunkHolder(chunkX, chunkZ); ++ ++ if (chunkHolder == null) { ++ throw new IllegalStateException("Missing chunkholder when required"); ++ } ++ ++ final ChunkStatus holderStatus = chunkHolder.getCurrentGenStatus(); ++ if (holderStatus != null && holderStatus.isOrAfter(requiredStatus)) { ++ return false; ++ } ++ ++ if (chunkHolder.hasFailedGeneration()) { ++ return true; ++ } ++ ++ center.addGenerationBlockingNeighbour(chunkHolder); ++ chunkHolder.addWaitingNeighbour(center, requiredStatus); ++ ++ if (chunkHolder.upgradeGenTarget(requiredStatus)) { ++ return true; ++ } ++ ++ // not at status required, so we need to schedule its generation ++ this.schedule( ++ chunkX, chunkZ, requiredStatus, chunkHolder, tasks, minPriority ++ ); ++ ++ return true; ++ } ++ ++ /** ++ * @deprecated Chunk tasks must be tied to coordinates in the future ++ */ ++ @Deprecated ++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final Runnable run) { ++ return this.scheduleChunkTask(run, PrioritisedExecutor.Priority.NORMAL); ++ } ++ ++ /** ++ * @deprecated Chunk tasks must 
be tied to coordinates in the future ++ */ ++ @Deprecated ++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final Runnable run, final PrioritisedExecutor.Priority priority) { ++ return this.mainThreadExecutor.queueRunnable(run, priority); ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask createChunkTask(final int chunkX, final int chunkZ, final Runnable run) { ++ return this.createChunkTask(chunkX, chunkZ, run, PrioritisedExecutor.Priority.NORMAL); ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask createChunkTask(final int chunkX, final int chunkZ, final Runnable run, ++ final PrioritisedExecutor.Priority priority) { ++ return this.mainThreadExecutor.createTask(run, priority); ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final int chunkX, final int chunkZ, final Runnable run) { ++ return this.mainThreadExecutor.queueRunnable(run); ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask scheduleChunkTask(final int chunkX, final int chunkZ, final Runnable run, ++ final PrioritisedExecutor.Priority priority) { ++ return this.mainThreadExecutor.queueRunnable(run, priority); ++ } ++ ++ public void executeTasksUntil(final BooleanSupplier exit) { ++ if (Bukkit.isPrimaryThread()) { ++ this.mainThreadExecutor.executeConditionally(exit); ++ } else { ++ long counter = 1L; ++ while (!exit.getAsBoolean()) { ++ counter = ConcurrentUtil.linearLongBackoff(counter, 100_000L, 5_000_000L); // 100us, 5ms ++ } ++ } ++ } ++ ++ public boolean halt(final boolean sync, final long maxWaitNS) { ++ this.lightExecutor.halt(); ++ this.genExecutor.halt(); ++ this.parallelGenExecutor.halt(); ++ this.loadExecutor.halt(); ++ final long time = System.nanoTime(); ++ if (sync) { ++ for (long failures = 9L;; failures = ConcurrentUtil.linearLongBackoff(failures, 500_000L, 50_000_000L)) { ++ if ( ++ !this.lightExecutor.isActive() && ++ !this.genExecutor.isActive() && ++ !this.parallelGenExecutor.isActive() && ++ !this.loadExecutor.isActive() ++ ) { ++ return true; ++ } ++ if ((System.nanoTime() - time) >= maxWaitNS) { ++ return false; ++ } ++ } ++ } ++ ++ return true; ++ } ++ ++ public static final ArrayDeque WAITING_CHUNKS = new ArrayDeque<>(); // stack ++ ++ public static final class ChunkInfo { ++ ++ public final int chunkX; ++ public final int chunkZ; ++ public final ServerLevel world; ++ ++ public ChunkInfo(final int chunkX, final int chunkZ, final ServerLevel world) { ++ this.chunkX = chunkX; ++ this.chunkZ = chunkZ; ++ this.world = world; ++ } ++ ++ @Override ++ public String toString() { ++ return "[( " + this.chunkX + "," + this.chunkZ + ") in '" + this.world.getWorld().getName() + "']"; ++ } ++ } ++ ++ public static void pushChunkWait(final ServerLevel world, final int chunkX, final int chunkZ) { ++ synchronized (WAITING_CHUNKS) { ++ WAITING_CHUNKS.push(new ChunkInfo(chunkX, chunkZ, world)); ++ } ++ } ++ ++ public static void popChunkWait() { ++ synchronized (WAITING_CHUNKS) { ++ WAITING_CHUNKS.pop(); ++ } ++ } ++ ++ public static ChunkInfo[] getChunkInfos() { ++ synchronized (WAITING_CHUNKS) { ++ return WAITING_CHUNKS.toArray(new ChunkInfo[0]); ++ } ++ } ++ ++ public static void dumpAllChunkLoadInfo(final boolean longPrint) { ++ final ChunkInfo[] chunkInfos = getChunkInfos(); ++ if (chunkInfos.length > 0) { ++ LOGGER.error("Chunk wait task info below: "); ++ for (final ChunkInfo chunkInfo : chunkInfos) { ++ final NewChunkHolder holder = chunkInfo.world.chunkTaskScheduler.chunkHolderManager.getChunkHolder(chunkInfo.chunkX, chunkInfo.chunkZ); ++ LOGGER.error("Chunk wait: " 
+ chunkInfo); ++ LOGGER.error("Chunk holder: " + holder); ++ } ++ ++ if (longPrint) { ++ final File file = new File(new File(new File("."), "debug"), "chunks-watchdog.txt"); ++ LOGGER.error("Writing chunk information dump to " + file); ++ try { ++ MCUtil.dumpChunks(file, true); ++ LOGGER.error("Successfully written chunk information!"); ++ } catch (final Throwable thr) { ++ MinecraftServer.LOGGER.warn("Failed to dump chunk information to file " + file.toString(), thr); ++ } ++ } ++ } ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java +new file mode 100644 +index 0000000000000000000000000000000000000000..73ce0909bd89244835a0d0f2030a25871461f1e0 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java +@@ -0,0 +1,209 @@ ++package io.papermc.paper.chunk.system.scheduling; ++ ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; ++import com.mojang.datafixers.util.Either; ++import com.mojang.logging.LogUtils; ++import net.minecraft.server.level.ChunkHolder; ++import net.minecraft.server.level.ChunkMap; ++import net.minecraft.server.level.ServerChunkCache; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.world.level.chunk.ChunkAccess; ++import net.minecraft.world.level.chunk.ChunkStatus; ++import net.minecraft.world.level.chunk.ProtoChunk; ++import org.slf4j.Logger; ++import java.lang.invoke.VarHandle; ++import java.util.List; ++import java.util.Map; ++import java.util.concurrent.CompletableFuture; ++ ++public final class ChunkUpgradeGenericStatusTask extends ChunkProgressionTask implements Runnable { ++ ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ ++ protected final ChunkAccess fromChunk; ++ protected final ChunkStatus fromStatus; ++ protected final ChunkStatus toStatus; ++ protected final List neighbours; ++ ++ protected final PrioritisedExecutor.PrioritisedTask generateTask; ++ ++ public ChunkUpgradeGenericStatusTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, ++ final int chunkZ, final ChunkAccess chunk, final List neighbours, ++ final ChunkStatus toStatus, final PrioritisedExecutor.Priority priority) { ++ super(scheduler, world, chunkX, chunkZ); ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ this.fromChunk = chunk; ++ this.fromStatus = chunk.getStatus(); ++ this.toStatus = toStatus; ++ this.neighbours = neighbours; ++ this.generateTask = (this.toStatus.isParallelCapable ? 
this.scheduler.parallelGenExecutor : this.scheduler.genExecutor) ++ .createTask(this, priority); ++ } ++ ++ @Override ++ public ChunkStatus getTargetStatus() { ++ return this.toStatus; ++ } ++ ++ private boolean isEmptyTask() { ++ // must use fromStatus here to avoid any race condition with run() overwriting the status ++ final boolean generation = !this.fromStatus.isOrAfter(this.toStatus); ++ return (generation && this.toStatus.isEmptyGenStatus()) || (!generation && this.toStatus.isEmptyLoadStatus()); ++ } ++ ++ @Override ++ public void run() { ++ final ChunkAccess chunk = this.fromChunk; ++ ++ final ServerChunkCache serverChunkCache = this.world.chunkSource; ++ final ChunkMap chunkMap = serverChunkCache.chunkMap; ++ ++ final CompletableFuture> completeFuture; ++ ++ final boolean generation; ++ boolean completing = false; ++ ++ // note: should optimise the case where the chunk does not need to execute the status, because ++ // schedule() calls this synchronously if it will run through that path ++ ++ try { ++ generation = !chunk.getStatus().isOrAfter(this.toStatus); ++ if (generation) { ++ if (this.toStatus.isEmptyGenStatus()) { ++ if (chunk instanceof ProtoChunk) { ++ ((ProtoChunk)chunk).setStatus(this.toStatus); ++ } ++ completing = true; ++ this.complete(chunk, null); ++ return; ++ } ++ completeFuture = this.toStatus.generate(Runnable::run, this.world, chunkMap.generator, chunkMap.structureTemplateManager, ++ serverChunkCache.getLightEngine(), null, this.neighbours, false) ++ .whenComplete((final Either either, final Throwable throwable) -> { ++ final ChunkAccess newChunk = (either == null) ? null : either.left().orElse(null); ++ if (newChunk instanceof ProtoChunk) { ++ ((ProtoChunk)newChunk).setStatus(ChunkUpgradeGenericStatusTask.this.toStatus); ++ } ++ } ++ ); ++ } else { ++ if (this.toStatus.isEmptyLoadStatus()) { ++ completing = true; ++ this.complete(chunk, null); ++ return; ++ } ++ completeFuture = this.toStatus.load(this.world, chunkMap.structureTemplateManager, serverChunkCache.getLightEngine(), null, chunk); ++ } ++ } catch (final Throwable throwable) { ++ if (!completing) { ++ this.complete(null, throwable); ++ ++ if (throwable instanceof ThreadDeath) { ++ throw (ThreadDeath)throwable; ++ } ++ return; ++ } ++ ++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of( ++ "Target status", ChunkTaskScheduler.stringIfNull(this.toStatus), ++ "From status", ChunkTaskScheduler.stringIfNull(this.fromStatus), ++ "Generation task", this ++ ), throwable); ++ ++ if (!(throwable instanceof ThreadDeath)) { ++ LOGGER.error("Failed to complete status for chunk: status:" + this.toStatus + ", chunk: (" + this.chunkX + "," + this.chunkZ + "), world: " + this.world.getWorld().getName(), throwable); ++ } else { ++ // ensure the chunk system can respond, then die ++ throw (ThreadDeath)throwable; ++ } ++ return; ++ } ++ ++ if (!completeFuture.isDone() && !this.toStatus.warnedAboutNoImmediateComplete.getAndSet(true)) { ++ LOGGER.warn("Future status not complete after scheduling: " + this.toStatus.toString() + ", generate: " + generation); ++ } ++ ++ final Either either; ++ final ChunkAccess newChunk; ++ ++ try { ++ either = completeFuture.join(); ++ newChunk = (either == null) ? 
null : either.left().orElse(null); ++ } catch (final Throwable throwable) { ++ this.complete(null, throwable); ++ // ensure the chunk system can respond, then die ++ if (throwable instanceof ThreadDeath) { ++ throw (ThreadDeath)throwable; ++ } ++ return; ++ } ++ ++ if (newChunk == null) { ++ this.complete(null, new IllegalStateException("Chunk for status: " + ChunkUpgradeGenericStatusTask.this.toStatus.toString() + ", generation: " + generation + " should not be null! Either: " + either).fillInStackTrace()); ++ return; ++ } ++ ++ this.complete(newChunk, null); ++ } ++ ++ protected volatile boolean scheduled; ++ protected static final VarHandle SCHEDULED_HANDLE = ConcurrentUtil.getVarHandle(ChunkUpgradeGenericStatusTask.class, "scheduled", boolean.class); ++ ++ @Override ++ public boolean isScheduled() { ++ return this.scheduled; ++ } ++ ++ @Override ++ public void schedule() { ++ if ((boolean)SCHEDULED_HANDLE.getAndSet((ChunkUpgradeGenericStatusTask)this, true)) { ++ throw new IllegalStateException("Cannot double call schedule()"); ++ } ++ if (this.isEmptyTask()) { ++ if (this.generateTask.cancel()) { ++ this.run(); ++ } ++ } else { ++ this.generateTask.queue(); ++ } ++ } ++ ++ @Override ++ public void cancel() { ++ if (this.generateTask.cancel()) { ++ this.complete(null, null); ++ } ++ } ++ ++ @Override ++ public PrioritisedExecutor.Priority getPriority() { ++ return this.generateTask.getPriority(); ++ } ++ ++ @Override ++ public void lowerPriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ this.generateTask.lowerPriority(priority); ++ } ++ ++ @Override ++ public void setPriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ this.generateTask.setPriority(priority); ++ } ++ ++ @Override ++ public void raisePriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ this.generateTask.raisePriority(priority); ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java +new file mode 100644 +index 0000000000000000000000000000000000000000..396d72c00e47cf1669ae20dc839c1c961b1f262a +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/GenericDataLoadTask.java +@@ -0,0 +1,746 @@ ++package io.papermc.paper.chunk.system.scheduling; ++ ++import ca.spottedleaf.concurrentutil.completable.Completable; ++import ca.spottedleaf.concurrentutil.executor.Cancellable; ++import ca.spottedleaf.concurrentutil.executor.standard.DelayedPrioritisedTask; ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; ++import com.mojang.logging.LogUtils; ++import io.papermc.paper.chunk.system.io.RegionFileIOThread; ++import net.minecraft.nbt.CompoundTag; ++import net.minecraft.server.level.ServerLevel; ++import org.slf4j.Logger; ++import java.lang.invoke.VarHandle; ++import java.util.Map; ++import java.util.concurrent.atomic.AtomicBoolean; ++import java.util.concurrent.atomic.AtomicLong; ++import java.util.function.BiConsumer; ++ ++public abstract class 
GenericDataLoadTask { ++ ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ ++ protected static final CompoundTag CANCELLED_DATA = new CompoundTag(); ++ ++ // reference count is the upper 32 bits ++ protected final AtomicLong stageAndReferenceCount = new AtomicLong(STAGE_NOT_STARTED); ++ ++ protected static final long STAGE_MASK = 0xFFFFFFFFL; ++ protected static final long STAGE_CANCELLED = 0xFFFFFFFFL; ++ protected static final long STAGE_NOT_STARTED = 0L; ++ protected static final long STAGE_LOADING = 1L; ++ protected static final long STAGE_PROCESSING = 2L; ++ protected static final long STAGE_COMPLETED = 3L; ++ ++ // for loading data off disk ++ protected final LoadDataFromDiskTask loadDataFromDiskTask; ++ // processing off-main ++ protected final PrioritisedExecutor.PrioritisedTask processOffMain; ++ // processing on-main ++ protected final PrioritisedExecutor.PrioritisedTask processOnMain; ++ ++ protected final ChunkTaskScheduler scheduler; ++ protected final ServerLevel world; ++ protected final int chunkX; ++ protected final int chunkZ; ++ protected final RegionFileIOThread.RegionFileType type; ++ ++ public GenericDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, ++ final int chunkZ, final RegionFileIOThread.RegionFileType type, ++ final PrioritisedExecutor.Priority priority) { ++ this.scheduler = scheduler; ++ this.world = world; ++ this.chunkX = chunkX; ++ this.chunkZ = chunkZ; ++ this.type = type; ++ ++ final ProcessOnMainTask mainTask; ++ if (this.hasOnMain()) { ++ mainTask = new ProcessOnMainTask(); ++ this.processOnMain = this.createOnMain(mainTask, priority); ++ } else { ++ mainTask = null; ++ this.processOnMain = null; ++ } ++ ++ final ProcessOffMainTask offMainTask; ++ if (this.hasOffMain()) { ++ offMainTask = new ProcessOffMainTask(mainTask); ++ this.processOffMain = this.createOffMain(offMainTask, priority); ++ } else { ++ offMainTask = null; ++ this.processOffMain = null; ++ } ++ ++ if (this.processOffMain == null && this.processOnMain == null) { ++ throw new IllegalStateException("Illegal class implementation: " + this.getClass().getName() + ", should be able to schedule at least one task!"); ++ } ++ ++ this.loadDataFromDiskTask = new LoadDataFromDiskTask(world, chunkX, chunkZ, type, new DataLoadCallback(offMainTask, mainTask), priority); ++ } ++ ++ public static final record TaskResult(L left, R right) {} ++ ++ protected abstract boolean hasOffMain(); ++ ++ protected abstract boolean hasOnMain(); ++ ++ protected abstract PrioritisedExecutor.PrioritisedTask createOffMain(final Runnable run, final PrioritisedExecutor.Priority priority); ++ ++ protected abstract PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority); ++ ++ protected abstract TaskResult runOffMain(final CompoundTag data, final Throwable throwable); ++ ++ protected abstract TaskResult runOnMain(final OnMain data, final Throwable throwable); ++ ++ protected abstract void onComplete(final TaskResult result); ++ ++ protected abstract TaskResult completeOnMainOffMain(final OnMain data, final Throwable throwable); ++ ++ @Override ++ public String toString() { ++ return "GenericDataLoadTask{class: " + this.getClass().getName() + ", world: " + this.world.getWorld().getName() + ++ ", chunk: (" + this.chunkX + "," + this.chunkZ + "), hashcode: " + System.identityHashCode(this) + ", priority: " + this.getPriority() + ++ ", type: " + this.type.toString() + "}"; ++ } ++ ++ public 
PrioritisedExecutor.Priority getPriority() { ++ if (this.processOnMain != null) { ++ return this.processOnMain.getPriority(); ++ } else { ++ return this.processOffMain.getPriority(); ++ } ++ } ++ ++ public void lowerPriority(final PrioritisedExecutor.Priority priority) { ++ // can't lower I/O tasks, we don't know what they affect ++ if (this.processOffMain != null) { ++ this.processOffMain.lowerPriority(priority); ++ } ++ if (this.processOnMain != null) { ++ this.processOnMain.lowerPriority(priority); ++ } ++ } ++ ++ public void setPriority(final PrioritisedExecutor.Priority priority) { ++ // can't lower I/O tasks, we don't know what they affect ++ this.loadDataFromDiskTask.raisePriority(priority); ++ if (this.processOffMain != null) { ++ this.processOffMain.setPriority(priority); ++ } ++ if (this.processOnMain != null) { ++ this.processOnMain.setPriority(priority); ++ } ++ } ++ ++ public void raisePriority(final PrioritisedExecutor.Priority priority) { ++ // can't lower I/O tasks, we don't know what they affect ++ this.loadDataFromDiskTask.raisePriority(priority); ++ if (this.processOffMain != null) { ++ this.processOffMain.raisePriority(priority); ++ } ++ if (this.processOnMain != null) { ++ this.processOnMain.raisePriority(priority); ++ } ++ } ++ ++ // returns whether scheduleNow() needs to be called ++ public boolean schedule(final boolean delay) { ++ if (this.stageAndReferenceCount.get() != STAGE_NOT_STARTED || ++ !this.stageAndReferenceCount.compareAndSet(STAGE_NOT_STARTED, (1L << 32) | STAGE_LOADING)) { ++ // try and increment reference count ++ int failures = 0; ++ for (long curr = this.stageAndReferenceCount.get();;) { ++ if ((curr & STAGE_MASK) == STAGE_CANCELLED || (curr & STAGE_MASK) == STAGE_COMPLETED) { ++ // cancelled or completed, nothing to do here ++ return false; ++ } ++ ++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, curr + (1L << 32)))) { ++ // successful ++ return false; ++ } ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ if (!delay) { ++ this.scheduleNow(); ++ return false; ++ } ++ return true; ++ } ++ ++ public void scheduleNow() { ++ this.loadDataFromDiskTask.schedule(); // will schedule the rest ++ } ++ ++ // assumes the current stage cannot be completed ++ // returns false if cancelled, returns true if can proceed ++ private boolean advanceStage(final long expect, final long to) { ++ int failures = 0; ++ for (long curr = this.stageAndReferenceCount.get();;) { ++ if ((curr & STAGE_MASK) != expect) { ++ // must be cancelled ++ return false; ++ } ++ ++ final long newVal = (curr & ~STAGE_MASK) | to; ++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) { ++ return true; ++ } ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ public boolean cancel() { ++ int failures = 0; ++ for (long curr = this.stageAndReferenceCount.get();;) { ++ if ((curr & STAGE_MASK) == STAGE_COMPLETED || (curr & STAGE_MASK) == STAGE_CANCELLED) { ++ return false; ++ } ++ ++ if ((curr & STAGE_MASK) == STAGE_NOT_STARTED || (curr & ~STAGE_MASK) == (1L << 32)) { ++ // no other references, so we can cancel ++ final long newVal = STAGE_CANCELLED; ++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) { ++ this.loadDataFromDiskTask.cancel(); ++ if (this.processOffMain != null) { ++ this.processOffMain.cancel(); ++ } ++ if (this.processOnMain != null) { ++ this.processOnMain.cancel(); ++ } ++ 
this.onComplete(null); ++ return true; ++ } ++ } else { ++ if ((curr & ~STAGE_MASK) == (0L << 32)) { ++ throw new IllegalStateException("Reference count cannot be zero here"); ++ } ++ // just decrease the reference count ++ final long newVal = curr - (1L << 32); ++ if (curr == (curr = this.stageAndReferenceCount.compareAndExchange(curr, newVal))) { ++ return false; ++ } ++ } ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ protected final class DataLoadCallback implements BiConsumer { ++ ++ protected final ProcessOffMainTask offMainTask; ++ protected final ProcessOnMainTask onMainTask; ++ ++ public DataLoadCallback(final ProcessOffMainTask offMainTask, final ProcessOnMainTask onMainTask) { ++ this.offMainTask = offMainTask; ++ this.onMainTask = onMainTask; ++ } ++ ++ @Override ++ public void accept(final CompoundTag compoundTag, final Throwable throwable) { ++ if (GenericDataLoadTask.this.stageAndReferenceCount.get() == STAGE_CANCELLED) { ++ // don't try to schedule further ++ return; ++ } ++ ++ try { ++ if (compoundTag == CANCELLED_DATA) { ++ // cancelled, except this isn't possible ++ LOGGER.error("Data callback says cancelled, but stage does not?"); ++ return; ++ } ++ ++ // get off of the regionfile callback ASAP, no clue what locks are held right now... ++ if (GenericDataLoadTask.this.processOffMain != null) { ++ this.offMainTask.data = compoundTag; ++ this.offMainTask.throwable = throwable; ++ GenericDataLoadTask.this.processOffMain.queue(); ++ return; ++ } else { ++ // no off-main task, so go straight to main ++ this.onMainTask.data = (OnMain)compoundTag; ++ this.onMainTask.throwable = throwable; ++ GenericDataLoadTask.this.processOnMain.queue(); ++ } ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable thr2) { ++ LOGGER.error("Failed I/O callback for task: " + GenericDataLoadTask.this.toString(), thr2); ++ GenericDataLoadTask.this.scheduler.unrecoverableChunkSystemFailure( ++ GenericDataLoadTask.this.chunkX, GenericDataLoadTask.this.chunkZ, Map.of( ++ "Callback throwable", ChunkTaskScheduler.stringIfNull(throwable) ++ ), thr2); ++ } ++ } ++ } ++ ++ protected final class ProcessOffMainTask implements Runnable { ++ ++ protected CompoundTag data; ++ protected Throwable throwable; ++ protected final ProcessOnMainTask schedule; ++ ++ public ProcessOffMainTask(final ProcessOnMainTask schedule) { ++ this.schedule = schedule; ++ } ++ ++ @Override ++ public void run() { ++ if (!GenericDataLoadTask.this.advanceStage(STAGE_LOADING, this.schedule == null ? 
STAGE_COMPLETED : STAGE_PROCESSING)) { ++ // cancelled ++ return; ++ } ++ final TaskResult newData = GenericDataLoadTask.this.runOffMain(this.data, this.throwable); ++ ++ if (GenericDataLoadTask.this.stageAndReferenceCount.get() == STAGE_CANCELLED) { ++ // don't try to schedule further ++ return; ++ } ++ ++ if (this.schedule != null) { ++ final TaskResult syncComplete = GenericDataLoadTask.this.completeOnMainOffMain(newData.left, newData.right); ++ ++ if (syncComplete != null) { ++ if (GenericDataLoadTask.this.advanceStage(STAGE_PROCESSING, STAGE_COMPLETED)) { ++ GenericDataLoadTask.this.onComplete(syncComplete); ++ } // else: cancelled ++ return; ++ } ++ ++ this.schedule.data = newData.left; ++ this.schedule.throwable = newData.right; ++ ++ GenericDataLoadTask.this.processOnMain.queue(); ++ } else { ++ GenericDataLoadTask.this.onComplete((TaskResult)newData); ++ } ++ } ++ } ++ ++ protected final class ProcessOnMainTask implements Runnable { ++ ++ protected OnMain data; ++ protected Throwable throwable; ++ ++ @Override ++ public void run() { ++ if (!GenericDataLoadTask.this.advanceStage(STAGE_PROCESSING, STAGE_COMPLETED)) { ++ // cancelled ++ return; ++ } ++ final TaskResult result = GenericDataLoadTask.this.runOnMain(this.data, this.throwable); ++ ++ GenericDataLoadTask.this.onComplete(result); ++ } ++ } ++ ++ public static final class LoadDataFromDiskTask { ++ ++ protected volatile int priority; ++ protected static final VarHandle PRIORITY_HANDLE = ConcurrentUtil.getVarHandle(LoadDataFromDiskTask.class, "priority", int.class); ++ ++ protected static final int PRIORITY_EXECUTED = Integer.MIN_VALUE >>> 0; ++ protected static final int PRIORITY_LOAD_SCHEDULED = Integer.MIN_VALUE >>> 1; ++ protected static final int PRIORITY_UNLOAD_SCHEDULED = Integer.MIN_VALUE >>> 2; ++ ++ protected static final int PRIORITY_FLAGS = ~Character.MAX_VALUE; ++ ++ protected final int getPriorityVolatile() { ++ return (int)PRIORITY_HANDLE.getVolatile((LoadDataFromDiskTask)this); ++ } ++ ++ protected final int compareAndExchangePriorityVolatile(final int expect, final int update) { ++ return (int)PRIORITY_HANDLE.compareAndExchange((LoadDataFromDiskTask)this, (int)expect, (int)update); ++ } ++ ++ protected final int getAndOrPriorityVolatile(final int val) { ++ return (int)PRIORITY_HANDLE.getAndBitwiseOr((LoadDataFromDiskTask)this, (int)val); ++ } ++ ++ protected final void setPriorityPlain(final int val) { ++ PRIORITY_HANDLE.set((LoadDataFromDiskTask)this, (int)val); ++ } ++ ++ private final ServerLevel world; ++ private final int chunkX; ++ private final int chunkZ; ++ ++ private final RegionFileIOThread.RegionFileType type; ++ private Cancellable dataLoadTask; ++ private Cancellable dataUnloadCancellable; ++ private DelayedPrioritisedTask dataUnloadTask; ++ ++ private final BiConsumer onComplete; ++ ++ // onComplete should be caller sensitive, it may complete synchronously with schedule() - which does ++ // hold a priority lock. 
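The priority field above keeps the numeric priority in the low 16 bits and reserves the top bits for the executed / load-scheduled / unload-scheduled flags (PRIORITY_FLAGS masks off everything above the low 16 bits), so the flags and the priority value can be read and CAS-updated together as one int. A small, self-contained illustration of that layout (an editorial sketch, not part of the patch; the stored value of 5 is a hypothetical priority):

final class PriorityBitsDemo {

    // same bit layout as above: flags occupy the high bits, the priority value the low 16 bits
    static final int PRIORITY_EXECUTED         = Integer.MIN_VALUE >>> 0; // 0x80000000
    static final int PRIORITY_LOAD_SCHEDULED   = Integer.MIN_VALUE >>> 1; // 0x40000000
    static final int PRIORITY_UNLOAD_SCHEDULED = Integer.MIN_VALUE >>> 2; // 0x20000000
    static final int PRIORITY_FLAGS            = ~Character.MAX_VALUE;    // 0xFFFF0000

    public static void main(final String[] args) {
        int word = 5;                       // hypothetical numeric priority in the low bits
        word |= PRIORITY_LOAD_SCHEDULED;    // record that the disk load has been scheduled

        final int priorityValue  = word & ~PRIORITY_FLAGS;                 // recovers 5
        final boolean executed   = (word & PRIORITY_EXECUTED) != 0;        // false
        final boolean loadQueued = (word & PRIORITY_LOAD_SCHEDULED) != 0;  // true

        System.out.println(priorityValue + " executed=" + executed + " loadQueued=" + loadQueued);
    }
}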
++ public LoadDataFromDiskTask(final ServerLevel world, final int chunkX, final int chunkZ, ++ final RegionFileIOThread.RegionFileType type, ++ final BiConsumer onComplete, ++ final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ this.world = world; ++ this.chunkX = chunkX; ++ this.chunkZ = chunkZ; ++ this.type = type; ++ this.onComplete = onComplete; ++ this.setPriorityPlain(priority.priority); ++ } ++ ++ private void complete(final CompoundTag data, final Throwable throwable) { ++ try { ++ this.onComplete.accept(data, throwable); ++ } catch (final Throwable thr2) { ++ this.world.chunkTaskScheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of( ++ "Completed throwable", ChunkTaskScheduler.stringIfNull(throwable), ++ "Regionfile type", ChunkTaskScheduler.stringIfNull(this.type) ++ ), thr2); ++ if (thr2 instanceof ThreadDeath) { ++ throw (ThreadDeath)thr2; ++ } ++ } ++ } ++ ++ protected boolean markExecuting() { ++ return (this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) == 0; ++ } ++ ++ protected boolean isMarkedExecuted() { ++ return (this.getPriorityVolatile() & PRIORITY_EXECUTED) != 0; ++ } ++ ++ public void lowerPriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ ++ int failures = 0; ++ for (int curr = this.getPriorityVolatile();;) { ++ if ((curr & PRIORITY_EXECUTED) != 0) { ++ // cancelled or executed ++ return; ++ } ++ ++ if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) { ++ RegionFileIOThread.lowerPriority(this.world, this.chunkX, this.chunkZ, this.type, priority); ++ return; ++ } ++ ++ if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) { ++ if (this.dataUnloadTask != null) { ++ this.dataUnloadTask.lowerPriority(priority); ++ } ++ // no return - we need to propagate priority ++ } ++ ++ if (!priority.isHigherPriority(curr & ~PRIORITY_FLAGS)) { ++ return; ++ } ++ ++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) { ++ return; ++ } ++ ++ // failed, retry ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ public void setPriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ ++ int failures = 0; ++ for (int curr = this.getPriorityVolatile();;) { ++ if ((curr & PRIORITY_EXECUTED) != 0) { ++ // cancelled or executed ++ return; ++ } ++ ++ if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) { ++ RegionFileIOThread.setPriority(this.world, this.chunkX, this.chunkZ, this.type, priority); ++ return; ++ } ++ ++ if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) { ++ if (this.dataUnloadTask != null) { ++ this.dataUnloadTask.setPriority(priority); ++ } ++ // no return - we need to propagate priority ++ } ++ ++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) { ++ return; ++ } ++ ++ // failed, retry ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ public void raisePriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new 
IllegalArgumentException("Invalid priority " + priority); ++ } ++ ++ int failures = 0; ++ for (int curr = this.getPriorityVolatile();;) { ++ if ((curr & PRIORITY_EXECUTED) != 0) { ++ // cancelled or executed ++ return; ++ } ++ ++ if ((curr & PRIORITY_LOAD_SCHEDULED) != 0) { ++ RegionFileIOThread.raisePriority(this.world, this.chunkX, this.chunkZ, this.type, priority); ++ return; ++ } ++ ++ if ((curr & PRIORITY_UNLOAD_SCHEDULED) != 0) { ++ if (this.dataUnloadTask != null) { ++ this.dataUnloadTask.raisePriority(priority); ++ } ++ // no return - we need to propagate priority ++ } ++ ++ if (!priority.isLowerPriority(curr & ~PRIORITY_FLAGS)) { ++ return; ++ } ++ ++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority | (curr & PRIORITY_FLAGS)))) { ++ return; ++ } ++ ++ // failed, retry ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ public void cancel() { ++ if ((this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) != 0) { ++ // cancelled or executed already ++ return; ++ } ++ ++ // OK if we miss the field read, the task cannot complete if the cancelled bit is set and ++ // the write to dataLoadTask will check for the cancelled bit ++ if (this.dataUnloadCancellable != null) { ++ this.dataUnloadCancellable.cancel(); ++ } ++ ++ if (this.dataLoadTask != null) { ++ this.dataLoadTask.cancel(); ++ } ++ ++ this.complete(CANCELLED_DATA, null); ++ } ++ ++ private final AtomicBoolean scheduled = new AtomicBoolean(); ++ ++ public void schedule() { ++ if (this.scheduled.getAndSet(true)) { ++ throw new IllegalStateException("schedule() called twice"); ++ } ++ int priority = this.getPriorityVolatile(); ++ ++ if ((priority & PRIORITY_EXECUTED) != 0) { ++ // cancelled ++ return; ++ } ++ ++ final BiConsumer consumer = (final CompoundTag data, final Throwable thr) -> { ++ // because cancelScheduled() cannot actually stop this task from executing in every case, we need ++ // to mark complete here to ensure we do not double complete ++ if (LoadDataFromDiskTask.this.markExecuting()) { ++ LoadDataFromDiskTask.this.complete(data, thr); ++ } // else: cancelled ++ }; ++ ++ final PrioritisedExecutor.Priority initialPriority = PrioritisedExecutor.Priority.getPriority(priority); ++ boolean scheduledUnload = false; ++ ++ final NewChunkHolder holder = this.world.chunkTaskScheduler.chunkHolderManager.getChunkHolder(this.chunkX, this.chunkZ); ++ if (holder != null) { ++ final BiConsumer unloadConsumer = (final CompoundTag data, final Throwable thr) -> { ++ if (data != null) { ++ consumer.accept(data, null); ++ } else { ++ // need to schedule task ++ LoadDataFromDiskTask.this.schedule(false, consumer, PrioritisedExecutor.Priority.getPriority(LoadDataFromDiskTask.this.getPriorityVolatile() & ~PRIORITY_FLAGS)); ++ } ++ }; ++ Cancellable unloadCancellable = null; ++ CompoundTag syncComplete = null; ++ final NewChunkHolder.UnloadTask unloadTask = holder.getUnloadTask(this.type); // can be null if no task exists ++ final Completable unloadCompletable = unloadTask == null ? 
null : unloadTask.completable(); ++ if (unloadCompletable != null) { ++ unloadCancellable = unloadCompletable.addAsynchronousWaiter(unloadConsumer); ++ if (unloadCancellable == null) { ++ syncComplete = unloadCompletable.getResult(); ++ } ++ } ++ ++ if (syncComplete != null) { ++ consumer.accept(syncComplete, null); ++ return; ++ } ++ ++ if (unloadCancellable != null) { ++ scheduledUnload = true; ++ this.dataUnloadCancellable = unloadCancellable; ++ this.dataUnloadTask = unloadTask.task(); ++ } ++ } ++ ++ this.schedule(scheduledUnload, consumer, initialPriority); ++ } ++ ++ private void schedule(final boolean scheduledUnload, final BiConsumer consumer, final PrioritisedExecutor.Priority initialPriority) { ++ int priority = this.getPriorityVolatile(); ++ ++ if ((priority & PRIORITY_EXECUTED) != 0) { ++ // cancelled ++ return; ++ } ++ ++ if (!scheduledUnload) { ++ this.dataLoadTask = RegionFileIOThread.loadDataAsync( ++ this.world, this.chunkX, this.chunkZ, this.type, consumer, ++ initialPriority.isHigherPriority(PrioritisedExecutor.Priority.NORMAL), initialPriority ++ ); ++ } ++ ++ int failures = 0; ++ for (;;) { ++ if (priority == (priority = this.compareAndExchangePriorityVolatile(priority, priority | (scheduledUnload ? PRIORITY_UNLOAD_SCHEDULED : PRIORITY_LOAD_SCHEDULED)))) { ++ return; ++ } ++ ++ if ((priority & PRIORITY_EXECUTED) != 0) { ++ // cancelled or executed ++ if (this.dataUnloadCancellable != null) { ++ this.dataUnloadCancellable.cancel(); ++ } ++ ++ if (this.dataLoadTask != null) { ++ this.dataLoadTask.cancel(); ++ } ++ return; ++ } ++ ++ if (scheduledUnload) { ++ if (this.dataUnloadTask != null) { ++ this.dataUnloadTask.setPriority(PrioritisedExecutor.Priority.getPriority(priority & ~PRIORITY_FLAGS)); ++ } ++ } else { ++ RegionFileIOThread.setPriority(this.world, this.chunkX, this.chunkZ, this.type, PrioritisedExecutor.Priority.getPriority(priority & ~PRIORITY_FLAGS)); ++ } ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ /* ++ private static final class LoadDataPriorityHolder extends PriorityHolder { ++ ++ protected final LoadDataFromDiskTask task; ++ ++ protected LoadDataPriorityHolder(final PrioritisedExecutor.Priority priority, final LoadDataFromDiskTask task) { ++ super(priority); ++ this.task = task; ++ } ++ ++ @Override ++ protected void cancelScheduled() { ++ final Cancellable dataLoadTask = this.task.dataLoadTask; ++ if (dataLoadTask != null) { ++ // OK if we miss the field read, the task cannot complete if the cancelled bit is set and ++ // the write to dataLoadTask will check for the cancelled bit ++ this.task.dataLoadTask.cancel(); ++ } ++ this.task.complete(CANCELLED_DATA, null); ++ } ++ ++ @Override ++ protected PrioritisedExecutor.Priority getScheduledPriority() { ++ final LoadDataFromDiskTask task = this.task; ++ return RegionFileIOThread.getPriority(task.world, task.chunkX, task.chunkZ, task.type); ++ } ++ ++ @Override ++ protected void scheduleTask(final PrioritisedExecutor.Priority priority) { ++ final LoadDataFromDiskTask task = this.task; ++ final BiConsumer consumer = (final CompoundTag data, final Throwable thr) -> { ++ // because cancelScheduled() cannot actually stop this task from executing in every case, we need ++ // to mark complete here to ensure we do not double complete ++ if (LoadDataPriorityHolder.this.markExecuting()) { ++ LoadDataPriorityHolder.this.task.complete(data, thr); ++ } // else: cancelled ++ }; ++ task.dataLoadTask = RegionFileIOThread.loadDataAsync( ++ task.world, 
task.chunkX, task.chunkZ, task.type, consumer, ++ priority.isHigherPriority(PrioritisedExecutor.Priority.NORMAL), priority ++ ); ++ if (this.isMarkedExecuted()) { ++ // if we are marked as completed, it could be: ++ // 1. we were cancelled ++ // 2. the consumer was completed ++ // in the 2nd case, cancel() does nothing ++ // in the 1st case, we ensure cancel() is called as it is possible for the cancelling thread ++ // to miss the field write here ++ task.dataLoadTask.cancel(); ++ } ++ } ++ ++ @Override ++ protected void lowerPriorityScheduled(final PrioritisedExecutor.Priority priority) { ++ final LoadDataFromDiskTask task = this.task; ++ RegionFileIOThread.lowerPriority(task.world, task.chunkX, task.chunkZ, task.type, priority); ++ } ++ ++ @Override ++ protected void setPriorityScheduled(final PrioritisedExecutor.Priority priority) { ++ final LoadDataFromDiskTask task = this.task; ++ RegionFileIOThread.setPriority(task.world, task.chunkX, task.chunkZ, task.type, priority); ++ } ++ ++ @Override ++ protected void raisePriorityScheduled(final PrioritisedExecutor.Priority priority) { ++ final LoadDataFromDiskTask task = this.task; ++ RegionFileIOThread.raisePriority(task.world, task.chunkX, task.chunkZ, task.type, priority); ++ } ++ } ++ */ ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java +new file mode 100644 +index 0000000000000000000000000000000000000000..8013dd333e27aa5fd0beb431fa32491eec9f5246 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java +@@ -0,0 +1,2077 @@ ++package io.papermc.paper.chunk.system.scheduling; ++ ++import ca.spottedleaf.concurrentutil.completable.Completable; ++import ca.spottedleaf.concurrentutil.executor.Cancellable; ++import ca.spottedleaf.concurrentutil.executor.standard.DelayedPrioritisedTask; ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; ++import com.google.gson.JsonArray; ++import com.google.gson.JsonElement; ++import com.google.gson.JsonObject; ++import com.google.gson.JsonPrimitive; ++import com.mojang.logging.LogUtils; ++import io.papermc.paper.chunk.system.io.RegionFileIOThread; ++import io.papermc.paper.chunk.system.poi.PoiChunk; ++import io.papermc.paper.util.CoordinateUtils; ++import io.papermc.paper.util.TickThread; ++import io.papermc.paper.util.WorldUtil; ++import io.papermc.paper.world.ChunkEntitySlices; ++import it.unimi.dsi.fastutil.objects.Reference2ObjectLinkedOpenHashMap; ++import it.unimi.dsi.fastutil.objects.Reference2ObjectMap; ++import it.unimi.dsi.fastutil.objects.Reference2ObjectOpenHashMap; ++import it.unimi.dsi.fastutil.objects.ReferenceLinkedOpenHashSet; ++import net.minecraft.nbt.CompoundTag; ++import net.minecraft.server.level.ChunkHolder; ++import net.minecraft.server.level.ChunkMap; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.server.level.TicketType; ++import net.minecraft.world.entity.Entity; ++import net.minecraft.world.level.ChunkPos; ++import net.minecraft.world.level.chunk.ChunkAccess; ++import net.minecraft.world.level.chunk.ChunkStatus; ++import net.minecraft.world.level.chunk.ImposterProtoChunk; ++import net.minecraft.world.level.chunk.LevelChunk; ++import net.minecraft.world.level.chunk.storage.ChunkSerializer; ++import net.minecraft.world.level.chunk.storage.EntityStorage; ++import org.slf4j.Logger; ++import 
java.lang.invoke.VarHandle; ++import java.util.ArrayList; ++import java.util.Iterator; ++import java.util.List; ++import java.util.Map; ++import java.util.Objects; ++import java.util.concurrent.atomic.AtomicBoolean; ++import java.util.function.Consumer; ++ ++public final class NewChunkHolder { ++ ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ ++ public static final Thread.UncaughtExceptionHandler CHUNKSYSTEM_UNCAUGHT_EXCEPTION_HANDLER = new Thread.UncaughtExceptionHandler() { ++ @Override ++ public void uncaughtException(final Thread thread, final Throwable throwable) { ++ if (!(throwable instanceof ThreadDeath)) { ++ LOGGER.error("Uncaught exception in thread " + thread.getName(), throwable); ++ } ++ } ++ }; ++ ++ public final ServerLevel world; ++ public final int chunkX; ++ public final int chunkZ; ++ ++ public final ChunkTaskScheduler scheduler; ++ ++ // load/unload state ++ ++ // chunk data state ++ ++ private ChunkEntitySlices entityChunk; ++ // entity chunk that is loaded, but not yet deserialized ++ private CompoundTag pendingEntityChunk; ++ ++ ChunkEntitySlices loadInEntityChunk(final boolean transientChunk) { ++ TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Cannot sync load entity data off-main"); ++ final CompoundTag entityChunk; ++ final ChunkEntitySlices ret; ++ this.scheduler.schedulingLock.lock(); ++ try { ++ if (this.entityChunk != null && (transientChunk || !this.entityChunk.isTransient())) { ++ return this.entityChunk; ++ } ++ final CompoundTag pendingEntityChunk = this.pendingEntityChunk; ++ if (!transientChunk && pendingEntityChunk == null) { ++ throw new IllegalStateException("Must load entity data from disk before loading in the entity chunk!"); ++ } ++ ++ if (this.entityChunk == null) { ++ ret = this.entityChunk = new ChunkEntitySlices( ++ this.world, this.chunkX, this.chunkZ, this.getChunkStatus(), ++ WorldUtil.getMinSection(this.world), WorldUtil.getMaxSection(this.world) ++ ); ++ ++ ret.setTransient(transientChunk); ++ ++ this.world.getEntityLookup().entitySectionLoad(this.chunkX, this.chunkZ, ret); ++ } else { ++ // transientChunk = false here ++ ret = this.entityChunk; ++ this.entityChunk.setTransient(false); ++ } ++ ++ if (!transientChunk) { ++ this.pendingEntityChunk = null; ++ entityChunk = pendingEntityChunk == EMPTY_ENTITY_CHUNK ? 
null : pendingEntityChunk; ++ } else { ++ entityChunk = null; ++ } ++ } finally { ++ this.scheduler.schedulingLock.unlock(); ++ } ++ ++ if (!transientChunk) { ++ if (entityChunk != null) { ++ final List entities = EntityStorage.readEntities(this.world, entityChunk); ++ ++ this.world.getEntityLookup().addEntityChunkEntities(entities); ++ } ++ } ++ ++ return ret; ++ } ++ ++ // needed to distinguish whether the entity chunk has been read from disk but is empty or whether it has _not_ ++ // been read from disk ++ private static final CompoundTag EMPTY_ENTITY_CHUNK = new CompoundTag(); ++ ++ private ChunkLoadTask.EntityDataLoadTask entityDataLoadTask; ++ // note: if entityDataLoadTask is cancelled, but on its completion entityDataLoadTaskWaiters.size() != 0, ++ // then the task is rescheduled ++ private List entityDataLoadTaskWaiters; ++ ++ public ChunkLoadTask.EntityDataLoadTask getEntityDataLoadTask() { ++ return this.entityDataLoadTask; ++ } ++ ++ // must hold schedule lock for the two below functions ++ ++ // returns only if the data has been loaded from disk, DOES NOT relate to whether it has been deserialized ++ // or added into the world (or even into entityChunk) ++ public boolean isEntityChunkNBTLoaded() { ++ return (this.entityChunk != null && !this.entityChunk.isTransient()) || this.pendingEntityChunk != null; ++ } ++ ++ private void completeEntityLoad(final GenericDataLoadTask.TaskResult result) { ++ final List completeWaiters; ++ ChunkLoadTask.EntityDataLoadTask entityDataLoadTask = null; ++ boolean scheduleEntityTask = false; ++ this.scheduler.schedulingLock.lock(); ++ try { ++ final List waiters = this.entityDataLoadTaskWaiters; ++ this.entityDataLoadTask = null; ++ if (result != null) { ++ this.entityDataLoadTaskWaiters = null; ++ this.pendingEntityChunk = result.left() == null ? EMPTY_ENTITY_CHUNK : result.left(); ++ if (result.right() != null) { ++ LOGGER.error("Unhandled entity data load exception, data data will be lost: ", result.right()); ++ } ++ ++ completeWaiters = waiters; ++ } else { ++ // cancelled ++ completeWaiters = null; ++ ++ // need to re-schedule? 
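++ // the task completed as cancelled, but callbacks registered via getOrLoadEntityData() may still be waiting; ++ // in that case a replacement EntityDataLoadTask is created and schedule(true) is called once for each ++ // remaining waiter, mirroring the per-waiter schedule()/cancel() reference counting on the original task.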
++ if (waiters.isEmpty()) { ++ this.entityDataLoadTaskWaiters = null; ++ // no tasks to schedule _for_ ++ } else { ++ entityDataLoadTask = this.entityDataLoadTask = new ChunkLoadTask.EntityDataLoadTask( ++ this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority() ++ ); ++ entityDataLoadTask.addCallback(this::completeEntityLoad); ++ // need one schedule() per waiter ++ for (final GenericDataLoadTaskCallback callback : waiters) { ++ scheduleEntityTask |= entityDataLoadTask.schedule(true); ++ } ++ } ++ } ++ } finally { ++ this.scheduler.schedulingLock.unlock(); ++ } ++ ++ if (scheduleEntityTask) { ++ entityDataLoadTask.scheduleNow(); ++ } ++ ++ // avoid holding the scheduling lock while completing ++ if (completeWaiters != null) { ++ for (final GenericDataLoadTaskCallback callback : completeWaiters) { ++ callback.accept(result); ++ } ++ } ++ ++ this.scheduler.schedulingLock.lock(); ++ try { ++ this.checkUnload(); ++ } finally { ++ this.scheduler.schedulingLock.unlock(); ++ } ++ } ++ ++ // note: it is guaranteed that the consumer cannot be called for the entirety that the schedule lock is held ++ // however, when the consumer is invoked, it will hold the schedule lock ++ public GenericDataLoadTaskCallback getOrLoadEntityData(final Consumer> consumer) { ++ if (this.isEntityChunkNBTLoaded()) { ++ throw new IllegalStateException("Cannot load entity data, it is already loaded"); ++ } ++ // why not just acquire the lock? because the caller NEEDS to call isEntityChunkNBTLoaded before this! ++ if (!this.scheduler.schedulingLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Must hold scheduling lock"); ++ } ++ ++ final GenericDataLoadTaskCallback ret = new EntityDataLoadTaskCallback((Consumer)consumer, this); ++ ++ if (this.entityDataLoadTask == null) { ++ this.entityDataLoadTask = new ChunkLoadTask.EntityDataLoadTask( ++ this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority() ++ ); ++ this.entityDataLoadTask.addCallback(this::completeEntityLoad); ++ this.entityDataLoadTaskWaiters = new ArrayList<>(); ++ } ++ this.entityDataLoadTaskWaiters.add(ret); ++ if (this.entityDataLoadTask.schedule(true)) { ++ ret.schedule = this.entityDataLoadTask; ++ } ++ this.checkUnload(); ++ ++ return ret; ++ } ++ ++ private static final class EntityDataLoadTaskCallback extends GenericDataLoadTaskCallback { ++ ++ public EntityDataLoadTaskCallback(final Consumer> consumer, final NewChunkHolder chunkHolder) { ++ super(consumer, chunkHolder); ++ } ++ ++ @Override ++ void internalCancel() { ++ this.chunkHolder.entityDataLoadTaskWaiters.remove(this); ++ this.chunkHolder.entityDataLoadTask.cancel(); ++ } ++ } ++ ++ private PoiChunk poiChunk; ++ ++ private ChunkLoadTask.PoiDataLoadTask poiDataLoadTask; ++ // note: if entityDataLoadTask is cancelled, but on its completion entityDataLoadTaskWaiters.size() != 0, ++ // then the task is rescheduled ++ private List poiDataLoadTaskWaiters; ++ ++ public ChunkLoadTask.PoiDataLoadTask getPoiDataLoadTask() { ++ return this.poiDataLoadTask; ++ } ++ ++ // must hold schedule lock for the two below functions ++ ++ public boolean isPoiChunkLoaded() { ++ return this.poiChunk != null; ++ } ++ ++ private void completePoiLoad(final GenericDataLoadTask.TaskResult result) { ++ final List completeWaiters; ++ ChunkLoadTask.PoiDataLoadTask poiDataLoadTask = null; ++ boolean schedulePoiTask = false; ++ this.scheduler.schedulingLock.lock(); ++ try { ++ final List waiters = this.poiDataLoadTaskWaiters; ++ this.poiDataLoadTask = null; ++ if 
(result != null) { ++ this.poiDataLoadTaskWaiters = null; ++ this.poiChunk = result.left(); ++ if (result.right() != null) { ++ LOGGER.error("Unhandled poi load exception, poi data will be lost: ", result.right()); ++ } ++ ++ completeWaiters = waiters; ++ } else { ++ // cancelled ++ completeWaiters = null; ++ ++ // need to re-schedule? ++ if (waiters.isEmpty()) { ++ this.poiDataLoadTaskWaiters = null; ++ // no tasks to schedule _for_ ++ } else { ++ poiDataLoadTask = this.poiDataLoadTask = new ChunkLoadTask.PoiDataLoadTask( ++ this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority() ++ ); ++ poiDataLoadTask.addCallback(this::completePoiLoad); ++ // need one schedule() per waiter ++ for (final GenericDataLoadTaskCallback callback : waiters) { ++ schedulePoiTask |= poiDataLoadTask.schedule(true); ++ } ++ } ++ } ++ } finally { ++ this.scheduler.schedulingLock.unlock(); ++ } ++ ++ if (schedulePoiTask) { ++ poiDataLoadTask.scheduleNow(); ++ } ++ ++ // avoid holding the scheduling lock while completing ++ if (completeWaiters != null) { ++ for (final GenericDataLoadTaskCallback callback : completeWaiters) { ++ callback.accept(result); ++ } ++ } ++ this.scheduler.schedulingLock.lock(); ++ try { ++ this.checkUnload(); ++ } finally { ++ this.scheduler.schedulingLock.unlock(); ++ } ++ } ++ ++ // note: it is guaranteed that the consumer cannot be called for the entirety that the schedule lock is held ++ // however, when the consumer is invoked, it will hold the schedule lock ++ public GenericDataLoadTaskCallback getOrLoadPoiData(final Consumer> consumer) { ++ if (this.isPoiChunkLoaded()) { ++ throw new IllegalStateException("Cannot load poi data, it is already loaded"); ++ } ++ // why not just acquire the lock? because the caller NEEDS to call isPoiChunkLoaded before this! 
++ if (!this.scheduler.schedulingLock.isHeldByCurrentThread()) { ++ throw new IllegalStateException("Must hold scheduling lock"); ++ } ++ ++ final GenericDataLoadTaskCallback ret = new PoiDataLoadTaskCallback((Consumer)consumer, this); ++ ++ if (this.poiDataLoadTask == null) { ++ this.poiDataLoadTask = new ChunkLoadTask.PoiDataLoadTask( ++ this.scheduler, this.world, this.chunkX, this.chunkZ, this.getEffectivePriority() ++ ); ++ this.poiDataLoadTask.addCallback(this::completePoiLoad); ++ this.poiDataLoadTaskWaiters = new ArrayList<>(); ++ } ++ this.poiDataLoadTaskWaiters.add(ret); ++ if (this.poiDataLoadTask.schedule(true)) { ++ ret.schedule = this.poiDataLoadTask; ++ } ++ this.checkUnload(); ++ ++ return ret; ++ } ++ ++ private static final class PoiDataLoadTaskCallback extends GenericDataLoadTaskCallback { ++ ++ public PoiDataLoadTaskCallback(final Consumer> consumer, final NewChunkHolder chunkHolder) { ++ super(consumer, chunkHolder); ++ } ++ ++ @Override ++ void internalCancel() { ++ this.chunkHolder.poiDataLoadTaskWaiters.remove(this); ++ this.chunkHolder.poiDataLoadTask.cancel(); ++ } ++ } ++ ++ public static abstract class GenericDataLoadTaskCallback implements Cancellable, Consumer> { ++ ++ protected final Consumer> consumer; ++ protected final NewChunkHolder chunkHolder; ++ protected boolean completed; ++ protected GenericDataLoadTask schedule; ++ protected final AtomicBoolean scheduled = new AtomicBoolean(); ++ ++ public GenericDataLoadTaskCallback(final Consumer> consumer, ++ final NewChunkHolder chunkHolder) { ++ this.consumer = consumer; ++ this.chunkHolder = chunkHolder; ++ } ++ ++ public void schedule() { ++ if (this.scheduled.getAndSet(true)) { ++ throw new IllegalStateException("Double calling schedule()"); ++ } ++ if (this.schedule != null) { ++ this.schedule.scheduleNow(); ++ this.schedule = null; ++ } ++ } ++ ++ boolean isCompleted() { ++ return this.completed; ++ } ++ ++ // must hold scheduling lock ++ private boolean setCompleted() { ++ if (this.completed) { ++ return false; ++ } ++ return this.completed = true; ++ } ++ ++ @Override ++ public void accept(final GenericDataLoadTask.TaskResult result) { ++ if (result != null) { ++ if (this.setCompleted()) { ++ this.consumer.accept(result); ++ } else { ++ throw new IllegalStateException("Cannot be cancelled at this point"); ++ } ++ } else { ++ throw new NullPointerException("Result cannot be null (cancelled)"); ++ } ++ } ++ ++ // holds scheduling lock ++ abstract void internalCancel(); ++ ++ @Override ++ public boolean cancel() { ++ this.chunkHolder.scheduler.schedulingLock.lock(); ++ try { ++ if (!this.completed) { ++ this.completed = true; ++ this.internalCancel(); ++ return true; ++ } ++ return false; ++ } finally { ++ this.chunkHolder.scheduler.schedulingLock.unlock(); ++ } ++ } ++ } ++ ++ private ChunkAccess currentChunk; ++ ++ // generation status state ++ ++ /** ++ * Current status the chunk has been brought up to by the chunk system. null indicates no work at all ++ */ ++ private ChunkStatus currentGenStatus; ++ ++ // This allows unsynchronised access to the chunk and last gen status ++ private volatile ChunkCompletion lastChunkCompletion; ++ ++ public ChunkCompletion getLastChunkCompletion() { ++ return this.lastChunkCompletion; ++ } ++ ++ public static final record ChunkCompletion(ChunkAccess chunk, ChunkStatus genStatus) {}; ++ ++ /** ++ * The target final chunk status the chunk system will bring the chunk to. 
++ */ ++ private ChunkStatus requestedGenStatus; ++ ++ private ChunkProgressionTask generationTask; ++ private ChunkStatus generationTaskStatus; ++ ++ /** ++ * contains the neighbours that this chunk generation is blocking on ++ */ ++ protected final ReferenceLinkedOpenHashSet neighboursBlockingGenTask = new ReferenceLinkedOpenHashSet<>(4); ++ ++ /** ++ * map of ChunkHolder -> Required Status for this chunk ++ */ ++ protected final Reference2ObjectLinkedOpenHashMap neighboursWaitingForUs = new Reference2ObjectLinkedOpenHashMap<>(); ++ ++ public void addGenerationBlockingNeighbour(final NewChunkHolder neighbour) { ++ this.neighboursBlockingGenTask.add(neighbour); ++ } ++ ++ public void addWaitingNeighbour(final NewChunkHolder neighbour, final ChunkStatus requiredStatus) { ++ final boolean wasEmpty = this.neighboursWaitingForUs.isEmpty(); ++ this.neighboursWaitingForUs.put(neighbour, requiredStatus); ++ if (wasEmpty) { ++ this.checkUnload(); ++ } ++ } ++ ++ // priority state ++ ++ // the target priority for this chunk to generate at ++ // TODO this will screw over scheduling at lower priorities to neighbours, fix ++ private PrioritisedExecutor.Priority priority = PrioritisedExecutor.Priority.NORMAL; ++ private boolean priorityLocked; ++ ++ // the priority neighbouring chunks have requested this chunk generate at ++ private PrioritisedExecutor.Priority neighbourRequestedPriority = PrioritisedExecutor.Priority.IDLE; ++ ++ public PrioritisedExecutor.Priority getEffectivePriority() { ++ return PrioritisedExecutor.Priority.max(this.priority, this.neighbourRequestedPriority); ++ } ++ ++ protected void recalculateNeighbourRequestedPriority() { ++ if (this.neighboursWaitingForUs.isEmpty()) { ++ this.neighbourRequestedPriority = PrioritisedExecutor.Priority.IDLE; ++ return; ++ } ++ ++ PrioritisedExecutor.Priority max = PrioritisedExecutor.Priority.IDLE; ++ ++ for (final NewChunkHolder holder : this.neighboursWaitingForUs.keySet()) { ++ final PrioritisedExecutor.Priority neighbourPriority = holder.getEffectivePriority(); ++ if (neighbourPriority.isHigherPriority(max)) { ++ max = neighbourPriority; ++ } ++ } ++ ++ final PrioritisedExecutor.Priority current = this.getEffectivePriority(); ++ this.neighbourRequestedPriority = max; ++ final PrioritisedExecutor.Priority next = this.getEffectivePriority(); ++ ++ if (current == next) { ++ return; ++ } ++ ++ // our effective priority has changed, so change our task ++ if (this.generationTask != null) { ++ this.generationTask.setPriority(next); ++ } ++ ++ // now propagate this to our neighbours ++ this.recalculateNeighbourPriorities(); ++ } ++ ++ public void recalculateNeighbourPriorities() { ++ for (final NewChunkHolder holder : this.neighboursBlockingGenTask) { ++ holder.recalculateNeighbourRequestedPriority(); ++ } ++ } ++ ++ // must hold scheduling lock ++ public void raisePriority(final PrioritisedExecutor.Priority priority) { ++ if (this.priority != null && this.priority.isHigherOrEqualPriority(priority)) { ++ return; ++ } ++ this.setPriority(priority); ++ } ++ ++ private void lockPriority() { ++ this.priority = PrioritisedExecutor.Priority.NORMAL; ++ this.priorityLocked = true; ++ } ++ ++ // must hold scheduling lock ++ public void setPriority(final PrioritisedExecutor.Priority priority) { ++ if (this.priorityLocked) { ++ return; ++ } ++ final PrioritisedExecutor.Priority old = this.getEffectivePriority(); ++ this.priority = priority; ++ final PrioritisedExecutor.Priority newPriority = this.getEffectivePriority(); ++ ++ if (old != newPriority) { ++ if 
(this.generationTask != null) { ++ this.generationTask.setPriority(newPriority); ++ } ++ } ++ ++ this.recalculateNeighbourPriorities(); ++ } ++ ++ // must hold scheduling lock ++ public void lowerPriority(final PrioritisedExecutor.Priority priority) { ++ if (this.priority != null && this.priority.isLowerOrEqualPriority(priority)) { ++ return; ++ } ++ this.setPriority(priority); ++ } ++ ++ // error handling state ++ private ChunkStatus failedGenStatus; ++ private Throwable genTaskException; ++ private Thread genTaskFailedThread; ++ ++ private boolean failedLightUpdate; ++ ++ public void failedLightUpdate() { ++ this.failedLightUpdate = true; ++ } ++ ++ public boolean hasFailedGeneration() { ++ return this.genTaskException != null; ++ } ++ ++ // ticket level state ++ private int oldTicketLevel = ChunkMap.MAX_CHUNK_DISTANCE + 1; ++ private int currentTicketLevel = ChunkMap.MAX_CHUNK_DISTANCE + 1; ++ ++ public int getTicketLevel() { ++ return this.currentTicketLevel; ++ } ++ ++ public final ChunkHolder vanillaChunkHolder; ++ ++ public NewChunkHolder(final ServerLevel world, final int chunkX, final int chunkZ, final ChunkTaskScheduler scheduler) { ++ this.world = world; ++ this.chunkX = chunkX; ++ this.chunkZ = chunkZ; ++ this.scheduler = scheduler; ++ this.vanillaChunkHolder = new ChunkHolder(new ChunkPos(chunkX, chunkZ), world, world.getLightEngine(), world.chunkSource.chunkMap, this); ++ } ++ ++ protected ImposterProtoChunk wrappedChunkForNeighbour; ++ ++ // holds scheduling lock ++ public ChunkAccess getChunkForNeighbourAccess() { ++ // Vanilla overrides the status futures with an imposter chunk to prevent writes to full chunks ++ // But we don't store per-status futures, so we need this hack ++ if (this.wrappedChunkForNeighbour != null) { ++ return this.wrappedChunkForNeighbour; ++ } ++ final ChunkAccess ret = this.currentChunk; ++ return ret instanceof LevelChunk fullChunk ? this.wrappedChunkForNeighbour = new ImposterProtoChunk(fullChunk, false) : ret; ++ } ++ ++ public ChunkAccess getCurrentChunk() { ++ return this.currentChunk; ++ } ++ ++ int getCurrentTicketLevel() { ++ return this.currentTicketLevel; ++ } ++ ++ void updateTicketLevel(final int toLevel) { ++ this.currentTicketLevel = toLevel; ++ } ++ ++ private int totalNeighboursUsingThisChunk = 0; ++ ++ // holds schedule lock ++ public void addNeighbourUsingChunk() { ++ final int now = ++this.totalNeighboursUsingThisChunk; ++ ++ if (now == 1) { ++ this.checkUnload(); ++ } ++ } ++ ++ // holds schedule lock ++ public void removeNeighbourUsingChunk() { ++ final int now = --this.totalNeighboursUsingThisChunk; ++ ++ if (now == 0) { ++ this.checkUnload(); ++ } ++ ++ if (now < 0) { ++ throw new IllegalStateException("Neighbours using this chunk cannot be negative"); ++ } ++ } ++ ++ // must hold scheduling lock ++ // returns string reason for why chunk should remain loaded, null otherwise ++ public final String isSafeToUnload() { ++ // is ticket level below threshold? ++ if (this.oldTicketLevel <= ChunkHolderManager.MAX_TICKET_LEVEL) { ++ return "ticket_level"; ++ } ++ ++ // are we being used by another chunk for generation? ++ if (this.totalNeighboursUsingThisChunk != 0) { ++ return "neighbours_generating"; ++ } ++ ++ // are we going to be used by another chunk for generation? 
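++ // (a neighbour has registered itself via addWaitingNeighbour and we have not yet generated to the status it requires)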
++ if (!this.neighboursWaitingForUs.isEmpty()) { ++ return "neighbours_waiting"; ++ } ++ ++ // chunk must be marked inaccessible (i.e unloaded to plugins) ++ if (this.getChunkStatus() != ChunkHolder.FullChunkStatus.INACCESSIBLE) { ++ return "fullchunkstatus"; ++ } ++ ++ // are we currently generating anything, or have requested generation? ++ if (this.generationTask != null) { ++ return "generating"; ++ } ++ if (this.requestedGenStatus != null) { ++ return "requested_generation"; ++ } ++ ++ // entity data requested? ++ if (this.entityDataLoadTask != null) { ++ return "entity_data_requested"; ++ } ++ ++ // poi data requested? ++ if (this.poiDataLoadTask != null) { ++ return "poi_data_requested"; ++ } ++ ++ // are we pending serialization? ++ if (this.entityDataUnload != null) { ++ return "entity_serialization"; ++ } ++ if (this.poiDataUnload != null) { ++ return "poi_serialization"; ++ } ++ if (this.chunkDataUnload != null) { ++ return "chunk_serialization"; ++ } ++ ++ // Note: light tasks do not need a check, as they add a ticket. ++ ++ // nothing is using this chunk, so it should be unloaded ++ return null; ++ } ++ ++ /** Unloaded from chunk map */ ++ boolean killed; ++ ++ // must hold scheduling lock ++ private void checkUnload() { ++ if (this.killed) { ++ return; ++ } ++ if (this.isSafeToUnload() == null) { ++ // ensure in unload queue ++ this.scheduler.chunkHolderManager.unloadQueue.add(this); ++ } else { ++ // ensure not in unload queue ++ this.scheduler.chunkHolderManager.unloadQueue.remove(this); ++ } ++ } ++ ++ static final record UnloadState(NewChunkHolder holder, ChunkAccess chunk, ChunkEntitySlices entityChunk, PoiChunk poiChunk) {}; ++ ++ // note: these are completed with null to indicate that no write occurred ++ // they are also completed with null to indicate a null write occurred ++ private UnloadTask chunkDataUnload; ++ private UnloadTask entityDataUnload; ++ private UnloadTask poiDataUnload; ++ ++ public static final record UnloadTask(Completable completable, DelayedPrioritisedTask task) {} ++ ++ public UnloadTask getUnloadTask(final RegionFileIOThread.RegionFileType type) { ++ switch (type) { ++ case CHUNK_DATA: ++ return this.chunkDataUnload; ++ case ENTITY_DATA: ++ return this.entityDataUnload; ++ case POI_DATA: ++ return this.poiDataUnload; ++ default: ++ throw new IllegalStateException("Unknown regionfile type " + type); ++ } ++ } ++ ++ private UnloadState unloadState; ++ ++ // holds schedule lock ++ UnloadState unloadStage1() { ++ // because we hold the scheduling lock, we cannot actually unload anything ++ // so we need to null this chunk's state ++ ChunkAccess chunk = this.currentChunk; ++ ChunkEntitySlices entityChunk = this.entityChunk; ++ PoiChunk poiChunk = this.poiChunk; ++ // chunk state ++ this.currentChunk = null; ++ this.currentGenStatus = null; ++ this.wrappedChunkForNeighbour = null; ++ this.lastChunkCompletion = null; ++ // entity chunk state ++ this.entityChunk = null; ++ this.pendingEntityChunk = null; ++ ++ // poi chunk state ++ this.poiChunk = null; ++ ++ // priority state ++ this.priorityLocked = false; ++ ++ if (chunk != null) { ++ this.chunkDataUnload = new UnloadTask(new Completable<>(), new DelayedPrioritisedTask(PrioritisedExecutor.Priority.NORMAL)); ++ } ++ if (poiChunk != null) { ++ this.poiDataUnload = new UnloadTask(new Completable<>(), null); ++ } ++ if (entityChunk != null) { ++ this.entityDataUnload = new UnloadTask(new Completable<>(), null); ++ } ++ ++ return this.unloadState = (chunk != null || entityChunk != null || poiChunk != 
null) ? new UnloadState(this, chunk, entityChunk, poiChunk) : null; ++ } ++ ++ // data is null if failed or does not need to be saved ++ void completeAsyncChunkDataSave(final CompoundTag data) { ++ if (data != null) { ++ RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, data, RegionFileIOThread.RegionFileType.CHUNK_DATA); ++ } ++ this.chunkDataUnload.completable().complete(data); ++ this.scheduler.schedulingLock.lock(); ++ try { ++ // can only write to these fields while holding the schedule lock ++ this.chunkDataUnload = null; ++ this.checkUnload(); ++ } finally { ++ this.scheduler.schedulingLock.unlock(); ++ } ++ } ++ ++ void unloadStage2(final UnloadState state) { ++ this.unloadState = null; ++ final ChunkAccess chunk = state.chunk(); ++ final ChunkEntitySlices entityChunk = state.entityChunk(); ++ final PoiChunk poiChunk = state.poiChunk(); ++ ++ final boolean shouldLevelChunkNotSave = (chunk instanceof LevelChunk levelChunk && levelChunk.mustNotSave); ++ ++ // unload chunk data ++ if (chunk != null) { ++ if (chunk instanceof LevelChunk levelChunk) { ++ levelChunk.setLoaded(false); ++ } ++ ++ if (!shouldLevelChunkNotSave) { ++ this.saveChunk(chunk, true); ++ } else { ++ this.completeAsyncChunkDataSave(null); ++ } ++ ++ if (chunk instanceof LevelChunk levelChunk) { ++ this.world.unload(levelChunk); ++ } ++ } ++ ++ // unload entity data ++ if (entityChunk != null) { ++ this.saveEntities(entityChunk, true); ++ // yes this is a hack to pass the compound tag through... ++ final CompoundTag lastEntityUnload = this.lastEntityUnload; ++ this.lastEntityUnload = null; ++ ++ if (entityChunk.unload()) { ++ this.scheduler.schedulingLock.lock(); ++ try { ++ entityChunk.setTransient(true); ++ this.entityChunk = entityChunk; ++ } finally { ++ this.scheduler.schedulingLock.unlock(); ++ } ++ } else { ++ this.world.getEntityLookup().entitySectionUnload(this.chunkX, this.chunkZ); ++ } ++ // we need to delay the callback until after determining transience, otherwise a potential loader could ++ // set entityChunk before we do ++ this.entityDataUnload.completable().complete(lastEntityUnload); ++ } ++ ++ // unload poi data ++ if (poiChunk != null) { ++ if (poiChunk.isDirty() && !shouldLevelChunkNotSave) { ++ this.savePOI(poiChunk, true); ++ } else { ++ this.poiDataUnload.completable().complete(null); ++ } ++ ++ if (poiChunk.isLoaded()) { ++ this.world.getPoiManager().onUnload(CoordinateUtils.getChunkKey(this.chunkX, this.chunkZ)); ++ } ++ } ++ } ++ ++ boolean unloadStage3() { ++ // can only write to these while holding the schedule lock, and we instantly complete them in stage2 ++ this.poiDataUnload = null; ++ this.entityDataUnload = null; ++ ++ // we need to check if anything has been loaded in the meantime (or if we have transient entities) ++ if (this.entityChunk != null || this.poiChunk != null || this.currentChunk != null) { ++ return false; ++ } ++ ++ return this.isSafeToUnload() == null; ++ } ++ ++ private void cancelGenTask() { ++ if (this.generationTask != null) { ++ this.generationTask.cancel(); ++ } else { ++ // otherwise, we are blocking on neighbours, so remove them ++ if (!this.neighboursBlockingGenTask.isEmpty()) { ++ for (final NewChunkHolder neighbour : this.neighboursBlockingGenTask) { ++ if (neighbour.neighboursWaitingForUs.remove(this) == null) { ++ throw new IllegalStateException("Corrupt state"); ++ } ++ if (neighbour.neighboursWaitingForUs.isEmpty()) { ++ neighbour.checkUnload(); ++ } ++ } ++ this.neighboursBlockingGenTask.clear(); ++ this.checkUnload(); ++ } ++ } ++ 
} ++ ++ // holds: ticket level update lock ++ // holds: schedule lock ++ public void processTicketLevelUpdate(final List scheduledTasks, final List changedLoadStatus) { ++ final int oldLevel = this.oldTicketLevel; ++ final int newLevel = this.currentTicketLevel; ++ ++ if (oldLevel == newLevel) { ++ return; ++ } ++ ++ this.oldTicketLevel = newLevel; ++ ++ final ChunkHolder.FullChunkStatus oldState = ChunkHolder.getFullChunkStatus(oldLevel); ++ final ChunkHolder.FullChunkStatus newState = ChunkHolder.getFullChunkStatus(newLevel); ++ final boolean oldUnloaded = oldLevel > ChunkHolderManager.MAX_TICKET_LEVEL; ++ final boolean newUnloaded = newLevel > ChunkHolderManager.MAX_TICKET_LEVEL; ++ ++ final ChunkStatus maxGenerationStatusOld = ChunkHolder.getStatus(oldLevel); ++ final ChunkStatus maxGenerationStatusNew = ChunkHolder.getStatus(newLevel); ++ ++ // check for cancellations from downgrading ticket level ++ if (this.requestedGenStatus != null && !newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && newLevel > oldLevel) { ++ // note: cancel() may invoke onChunkGenComplete synchronously here ++ if (newUnloaded) { ++ // need to cancel all tasks ++ // note: requested status must be set to null here before cancellation, to indicate to the ++ // completion logic that we do not want rescheduling to occur ++ this.requestedGenStatus = null; ++ this.cancelGenTask(); ++ } else { ++ final ChunkStatus toCancel = maxGenerationStatusNew.getNextStatus(); ++ final ChunkStatus currentRequestedStatus = this.requestedGenStatus; ++ ++ if (currentRequestedStatus.isOrAfter(toCancel)) { ++ // we do have to cancel something here ++ // clamp requested status to the maximum ++ if (this.currentGenStatus != null && this.currentGenStatus.isOrAfter(maxGenerationStatusNew)) { ++ // already generated to status, so we must cancel ++ this.requestedGenStatus = null; ++ this.cancelGenTask(); ++ } else { ++ // not generated to status, so we may have to cancel ++ // note: gen task is always 1 status above current gen status if not null ++ this.requestedGenStatus = maxGenerationStatusNew; ++ if (this.generationTaskStatus != null && this.generationTaskStatus.isOrAfter(toCancel)) { ++ // TODO is this even possible?
i don't think so ++ throw new IllegalStateException("?????"); ++ } ++ } ++ } ++ } ++ } ++ ++ if (newState != oldState) { ++ if (newState.isOrAfter(oldState)) { ++ // status upgrade ++ if (!oldState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ // may need to schedule full load ++ if (this.currentGenStatus != ChunkStatus.FULL) { ++ if (this.requestedGenStatus != null) { ++ this.requestedGenStatus = ChunkStatus.FULL; ++ } else { ++ this.scheduler.schedule( ++ this.chunkX, this.chunkZ, ChunkStatus.FULL, this, scheduledTasks ++ ); ++ } ++ } else { ++ // now we are fully loaded ++ this.queueBorderFullStatus(true, changedLoadStatus); ++ } ++ } ++ } else { ++ // status downgrade ++ if (!newState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING) && oldState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING)) { ++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.ENTITY_TICKING, null); ++ } ++ ++ if (!newState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING) && oldState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING)) { ++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.TICKING, null); ++ } ++ ++ if (!newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && oldState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.BORDER, null); ++ } ++ } ++ } ++ ++ if (oldState != newState) { ++ if (this.onTicketUpdate(oldState, newState)) { ++ changedLoadStatus.add(this); ++ } ++ } ++ ++ if (oldUnloaded != newUnloaded) { ++ this.checkUnload(); ++ } ++ } ++ ++ /* ++ For full chunks, vanilla just loads chunks around it up to FEATURES, 1 radius ++ ++ For ticking chunks, it updates the persistent entity manager (soon to be completely nuked by EntitySliceManager, which ++ will also need to be updated but with far less implications) ++ It also shoves the scheduled block ticks into the tick scheduler ++ ++ For entity ticking chunks, updates the entity manager (see above) ++ */ ++ ++ static final int NEIGHBOUR_RADIUS = 2; ++ private long fullNeighbourChunksLoadedBitset; ++ ++ private static int getFullNeighbourIndex(final int relativeX, final int relativeZ) { ++ // index = (relativeX + NEIGHBOUR_CACHE_RADIUS) + (relativeZ + NEIGHBOUR_CACHE_RADIUS) * (NEIGHBOUR_CACHE_RADIUS * 2 + 1) ++ // optimised variant of the above by moving some of the ops to compile time ++ return relativeX + (relativeZ * (NEIGHBOUR_RADIUS * 2 + 1)) + (NEIGHBOUR_RADIUS + NEIGHBOUR_RADIUS * ((NEIGHBOUR_RADIUS * 2 + 1))); ++ } ++ public final boolean isNeighbourFullLoaded(final int relativeX, final int relativeZ) { ++ return (this.fullNeighbourChunksLoadedBitset & (1L << getFullNeighbourIndex(relativeX, relativeZ))) != 0; ++ } ++ ++ // returns true if this chunk changed full status ++ public final boolean setNeighbourFullLoaded(final int relativeX, final int relativeZ) { ++ final long before = this.fullNeighbourChunksLoadedBitset; ++ final int index = getFullNeighbourIndex(relativeX, relativeZ); ++ this.fullNeighbourChunksLoadedBitset |= (1L << index); ++ return this.onNeighbourChange(before, this.fullNeighbourChunksLoadedBitset); ++ } ++ ++ // returns true if this chunk changed full status ++ public final boolean setNeighbourFullUnloaded(final int relativeX, final int relativeZ) { ++ final long before = this.fullNeighbourChunksLoadedBitset; ++ final int index = getFullNeighbourIndex(relativeX, relativeZ); ++ this.fullNeighbourChunksLoadedBitset &= ~(1L << index); ++ return 
this.onNeighbourChange(before, this.fullNeighbourChunksLoadedBitset); ++ } ++ ++ public static boolean areNeighboursFullLoaded(final long bitset, final int radius) { ++ // index = relativeX + (relativeZ * (NEIGHBOUR_CACHE_RADIUS * 2 + 1)) + (NEIGHBOUR_CACHE_RADIUS + NEIGHBOUR_CACHE_RADIUS * ((NEIGHBOUR_CACHE_RADIUS * 2 + 1))) ++ switch (radius) { ++ case 0: { ++ return (bitset & (1L << getFullNeighbourIndex(0, 0))) != 0L; ++ } ++ case 1: { ++ long mask = 0L; ++ for (int dx = -1; dx <= 1; ++dx) { ++ for (int dz = -1; dz <= 1; ++dz) { ++ mask |= (1L << getFullNeighbourIndex(dx, dz)); ++ } ++ } ++ return (bitset & mask) == mask; ++ } ++ case 2: { ++ long mask = 0L; ++ for (int dx = -2; dx <= 2; ++dx) { ++ for (int dz = -2; dz <= 2; ++dz) { ++ mask |= (1L << getFullNeighbourIndex(dx, dz)); ++ } ++ } ++ return (bitset & mask) == mask; ++ } ++ ++ default: { ++ throw new IllegalArgumentException("Radius not recognized: " + radius); ++ } ++ } ++ } ++ ++ // upper 32 bits are pending status, lower 32 bits are current status ++ private volatile long chunkStatus; ++ private static final long PENDING_STATUS_MASK = Long.MIN_VALUE >> 31; ++ private static final ChunkHolder.FullChunkStatus[] CHUNK_STATUS_BY_ID = ChunkHolder.FullChunkStatus.values(); ++ private static final VarHandle CHUNK_STATUS_HANDLE = ConcurrentUtil.getVarHandle(NewChunkHolder.class, "chunkStatus", long.class); ++ ++ public static ChunkHolder.FullChunkStatus getCurrentChunkStatus(final long encoded) { ++ return CHUNK_STATUS_BY_ID[(int)encoded]; ++ } ++ ++ public static ChunkHolder.FullChunkStatus getPendingChunkStatus(final long encoded) { ++ return CHUNK_STATUS_BY_ID[(int)(encoded >>> 32)]; ++ } ++ ++ public ChunkHolder.FullChunkStatus getChunkStatus() { ++ return getCurrentChunkStatus(((long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this))); ++ } ++ ++ public boolean isEntityTickingReady() { ++ return this.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING); ++ } ++ ++ public boolean isTickingReady() { ++ return this.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.TICKING); ++ } ++ ++ public boolean isFullChunkReady() { ++ return this.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER); ++ } ++ ++ private static ChunkHolder.FullChunkStatus getStatusForBitset(final long bitset) { ++ if (areNeighboursFullLoaded(bitset, 2)) { ++ return ChunkHolder.FullChunkStatus.ENTITY_TICKING; ++ } else if (areNeighboursFullLoaded(bitset, 1)) { ++ return ChunkHolder.FullChunkStatus.TICKING; ++ } else if (areNeighboursFullLoaded(bitset, 0)) { ++ return ChunkHolder.FullChunkStatus.BORDER; ++ } else { ++ return ChunkHolder.FullChunkStatus.INACCESSIBLE; ++ } ++ } ++ ++ // note: only while updating ticket level, so holds ticket update lock + scheduling lock ++ protected final boolean onTicketUpdate(final ChunkHolder.FullChunkStatus oldState, final ChunkHolder.FullChunkStatus newState) { ++ if (oldState == newState) { ++ return false; ++ } ++ ++ // preserve border request after full status complete, as it does not set anything in the bitset ++ ChunkHolder.FullChunkStatus byNeighbours = getStatusForBitset(this.fullNeighbourChunksLoadedBitset); ++ if (byNeighbours == ChunkHolder.FullChunkStatus.INACCESSIBLE && newState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && this.currentGenStatus == ChunkStatus.FULL) { ++ byNeighbours = ChunkHolder.FullChunkStatus.BORDER; ++ } ++ ++ final ChunkHolder.FullChunkStatus toSet; ++ ++ if (newState.isOrAfter(byNeighbours)) { ++ // must clamp to neighbours level, even though we have
the ticket level ++ toSet = byNeighbours; ++ } else { ++ // must clamp to ticket level, even though we have the neighbours ++ toSet = newState; ++ } ++ ++ long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this); ++ ++ if (curr == ((long)toSet.ordinal() | ((long)toSet.ordinal() << 32))) { ++ // nothing to do ++ return false; ++ } ++ ++ int failures = 0; ++ for (;;) { ++ final long update = (curr & ~PENDING_STATUS_MASK) | ((long)toSet.ordinal() << 32); ++ if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) { ++ return true; ++ } ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ protected final boolean onNeighbourChange(final long bitsetBefore, final long bitsetAfter) { ++ ChunkHolder.FullChunkStatus oldState = getStatusForBitset(bitsetBefore); ++ ChunkHolder.FullChunkStatus newState = getStatusForBitset(bitsetAfter); ++ final ChunkHolder.FullChunkStatus currStateTicketLevel = ChunkHolder.getFullChunkStatus(this.oldTicketLevel); ++ if (oldState.isOrAfter(currStateTicketLevel)) { ++ oldState = currStateTicketLevel; ++ } ++ if (newState.isOrAfter(currStateTicketLevel)) { ++ newState = currStateTicketLevel; ++ } ++ // preserve border request after full status complete, as it does not set anything in the bitset ++ if (newState == ChunkHolder.FullChunkStatus.INACCESSIBLE && currStateTicketLevel.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && this.currentGenStatus == ChunkStatus.FULL) { ++ newState = ChunkHolder.FullChunkStatus.BORDER; ++ } ++ ++ if (oldState == newState) { ++ return false; ++ } ++ ++ int failures = 0; ++ for (long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this);;) { ++ final long update = (curr & ~PENDING_STATUS_MASK) | ((long)newState.ordinal() << 32); ++ if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) { ++ return true; ++ } ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ private boolean queueBorderFullStatus(final boolean loaded, final List changedFullStatus) { ++ final ChunkHolder.FullChunkStatus toStatus = loaded ? ChunkHolder.FullChunkStatus.BORDER : ChunkHolder.FullChunkStatus.INACCESSIBLE; ++ ++ int failures = 0; ++ for (long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this);;) { ++ final ChunkHolder.FullChunkStatus currPending = getPendingChunkStatus(curr); ++ if (loaded && currPending != ChunkHolder.FullChunkStatus.INACCESSIBLE) { ++ throw new IllegalStateException("Expected " + ChunkHolder.FullChunkStatus.INACCESSIBLE + " for pending, but got " + currPending); ++ } ++ ++ final long update = (curr & ~PENDING_STATUS_MASK) | ((long)toStatus.ordinal() << 32); ++ if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) { ++ if ((int)(update) != (int)(update >>> 32)) { ++ changedFullStatus.add(this); ++ return true; ++ } ++ return false; ++ } ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ // only call on main thread, must hold ticket level and scheduling lock ++ private void onFullChunkLoadChange(final boolean loaded, final List changedFullStatus) { ++ for (int dz = -NEIGHBOUR_RADIUS; dz <= NEIGHBOUR_RADIUS; ++dz) { ++ for (int dx = -NEIGHBOUR_RADIUS; dx <= NEIGHBOUR_RADIUS; ++dx) { ++ final NewChunkHolder holder = (dx | dz) == 0 ? 
this : this.scheduler.chunkHolderManager.getChunkHolder(dx + this.chunkX, dz + this.chunkZ); ++ if (loaded) { ++ if (holder.setNeighbourFullLoaded(-dx, -dz)) { ++ changedFullStatus.add(holder); ++ } ++ } else { ++ if (holder != null && holder.setNeighbourFullUnloaded(-dx, -dz)) { ++ changedFullStatus.add(holder); ++ } ++ } ++ } ++ } ++ } ++ ++ private ChunkHolder.FullChunkStatus updateCurrentState(final ChunkHolder.FullChunkStatus to) { ++ int failures = 0; ++ for (long curr = (long)CHUNK_STATUS_HANDLE.getVolatile((NewChunkHolder)this);;) { ++ final long update = (curr & PENDING_STATUS_MASK) | (long)to.ordinal(); ++ if (curr == (curr = (long)CHUNK_STATUS_HANDLE.compareAndExchange((NewChunkHolder)this, curr, update))) { ++ return getPendingChunkStatus(curr); ++ } ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ private void changeEntityChunkStatus(final ChunkHolder.FullChunkStatus toStatus) { ++ this.world.getEntityLookup().chunkStatusChange(this.chunkX, this.chunkZ, toStatus); ++ } ++ ++ private boolean processingFullStatus = false; ++ ++ // only to be called on the main thread, no locks need to be held ++ public boolean handleFullStatusChange(final List changedFullStatus) { ++ TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Cannot update full status thread off-main"); ++ ++ boolean ret = false; ++ ++ if (this.processingFullStatus) { ++ // we cannot process updates recursively ++ return ret; ++ } ++ ++ // note: use opaque reads for chunk status read since we need it to be atomic ++ ++ // test if anything changed ++ final long statusCheck = (long)CHUNK_STATUS_HANDLE.getOpaque((NewChunkHolder)this); ++ if ((int)statusCheck == (int)(statusCheck >>> 32)) { ++ // nothing changed ++ return ret; ++ } ++ ++ final ChunkTaskScheduler scheduler = this.scheduler; ++ final ChunkHolderManager holderManager = scheduler.chunkHolderManager; ++ final int ticketKeep; ++ final Long ticketId; ++ holderManager.ticketLock.lock(); ++ try { ++ ticketKeep = this.currentTicketLevel; ++ ticketId = Long.valueOf(holderManager.getNextStatusUpgradeId()); ++ holderManager.addTicketAtLevel(TicketType.STATUS_UPGRADE, this.chunkX, this.chunkZ, ticketKeep, ticketId); ++ } finally { ++ holderManager.ticketLock.unlock(); ++ } ++ ++ this.processingFullStatus = true; ++ try { ++ for (;;) { ++ final long currStateEncoded = (long)CHUNK_STATUS_HANDLE.getOpaque((NewChunkHolder)this); ++ final ChunkHolder.FullChunkStatus currState = getCurrentChunkStatus(currStateEncoded); ++ ChunkHolder.FullChunkStatus nextState = getPendingChunkStatus(currStateEncoded); ++ if (currState == nextState) { ++ if (nextState == ChunkHolder.FullChunkStatus.INACCESSIBLE) { ++ this.scheduler.schedulingLock.lock(); ++ try { ++ this.checkUnload(); ++ } finally { ++ this.scheduler.schedulingLock.unlock(); ++ } ++ } ++ break; ++ } ++ ++ // chunks cannot downgrade state while status is pending a change ++ final LevelChunk chunk = (LevelChunk)this.currentChunk; ++ ++ // Note: we assume that only load/unload contain plugin logic ++ // plugin logic is anything stupid enough to possibly change the chunk status while it is already ++ // being changed (i.e during load it is possible it will try to set to full ticking) ++ // in order to allow this change, we also need this plugin logic to be contained strictly after all ++ // of the chunk system load callbacks are invoked ++ if (nextState.isOrAfter(currState)) { ++ // state upgrade ++ if 
(!currState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && nextState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.BORDER); ++ holderManager.ensureInAutosave(this); ++ chunk.pushChunkIntoLoadedMap(); ++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.BORDER); ++ chunk.onChunkLoad(this); ++ this.onFullChunkLoadChange(true, changedFullStatus); ++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.BORDER, chunk); ++ } ++ ++ if (!currState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING) && nextState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING)) { ++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.TICKING); ++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.TICKING); ++ chunk.onChunkTicking(this); ++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.TICKING, chunk); ++ } ++ ++ if (!currState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING) && nextState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING)) { ++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.ENTITY_TICKING); ++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.ENTITY_TICKING); ++ chunk.onChunkEntityTicking(this); ++ this.completeFullStatusConsumers(ChunkHolder.FullChunkStatus.ENTITY_TICKING, chunk); ++ } ++ } else { ++ if (currState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING) && !nextState.isOrAfter(ChunkHolder.FullChunkStatus.ENTITY_TICKING)) { ++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.TICKING); ++ chunk.onChunkNotEntityTicking(this); ++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.TICKING); ++ } ++ ++ if (currState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING) && !nextState.isOrAfter(ChunkHolder.FullChunkStatus.TICKING)) { ++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.BORDER); ++ chunk.onChunkNotTicking(this); ++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.BORDER); ++ } ++ ++ if (currState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER) && !nextState.isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ this.onFullChunkLoadChange(false, changedFullStatus); ++ this.changeEntityChunkStatus(ChunkHolder.FullChunkStatus.INACCESSIBLE); ++ chunk.onChunkUnload(this); ++ nextState = this.updateCurrentState(ChunkHolder.FullChunkStatus.INACCESSIBLE); ++ } ++ } ++ ++ ret = true; ++ } ++ } finally { ++ this.processingFullStatus = false; ++ holderManager.removeTicketAtLevel(TicketType.STATUS_UPGRADE, this.chunkX, this.chunkZ, ticketKeep, ticketId); ++ } ++ ++ return ret; ++ } ++ ++ // note: must hold scheduling lock ++ // rets true if the current requested gen status is not null (effectively, whether further scheduling is not needed) ++ boolean upgradeGenTarget(final ChunkStatus toStatus) { ++ if (toStatus == null) { ++ throw new NullPointerException("toStatus cannot be null"); ++ } ++ if (this.requestedGenStatus == null && this.generationTask == null) { ++ return false; ++ } ++ if (this.requestedGenStatus == null || !this.requestedGenStatus.isOrAfter(toStatus)) { ++ this.requestedGenStatus = toStatus; ++ } ++ return true; ++ } ++ ++ public void setGenerationTarget(final ChunkStatus toStatus) { ++ this.requestedGenStatus = toStatus; ++ } ++ ++ public boolean hasGenerationTask() { ++ return this.generationTask != null; ++ } ++ ++ public ChunkStatus getCurrentGenStatus() { ++ return this.currentGenStatus; ++ } ++ ++ public ChunkStatus getRequestedGenStatus() { ++ return this.requestedGenStatus; ++ } ++ ++ 
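// consumers waiting for this chunk to reach a given generation status; they are invoked by ++ // completeStatusConsumers on the chunk's main-thread task queue, with a null chunk if the request is cancelled ++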
private final Reference2ObjectOpenHashMap>> statusWaiters = new Reference2ObjectOpenHashMap<>(); ++ ++ void addStatusConsumer(final ChunkStatus status, final Consumer consumer) { ++ this.statusWaiters.computeIfAbsent(status, (final ChunkStatus keyInMap) -> { ++ return new ArrayList<>(4); ++ }).add(consumer); ++ } ++ ++ private void completeStatusConsumers(ChunkStatus status, final ChunkAccess chunk) { ++ // need to tell future statuses to complete if cancelled ++ do { ++ this.completeStatusConsumers0(status, chunk); ++ } while (chunk == null && status != (status = status.getNextStatus())); ++ } ++ ++ private void completeStatusConsumers0(final ChunkStatus status, final ChunkAccess chunk) { ++ final List> consumers; ++ consumers = this.statusWaiters.remove(status); ++ ++ if (consumers == null) { ++ return; ++ } ++ ++ // must be scheduled to main, we do not trust the callback to not do anything stupid ++ this.scheduler.scheduleChunkTask(this.chunkX, this.chunkZ, () -> { ++ for (final Consumer consumer : consumers) { ++ try { ++ consumer.accept(chunk); ++ } catch (final ThreadDeath thr) { ++ throw thr; ++ } catch (final Throwable thr) { ++ LOGGER.error("Failed to process chunk status callback", thr); ++ } ++ } ++ }, PrioritisedExecutor.Priority.HIGHEST); ++ } ++ ++ private final Reference2ObjectOpenHashMap>> fullStatusWaiters = new Reference2ObjectOpenHashMap<>(); ++ ++ void addFullStatusConsumer(final ChunkHolder.FullChunkStatus status, final Consumer consumer) { ++ this.fullStatusWaiters.computeIfAbsent(status, (final ChunkHolder.FullChunkStatus keyInMap) -> { ++ return new ArrayList<>(4); ++ }).add(consumer); ++ } ++ ++ private void completeFullStatusConsumers(ChunkHolder.FullChunkStatus status, final LevelChunk chunk) { ++ // need to tell future statuses to complete if cancelled ++ final ChunkHolder.FullChunkStatus max = CHUNK_STATUS_BY_ID[CHUNK_STATUS_BY_ID.length - 1]; ++ ++ for (;;) { ++ this.completeFullStatusConsumers0(status, chunk); ++ if (chunk != null || status == max) { ++ break; ++ } ++ status = CHUNK_STATUS_BY_ID[status.ordinal() + 1]; ++ } ++ } ++ ++ private void completeFullStatusConsumers0(final ChunkHolder.FullChunkStatus status, final LevelChunk chunk) { ++ final List> consumers; ++ consumers = this.fullStatusWaiters.remove(status); ++ ++ if (consumers == null) { ++ return; ++ } ++ ++ // must be scheduled to main, we do not trust the callback to not do anything stupid ++ this.scheduler.scheduleChunkTask(this.chunkX, this.chunkZ, () -> { ++ for (final Consumer consumer : consumers) { ++ try { ++ consumer.accept(chunk); ++ } catch (final ThreadDeath thr) { ++ throw thr; ++ } catch (final Throwable thr) { ++ LOGGER.error("Failed to process chunk status callback", thr); ++ } ++ } ++ }, PrioritisedExecutor.Priority.HIGHEST); ++ } ++ ++ // note: must hold scheduling lock ++ private void onChunkGenComplete(final ChunkAccess newChunk, final ChunkStatus newStatus, ++ final List scheduleList, final List changedLoadStatus) { ++ if (!this.neighboursBlockingGenTask.isEmpty()) { ++ throw new IllegalStateException("Cannot have neighbours blocking this gen task"); ++ } ++ if (newChunk != null || (this.requestedGenStatus == null || !this.requestedGenStatus.isOrAfter(newStatus))) { ++ this.completeStatusConsumers(newStatus, newChunk); ++ } ++ // done now, clear state (must be done before scheduling new tasks) ++ this.generationTask = null; ++ this.generationTaskStatus = null; ++ if (newChunk == null) { ++ // task was cancelled ++ // should be careful as this could be called while holding 
the schedule lock and/or inside the ++ // ticket level update ++ // while a task may be cancelled, it is possible for it to be later re-scheduled ++ // however, because generationTask is only set to null on _completion_, the scheduler leaves ++ // the rescheduling logic to us here ++ final ChunkStatus requestedGenStatus = this.requestedGenStatus; ++ this.requestedGenStatus = null; ++ if (requestedGenStatus != null) { ++ // it looks like it has been requested, so we must reschedule ++ if (!this.neighboursWaitingForUs.isEmpty()) { ++ for (final Iterator> iterator = this.neighboursWaitingForUs.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) { ++ final Reference2ObjectMap.Entry entry = iterator.next(); ++ ++ final NewChunkHolder chunkHolder = entry.getKey(); ++ final ChunkStatus toStatus = entry.getValue(); ++ ++ if (!requestedGenStatus.isOrAfter(toStatus)) { ++ // if we were cancelled, we are responsible for removing the waiter ++ if (!chunkHolder.neighboursBlockingGenTask.remove(this)) { ++ throw new IllegalStateException("Corrupt state"); ++ } ++ if (chunkHolder.neighboursBlockingGenTask.isEmpty()) { ++ chunkHolder.checkUnload(); ++ } ++ iterator.remove(); ++ continue; ++ } ++ } ++ } ++ ++ // note: only after generationTask -> null, generationTaskStatus -> null, and requestedGenStatus -> null ++ this.scheduler.schedule( ++ this.chunkX, this.chunkZ, requestedGenStatus, this, scheduleList ++ ); ++ ++ // return, can't do anything further ++ return; ++ } ++ ++ if (!this.neighboursWaitingForUs.isEmpty()) { ++ for (final NewChunkHolder chunkHolder : this.neighboursWaitingForUs.keySet()) { ++ if (!chunkHolder.neighboursBlockingGenTask.remove(this)) { ++ throw new IllegalStateException("Corrupt state"); ++ } ++ if (chunkHolder.neighboursBlockingGenTask.isEmpty()) { ++ chunkHolder.checkUnload(); ++ } ++ } ++ this.neighboursWaitingForUs.clear(); ++ } ++ // reset priority, we have nothing left to generate to ++ this.setPriority(PrioritisedExecutor.Priority.NORMAL); ++ this.checkUnload(); ++ return; ++ } ++ ++ this.currentChunk = newChunk; ++ this.currentGenStatus = newStatus; ++ this.lastChunkCompletion = new ChunkCompletion(newChunk, newStatus); ++ ++ final ChunkStatus requestedGenStatus = this.requestedGenStatus; ++ ++ List needsScheduling = null; ++ boolean recalculatePriority = false; ++ for (final Iterator> iterator ++ = this.neighboursWaitingForUs.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) { ++ final Reference2ObjectMap.Entry entry = iterator.next(); ++ final NewChunkHolder neighbour = entry.getKey(); ++ final ChunkStatus requiredStatus = entry.getValue(); ++ ++ if (!newStatus.isOrAfter(requiredStatus)) { ++ if (requestedGenStatus == null || !requestedGenStatus.isOrAfter(requiredStatus)) { ++ // if we're cancelled, still need to clear this map ++ if (!neighbour.neighboursBlockingGenTask.remove(this)) { ++ throw new IllegalStateException("Neighbour is not waiting for us?"); ++ } ++ if (neighbour.neighboursBlockingGenTask.isEmpty()) { ++ neighbour.checkUnload(); ++ } ++ ++ iterator.remove(); ++ } ++ continue; ++ } ++ ++ // doesn't matter what isCancelled is here, we need to schedule if we can ++ ++ recalculatePriority = true; ++ if (!neighbour.neighboursBlockingGenTask.remove(this)) { ++ throw new IllegalStateException("Neighbour is not waiting for us?"); ++ } ++ ++ if (neighbour.neighboursBlockingGenTask.isEmpty()) { ++ if (neighbour.requestedGenStatus != null) { ++ if (needsScheduling == null) { ++ needsScheduling = new ArrayList<>(); ++ } ++ 
needsScheduling.add(neighbour); ++ } else { ++ neighbour.checkUnload(); ++ } ++ } ++ ++ // remove last; access to entry will throw if removed ++ iterator.remove(); ++ } ++ ++ if (newStatus == ChunkStatus.FULL) { ++ this.lockPriority(); ++ // must use oldTicketLevel, we hold the schedule lock but not the ticket level lock ++ // however, schedule lock needs to be held for ticket level callback, so we're fine here ++ if (ChunkHolder.getFullChunkStatus(this.oldTicketLevel).isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) { ++ this.queueBorderFullStatus(true, changedLoadStatus); ++ } ++ } ++ ++ if (recalculatePriority) { ++ this.recalculateNeighbourRequestedPriority(); ++ } ++ ++ if (requestedGenStatus != null && !newStatus.isOrAfter(requestedGenStatus)) { ++ this.scheduleNeighbours(needsScheduling, scheduleList); ++ ++ // we need to schedule more tasks now ++ this.scheduler.schedule( ++ this.chunkX, this.chunkZ, requestedGenStatus, this, scheduleList ++ ); ++ } else { ++ // we're done now ++ if (requestedGenStatus != null) { ++ this.requestedGenStatus = null; ++ } ++ // reached final stage, so stop scheduling now ++ this.setPriority(PrioritisedExecutor.Priority.NORMAL); ++ this.checkUnload(); ++ ++ this.scheduleNeighbours(needsScheduling, scheduleList); ++ } ++ } ++ ++ private void scheduleNeighbours(final List needsScheduling, final List scheduleList) { ++ if (needsScheduling != null) { ++ for (int i = 0, len = needsScheduling.size(); i < len; ++i) { ++ final NewChunkHolder neighbour = needsScheduling.get(i); ++ ++ this.scheduler.schedule( ++ neighbour.chunkX, neighbour.chunkZ, neighbour.requestedGenStatus, neighbour, scheduleList ++ ); ++ } ++ } ++ } ++ ++ public void setGenerationTask(final ChunkProgressionTask generationTask, final ChunkStatus taskStatus, ++ final List neighbours) { ++ if (this.generationTask != null || (this.currentGenStatus != null && this.currentGenStatus.isOrAfter(taskStatus))) { ++ throw new IllegalStateException("Currently generating or provided task is trying to generate to a level we are already at!"); ++ } ++ if (this.requestedGenStatus == null || !this.requestedGenStatus.isOrAfter(taskStatus)) { ++ throw new IllegalStateException("Cannot schedule generation task when not requested"); ++ } ++ this.generationTask = generationTask; ++ this.generationTaskStatus = taskStatus; ++ ++ for (int i = 0, len = neighbours.size(); i < len; ++i) { ++ neighbours.get(i).addNeighbourUsingChunk(); ++ } ++ ++ this.checkUnload(); ++ ++ generationTask.onComplete((final ChunkAccess access, final Throwable thr) -> { ++ if (generationTask != this.generationTask) { ++ throw new IllegalStateException( ++ "Cannot complete generation task '" + generationTask + "' because we are waiting on '" + this.generationTask + "' instead!" 
++ ); ++ } ++ if (thr != null) { ++ if (this.genTaskException != null) { ++ // first one is probably the TRUE problem ++ return; ++ } ++ // don't set generation task to null, so that scheduling will not attempt to create another task and it ++ // will automatically block any further scheduling usage of this chunk as it will wait forever for a failed ++ // task to complete ++ this.genTaskException = thr; ++ this.failedGenStatus = taskStatus; ++ this.genTaskFailedThread = Thread.currentThread(); ++ ++ this.scheduler.unrecoverableChunkSystemFailure(this.chunkX, this.chunkZ, Map.of( ++ "Generation task", ChunkTaskScheduler.stringIfNull(generationTask), ++ "Task to status", ChunkTaskScheduler.stringIfNull(taskStatus) ++ ), thr); ++ return; ++ } ++ ++ final boolean scheduleTasks; ++ List tasks = ChunkHolderManager.getCurrentTicketUpdateScheduling(); ++ if (tasks == null) { ++ scheduleTasks = true; ++ tasks = new ArrayList<>(); ++ } else { ++ scheduleTasks = false; ++ // we are currently updating ticket levels, so we already hold the schedule lock ++ // this means we have to leave the ticket level update to handle the scheduling ++ } ++ final List changedLoadStatus = new ArrayList<>(); ++ this.scheduler.schedulingLock.lock(); ++ try { ++ for (int i = 0, len = neighbours.size(); i < len; ++i) { ++ neighbours.get(i).removeNeighbourUsingChunk(); ++ } ++ this.onChunkGenComplete(access, taskStatus, tasks, changedLoadStatus); ++ } finally { ++ this.scheduler.schedulingLock.unlock(); ++ } ++ this.scheduler.chunkHolderManager.addChangedStatuses(changedLoadStatus); ++ ++ if (scheduleTasks) { ++ // can't hold the lock while scheduling, so we have to build the tasks and then schedule after ++ for (int i = 0, len = tasks.size(); i < len; ++i) { ++ tasks.get(i).schedule(); ++ } ++ } ++ }); ++ } ++ ++ public PoiChunk getPoiChunk() { ++ return this.poiChunk; ++ } ++ ++ public ChunkEntitySlices getEntityChunk() { ++ return this.entityChunk; ++ } ++ ++ public long lastAutoSave; ++ ++ public static final record SaveStat(boolean savedChunk, boolean savedEntityChunk, boolean savedPoiChunk) {} ++ ++ public SaveStat save(final boolean shutdown, final boolean unloading) { ++ TickThread.ensureTickThread(this.world, this.chunkX, this.chunkZ, "Cannot save data off-main"); ++ ++ ChunkAccess chunk = this.getCurrentChunk(); ++ PoiChunk poi = this.getPoiChunk(); ++ ChunkEntitySlices entities = this.getEntityChunk(); ++ boolean executedUnloadTask = false; ++ ++ if (shutdown) { ++ // make sure that the async unloads complete ++ if (this.unloadState != null) { ++ // must have errored during unload ++ chunk = this.unloadState.chunk(); ++ poi = this.unloadState.poiChunk(); ++ entities = this.unloadState.entityChunk(); ++ } ++ final UnloadTask chunkUnloadTask = this.chunkDataUnload; ++ final DelayedPrioritisedTask chunkDataUnloadTask = chunkUnloadTask == null ? 
null : chunkUnloadTask.task(); ++ if (chunkDataUnloadTask != null) { ++ final PrioritisedExecutor.PrioritisedTask unloadTask = chunkDataUnloadTask.getTask(); ++ if (unloadTask != null) { ++ executedUnloadTask = unloadTask.execute(); ++ } ++ } ++ } ++ ++ boolean canSaveChunk = !(chunk instanceof LevelChunk levelChunk && levelChunk.mustNotSave) && ++ (chunk != null && ((shutdown || chunk instanceof LevelChunk) && chunk.isUnsaved())); ++ boolean canSavePOI = !(chunk instanceof LevelChunk levelChunk && levelChunk.mustNotSave) && (poi != null && poi.isDirty()); ++ boolean canSaveEntities = entities != null; ++ ++ try (co.aikar.timings.Timing ignored = this.world.timings.chunkSave.startTiming()) { // Paper ++ if (canSaveChunk) { ++ canSaveChunk = this.saveChunk(chunk, unloading); ++ } ++ if (canSavePOI) { ++ canSavePOI = this.savePOI(poi, unloading); ++ } ++ if (canSaveEntities) { ++ // on shutdown, we need to force transient entity chunks to save ++ canSaveEntities = this.saveEntities(entities, unloading || shutdown); ++ if (unloading || shutdown) { ++ this.lastEntityUnload = null; ++ } ++ } ++ } ++ ++ return executedUnloadTask | canSaveChunk | canSaveEntities | canSavePOI ? new SaveStat(executedUnloadTask || canSaveChunk, canSaveEntities, canSavePOI): null; ++ } ++ ++ static final class AsyncChunkSerializeTask implements Runnable { ++ ++ private final ServerLevel world; ++ private final ChunkAccess chunk; ++ private final ChunkSerializer.AsyncSaveData asyncSaveData; ++ private final NewChunkHolder toComplete; ++ ++ public AsyncChunkSerializeTask(final ServerLevel world, final ChunkAccess chunk, final ChunkSerializer.AsyncSaveData asyncSaveData, ++ final NewChunkHolder toComplete) { ++ this.world = world; ++ this.chunk = chunk; ++ this.asyncSaveData = asyncSaveData; ++ this.toComplete = toComplete; ++ } ++ ++ @Override ++ public void run() { ++ final CompoundTag toSerialize; ++ try { ++ toSerialize = ChunkSerializer.saveChunk(this.world, this.chunk, this.asyncSaveData); ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable throwable) { ++ LOGGER.error("Failed to asynchronously save chunk " + this.chunk.getPos() + " for world '" + this.world.getWorld().getName() + "', falling back to synchronous save", throwable); ++ this.world.chunkTaskScheduler.scheduleChunkTask(this.chunk.locX, this.chunk.locZ, () -> { ++ final CompoundTag synchronousSave; ++ try { ++ synchronousSave = ChunkSerializer.saveChunk(AsyncChunkSerializeTask.this.world, AsyncChunkSerializeTask.this.chunk, AsyncChunkSerializeTask.this.asyncSaveData); ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable throwable2) { ++ LOGGER.error("Failed to synchronously save chunk " + AsyncChunkSerializeTask.this.chunk.getPos() + " for world '" + AsyncChunkSerializeTask.this.world.getWorld().getName() + "', chunk data will be lost", throwable2); ++ AsyncChunkSerializeTask.this.toComplete.completeAsyncChunkDataSave(null); ++ return; ++ } ++ ++ AsyncChunkSerializeTask.this.toComplete.completeAsyncChunkDataSave(synchronousSave); ++ LOGGER.info("Successfully serialized chunk " + AsyncChunkSerializeTask.this.chunk.getPos() + " for world '" + AsyncChunkSerializeTask.this.world.getWorld().getName() + "' synchronously"); ++ ++ }, PrioritisedExecutor.Priority.HIGHEST); ++ return; ++ } ++ this.toComplete.completeAsyncChunkDataSave(toSerialize); ++ } ++ ++ @Override ++ public String toString() { ++ return "AsyncChunkSerializeTask{" + ++ "chunk={pos=" + this.chunk.getPos() + ",world=\"" + 
this.world.getWorld().getName() + "\"}" + ++ "}"; ++ } ++ } ++ ++ private boolean saveChunk(final ChunkAccess chunk, final boolean unloading) { ++ if (!chunk.isUnsaved()) { ++ if (unloading) { ++ this.completeAsyncChunkDataSave(null); ++ } ++ return false; ++ } ++ boolean completing = false; ++ try { ++ if (unloading) { ++ try { ++ final ChunkSerializer.AsyncSaveData asyncSaveData = ChunkSerializer.getAsyncSaveData(this.world, chunk); ++ ++ final PrioritisedExecutor.PrioritisedTask task = this.scheduler.loadExecutor.createTask(new AsyncChunkSerializeTask(this.world, chunk, asyncSaveData, this)); ++ ++ this.chunkDataUnload.task().setTask(task); ++ ++ task.queue(); ++ ++ chunk.setUnsaved(false); ++ ++ return true; ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable thr) { ++ LOGGER.error("Failed to prepare async chunk data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "', falling back to synchronous save", thr); ++ // fall through to synchronous save ++ } ++ } ++ ++ final CompoundTag save = ChunkSerializer.saveChunk(this.world, chunk, null); ++ ++ if (unloading) { ++ completing = true; ++ this.completeAsyncChunkDataSave(save); ++ LOGGER.info("Successfully serialized chunk data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "' synchronously"); ++ } else { ++ RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, save, RegionFileIOThread.RegionFileType.CHUNK_DATA); ++ } ++ chunk.setUnsaved(false); ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable thr) { ++ LOGGER.error("Failed to save chunk data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'"); ++ if (unloading && !completing) { ++ this.completeAsyncChunkDataSave(null); ++ } ++ } ++ ++ return true; ++ } ++ ++ private boolean lastEntitySaveNull; ++ private CompoundTag lastEntityUnload; ++ private boolean saveEntities(final ChunkEntitySlices entities, final boolean unloading) { ++ try { ++ CompoundTag mergeFrom = null; ++ if (entities.isTransient()) { ++ if (!unloading) { ++ // if we're a transient chunk, we cannot save until unloading because otherwise a double save will ++ // result in double adding the entities ++ return false; ++ } ++ try { ++ mergeFrom = RegionFileIOThread.loadData(this.world, this.chunkX, this.chunkZ, RegionFileIOThread.RegionFileType.ENTITY_DATA, PrioritisedExecutor.Priority.BLOCKING); ++ } catch (final Exception ex) { ++ LOGGER.error("Cannot merge transient entities for chunk (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "', data on disk will be replaced", ex); ++ } ++ } ++ ++ final CompoundTag save = entities.save(); ++ if (mergeFrom != null) { ++ if (save == null) { ++ // don't override the data on disk with nothing ++ return false; ++ } else { ++ EntityStorage.copyEntities(mergeFrom, save); ++ } ++ } ++ if (save == null && this.lastEntitySaveNull) { ++ return false; ++ } ++ ++ RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, save, RegionFileIOThread.RegionFileType.ENTITY_DATA); ++ this.lastEntitySaveNull = save == null; ++ if (unloading) { ++ this.lastEntityUnload = save; ++ } ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable thr) { ++ LOGGER.error("Failed to save entity data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'"); ++ } ++ ++ return true; ++ } ++ ++ private 
boolean lastPoiSaveNull; ++ private boolean savePOI(final PoiChunk poi, final boolean unloading) { ++ try { ++ final CompoundTag save = poi.save(); ++ poi.setDirty(false); ++ if (save == null && this.lastPoiSaveNull) { ++ if (unloading) { ++ this.poiDataUnload.completable().complete(null); ++ } ++ return false; ++ } ++ ++ RegionFileIOThread.scheduleSave(this.world, this.chunkX, this.chunkZ, save, RegionFileIOThread.RegionFileType.POI_DATA); ++ this.lastPoiSaveNull = save == null; ++ if (unloading) { ++ this.poiDataUnload.completable().complete(save); ++ } ++ } catch (final ThreadDeath death) { ++ throw death; ++ } catch (final Throwable thr) { ++ LOGGER.error("Failed to save poi data (" + this.chunkX + "," + this.chunkZ + ") in world '" + this.world.getWorld().getName() + "'"); ++ } ++ ++ return true; ++ } ++ ++ @Override ++ public String toString() { ++ final ChunkCompletion lastCompletion = this.lastChunkCompletion; ++ final ChunkEntitySlices entityChunk = this.entityChunk; ++ final long chunkStatus = this.chunkStatus; ++ final int fullChunkStatus = (int)chunkStatus; ++ final int pendingChunkStatus = (int)(chunkStatus >>> 32); ++ final ChunkHolder.FullChunkStatus currentFullStatus = fullChunkStatus < 0 || fullChunkStatus >= CHUNK_STATUS_BY_ID.length ? null : CHUNK_STATUS_BY_ID[fullChunkStatus]; ++ final ChunkHolder.FullChunkStatus pendingFullStatus = pendingChunkStatus < 0 || pendingChunkStatus >= CHUNK_STATUS_BY_ID.length ? null : CHUNK_STATUS_BY_ID[pendingChunkStatus]; ++ return "NewChunkHolder{" + ++ "world=" + this.world.getWorld().getName() + ++ ", chunkX=" + this.chunkX + ++ ", chunkZ=" + this.chunkZ + ++ ", entityChunkFromDisk=" + (entityChunk != null && !entityChunk.isTransient()) + ++ ", lastChunkCompletion={chunk_class=" + (lastCompletion == null || lastCompletion.chunk() == null ? "null" : lastCompletion.chunk().getClass().getName()) + ",status=" + (lastCompletion == null ? 
"null" : lastCompletion.genStatus()) + "}" + ++ ", currentGenStatus=" + this.currentGenStatus + ++ ", requestedGenStatus=" + this.requestedGenStatus + ++ ", generationTask=" + this.generationTask + ++ ", generationTaskStatus=" + this.generationTaskStatus + ++ ", priority=" + this.priority + ++ ", priorityLocked=" + this.priorityLocked + ++ ", neighbourRequestedPriority=" + this.neighbourRequestedPriority + ++ ", effective_priority=" + this.getEffectivePriority() + ++ ", oldTicketLevel=" + this.oldTicketLevel + ++ ", currentTicketLevel=" + this.currentTicketLevel + ++ ", totalNeighboursUsingThisChunk=" + this.totalNeighboursUsingThisChunk + ++ ", fullNeighbourChunksLoadedBitset=" + this.fullNeighbourChunksLoadedBitset + ++ ", chunkStatusRaw=" + chunkStatus + ++ ", currentChunkStatus=" + currentFullStatus + ++ ", pendingChunkStatus=" + pendingFullStatus + ++ ", is_unload_safe=" + this.isSafeToUnload() + ++ ", killed=" + this.killed + ++ '}'; ++ } ++ ++ private static JsonElement serializeCompletable(final Completable completable) { ++ if (completable == null) { ++ return new JsonPrimitive("null"); ++ } ++ ++ final JsonObject ret = new JsonObject(); ++ final boolean isCompleted = completable.isCompleted(); ++ ret.addProperty("completed", Boolean.valueOf(isCompleted)); ++ ++ if (isCompleted) { ++ ret.addProperty("completed_exceptionally", Boolean.valueOf(completable.getThrowable() != null)); ++ } ++ ++ return ret; ++ } ++ ++ // holds ticket and scheduling lock ++ public JsonObject getDebugJson() { ++ final JsonObject ret = new JsonObject(); ++ ++ final ChunkCompletion lastCompletion = this.lastChunkCompletion; ++ final ChunkEntitySlices slices = this.entityChunk; ++ final PoiChunk poiChunk = this.poiChunk; ++ ++ ret.addProperty("chunkX", Integer.valueOf(this.chunkX)); ++ ret.addProperty("chunkZ", Integer.valueOf(this.chunkZ)); ++ ret.addProperty("entity_chunk", slices == null ? "null" : "transient=" + slices.isTransient()); ++ ret.addProperty("poi_chunk", "null=" + (poiChunk == null)); ++ ret.addProperty("completed_chunk_class", lastCompletion == null ? "null" : lastCompletion.chunk().getClass().getName()); ++ ret.addProperty("completed_gen_status", lastCompletion == null ? 
"null" : lastCompletion.genStatus().toString()); ++ ret.addProperty("priority", Objects.toString(this.priority)); ++ ret.addProperty("neighbour_requested_priority", Objects.toString(this.neighbourRequestedPriority)); ++ ret.addProperty("generation_task", Objects.toString(this.generationTask)); ++ ret.addProperty("is_safe_unload", Objects.toString(this.isSafeToUnload())); ++ ret.addProperty("old_ticket_level", Integer.valueOf(this.oldTicketLevel)); ++ ret.addProperty("current_ticket_level", Integer.valueOf(this.currentTicketLevel)); ++ ret.addProperty("neighbours_using_chunk", Integer.valueOf(this.totalNeighboursUsingThisChunk)); ++ ++ final JsonObject neighbourWaitState = new JsonObject(); ++ ret.add("neighbour_state", neighbourWaitState); ++ ++ final JsonArray blockingGenNeighbours = new JsonArray(); ++ neighbourWaitState.add("blocking_gen_task", blockingGenNeighbours); ++ for (final NewChunkHolder blockingGenNeighbour : this.neighboursBlockingGenTask) { ++ final JsonObject neighbour = new JsonObject(); ++ blockingGenNeighbours.add(neighbour); ++ ++ neighbour.addProperty("chunkX", Integer.valueOf(blockingGenNeighbour.chunkX)); ++ neighbour.addProperty("chunkZ", Integer.valueOf(blockingGenNeighbour.chunkZ)); ++ } ++ ++ final JsonArray neighboursWaitingForUs = new JsonArray(); ++ neighbourWaitState.add("neighbours_waiting_on_us", neighboursWaitingForUs); ++ for (final Reference2ObjectMap.Entry entry : this.neighboursWaitingForUs.reference2ObjectEntrySet()) { ++ final NewChunkHolder holder = entry.getKey(); ++ final ChunkStatus status = entry.getValue(); ++ ++ final JsonObject neighbour = new JsonObject(); ++ neighboursWaitingForUs.add(neighbour); ++ ++ ++ neighbour.addProperty("chunkX", Integer.valueOf(holder.chunkX)); ++ neighbour.addProperty("chunkZ", Integer.valueOf(holder.chunkZ)); ++ neighbour.addProperty("waiting_for", Objects.toString(status)); ++ } ++ ++ ret.addProperty("fullchunkstatus", Objects.toString(this.getChunkStatus())); ++ ret.addProperty("fullchunkstatus_raw", Long.valueOf(this.chunkStatus)); ++ ret.addProperty("generation_task", Objects.toString(this.generationTask)); ++ ret.addProperty("requested_generation", Objects.toString(this.requestedGenStatus)); ++ ret.addProperty("has_entity_load_task", Boolean.valueOf(this.entityDataLoadTask != null)); ++ ret.addProperty("has_poi_load_task", Boolean.valueOf(this.poiDataLoadTask != null)); ++ ++ final UnloadTask entityDataUnload = this.entityDataUnload; ++ final UnloadTask poiDataUnload = this.poiDataUnload; ++ final UnloadTask chunkDataUnload = this.chunkDataUnload; ++ ++ ret.add("entity_unload_completable", serializeCompletable(entityDataUnload == null ? null : entityDataUnload.completable())); ++ ret.add("poi_unload_completable", serializeCompletable(poiDataUnload == null ? null : poiDataUnload.completable())); ++ ret.add("chunk_unload_completable", serializeCompletable(chunkDataUnload == null ? null : chunkDataUnload.completable())); ++ ++ final DelayedPrioritisedTask unloadTask = chunkDataUnload == null ? 
null : chunkDataUnload.task(); ++ if (unloadTask == null) { ++ ret.addProperty("unload_task_priority", "null"); ++ ret.addProperty("unload_task_priority_raw", "null"); ++ } else { ++ ret.addProperty("unload_task_priority", Objects.toString(unloadTask.getPriority())); ++ ret.addProperty("unload_task_priority_raw", Integer.valueOf(unloadTask.getPriorityInternal())); ++ } ++ ++ ret.addProperty("killed", Boolean.valueOf(this.killed)); ++ ++ return ret; ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/PriorityHolder.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/PriorityHolder.java +new file mode 100644 +index 0000000000000000000000000000000000000000..b4c56bf12dc8dd17452210ece4fd67411cc6b2fd +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/PriorityHolder.java +@@ -0,0 +1,215 @@ ++package io.papermc.paper.chunk.system.scheduling; ++ ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; ++import java.lang.invoke.VarHandle; ++ ++public abstract class PriorityHolder { ++ ++ protected volatile int priority; ++ protected static final VarHandle PRIORITY_HANDLE = ConcurrentUtil.getVarHandle(PriorityHolder.class, "priority", int.class); ++ ++ protected static final int PRIORITY_SCHEDULED = Integer.MIN_VALUE >>> 0; ++ protected static final int PRIORITY_EXECUTED = Integer.MIN_VALUE >>> 1; ++ ++ protected final int getPriorityVolatile() { ++ return (int)PRIORITY_HANDLE.getVolatile((PriorityHolder)this); ++ } ++ ++ protected final int compareAndExchangePriorityVolatile(final int expect, final int update) { ++ return (int)PRIORITY_HANDLE.compareAndExchange((PriorityHolder)this, (int)expect, (int)update); ++ } ++ ++ protected final int getAndOrPriorityVolatile(final int val) { ++ return (int)PRIORITY_HANDLE.getAndBitwiseOr((PriorityHolder)this, (int)val); ++ } ++ ++ protected final void setPriorityPlain(final int val) { ++ PRIORITY_HANDLE.set((PriorityHolder)this, (int)val); ++ } ++ ++ protected PriorityHolder(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ this.setPriorityPlain(priority.priority); ++ } ++ ++ // used only for debug json ++ public boolean isScheduled() { ++ return (this.getPriorityVolatile() & PRIORITY_SCHEDULED) != 0; ++ } ++ ++ // returns false if cancelled ++ protected boolean markExecuting() { ++ return (this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) == 0; ++ } ++ ++ protected boolean isMarkedExecuted() { ++ return (this.getPriorityVolatile() & PRIORITY_EXECUTED) != 0; ++ } ++ ++ public void cancel() { ++ if ((this.getAndOrPriorityVolatile(PRIORITY_EXECUTED) & PRIORITY_EXECUTED) != 0) { ++ // cancelled already ++ return; ++ } ++ this.cancelScheduled(); ++ } ++ ++ public void schedule() { ++ int priority = this.getPriorityVolatile(); ++ ++ if ((priority & PRIORITY_SCHEDULED) != 0) { ++ throw new IllegalStateException("schedule() called twice"); ++ } ++ ++ if ((priority & PRIORITY_EXECUTED) != 0) { ++ // cancelled ++ return; ++ } ++ ++ this.scheduleTask(PrioritisedExecutor.Priority.getPriority(priority)); ++ ++ int failures = 0; ++ for (;;) { ++ if (priority == (priority = this.compareAndExchangePriorityVolatile(priority, priority | PRIORITY_SCHEDULED))) { ++ return; ++ } ++ ++ if ((priority & PRIORITY_SCHEDULED) != 0) { ++ throw new IllegalStateException("schedule() called 
twice"); ++ } ++ ++ if ((priority & PRIORITY_EXECUTED) != 0) { ++ // cancelled or executed ++ return; ++ } ++ ++ this.setPriorityScheduled(PrioritisedExecutor.Priority.getPriority(priority)); ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ public final PrioritisedExecutor.Priority getPriority() { ++ final int ret = this.getPriorityVolatile(); ++ if ((ret & PRIORITY_EXECUTED) != 0) { ++ return PrioritisedExecutor.Priority.COMPLETING; ++ } ++ if ((ret & PRIORITY_SCHEDULED) != 0) { ++ return this.getScheduledPriority(); ++ } ++ return PrioritisedExecutor.Priority.getPriority(ret); ++ } ++ ++ public final void lowerPriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ ++ int failures = 0; ++ for (int curr = this.getPriorityVolatile();;) { ++ if ((curr & PRIORITY_EXECUTED) != 0) { ++ return; ++ } ++ ++ if ((curr & PRIORITY_SCHEDULED) != 0) { ++ this.lowerPriorityScheduled(priority); ++ return; ++ } ++ ++ if (!priority.isLowerPriority(curr)) { ++ return; ++ } ++ ++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority))) { ++ return; ++ } ++ ++ // failed, retry ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ public final void setPriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ ++ int failures = 0; ++ for (int curr = this.getPriorityVolatile();;) { ++ if ((curr & PRIORITY_EXECUTED) != 0) { ++ return; ++ } ++ ++ if ((curr & PRIORITY_SCHEDULED) != 0) { ++ this.setPriorityScheduled(priority); ++ return; ++ } ++ ++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority))) { ++ return; ++ } ++ ++ // failed, retry ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ public final void raisePriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ ++ int failures = 0; ++ for (int curr = this.getPriorityVolatile();;) { ++ if ((curr & PRIORITY_EXECUTED) != 0) { ++ return; ++ } ++ ++ if ((curr & PRIORITY_SCHEDULED) != 0) { ++ this.raisePriorityScheduled(priority); ++ return; ++ } ++ ++ if (!priority.isHigherPriority(curr)) { ++ return; ++ } ++ ++ if (curr == (curr = this.compareAndExchangePriorityVolatile(curr, priority.priority))) { ++ return; ++ } ++ ++ // failed, retry ++ ++ ++failures; ++ for (int i = 0; i < failures; ++i) { ++ ConcurrentUtil.backoff(); ++ } ++ } ++ } ++ ++ protected abstract void cancelScheduled(); ++ ++ protected abstract PrioritisedExecutor.Priority getScheduledPriority(); ++ ++ protected abstract void scheduleTask(final PrioritisedExecutor.Priority priority); ++ ++ protected abstract void lowerPriorityScheduled(final PrioritisedExecutor.Priority priority); ++ ++ protected abstract void setPriorityScheduled(final PrioritisedExecutor.Priority priority); ++ ++ protected abstract void raisePriorityScheduled(final PrioritisedExecutor.Priority priority); ++} +diff --git a/src/main/java/io/papermc/paper/command/PaperCommand.java b/src/main/java/io/papermc/paper/command/PaperCommand.java +index 
a52e6e8be323863ece6624a8ae7f9455279bdc23..e3467aaf6d0c8d486b84362e3c20b3fe631b50ff 100644 +--- a/src/main/java/io/papermc/paper/command/PaperCommand.java ++++ b/src/main/java/io/papermc/paper/command/PaperCommand.java +@@ -40,6 +40,7 @@ public final class PaperCommand extends Command { + commands.put(Set.of("callback"), new CallbackCommand()); + commands.put(Set.of("dumpplugins"), new DumpPluginsCommand()); + commands.put(Set.of("fixlight"), new FixLightCommand()); ++ commands.put(Set.of("debug", "chunkinfo", "holderinfo"), new ChunkDebugCommand()); + + return commands.entrySet().stream() + .flatMap(entry -> entry.getKey().stream().map(s -> Map.entry(s, entry.getValue()))) +diff --git a/src/main/java/io/papermc/paper/command/subcommands/ChunkDebugCommand.java b/src/main/java/io/papermc/paper/command/subcommands/ChunkDebugCommand.java +new file mode 100644 +index 0000000000000000000000000000000000000000..a6fb7ae77d7cad2243e28a33718e4631f65697fa +--- /dev/null ++++ b/src/main/java/io/papermc/paper/command/subcommands/ChunkDebugCommand.java +@@ -0,0 +1,264 @@ ++package io.papermc.paper.command.subcommands; ++ ++import io.papermc.paper.command.CommandUtil; ++import io.papermc.paper.command.PaperSubcommand; ++import java.io.File; ++import java.time.LocalDateTime; ++import java.time.format.DateTimeFormatter; ++import java.util.ArrayList; ++import java.util.Collections; ++import java.util.List; ++import java.util.Locale; ++import io.papermc.paper.util.MCUtil; ++import net.minecraft.server.MinecraftServer; ++import net.minecraft.server.level.ChunkHolder; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.world.level.chunk.ChunkAccess; ++import net.minecraft.world.level.chunk.ImposterProtoChunk; ++import net.minecraft.world.level.chunk.LevelChunk; ++import net.minecraft.world.level.chunk.ProtoChunk; ++import org.bukkit.Bukkit; ++import org.bukkit.command.CommandSender; ++import org.bukkit.craftbukkit.CraftWorld; ++import org.checkerframework.checker.nullness.qual.NonNull; ++import org.checkerframework.checker.nullness.qual.Nullable; ++import org.checkerframework.framework.qual.DefaultQualifier; ++ ++import static net.kyori.adventure.text.Component.text; ++import static net.kyori.adventure.text.format.NamedTextColor.BLUE; ++import static net.kyori.adventure.text.format.NamedTextColor.DARK_AQUA; ++import static net.kyori.adventure.text.format.NamedTextColor.GREEN; ++import static net.kyori.adventure.text.format.NamedTextColor.RED; ++ ++@DefaultQualifier(NonNull.class) ++public final class ChunkDebugCommand implements PaperSubcommand { ++ @Override ++ public boolean execute(final CommandSender sender, final String subCommand, final String[] args) { ++ switch (subCommand) { ++ case "debug" -> this.doDebug(sender, args); ++ case "chunkinfo" -> this.doChunkInfo(sender, args); ++ case "holderinfo" -> this.doHolderInfo(sender, args); ++ } ++ return true; ++ } ++ ++ @Override ++ public List tabComplete(final CommandSender sender, final String subCommand, final String[] args) { ++ switch (subCommand) { ++ case "debug" -> { ++ if (args.length == 1) { ++ return CommandUtil.getListMatchingLast(sender, args, "help", "chunks"); ++ } ++ } ++ case "holderinfo" -> { ++ List worldNames = new ArrayList<>(); ++ worldNames.add("*"); ++ for (org.bukkit.World world : Bukkit.getWorlds()) { ++ worldNames.add(world.getName()); ++ } ++ if (args.length == 1) { ++ return CommandUtil.getListMatchingLast(sender, args, worldNames); ++ } ++ } ++ case "chunkinfo" -> { ++ List worldNames = new ArrayList<>(); ++ 
worldNames.add("*"); ++ for (org.bukkit.World world : Bukkit.getWorlds()) { ++ worldNames.add(world.getName()); ++ } ++ if (args.length == 1) { ++ return CommandUtil.getListMatchingLast(sender, args, worldNames); ++ } ++ } ++ } ++ return Collections.emptyList(); ++ } ++ ++ private void doChunkInfo(final CommandSender sender, final String[] args) { ++ List worlds; ++ if (args.length < 1 || args[0].equals("*")) { ++ worlds = Bukkit.getWorlds(); ++ } else { ++ worlds = new ArrayList<>(args.length); ++ for (final String arg : args) { ++ org.bukkit.@Nullable World world = Bukkit.getWorld(arg); ++ if (world == null) { ++ sender.sendMessage(text("World '" + arg + "' is invalid", RED)); ++ return; ++ } ++ worlds.add(world); ++ } ++ } ++ ++ int accumulatedTotal = 0; ++ int accumulatedInactive = 0; ++ int accumulatedBorder = 0; ++ int accumulatedTicking = 0; ++ int accumulatedEntityTicking = 0; ++ ++ for (final org.bukkit.World bukkitWorld : worlds) { ++ final ServerLevel world = ((CraftWorld) bukkitWorld).getHandle(); ++ ++ int total = 0; ++ int inactive = 0; ++ int border = 0; ++ int ticking = 0; ++ int entityTicking = 0; ++ ++ for (final ChunkHolder chunk : io.papermc.paper.chunk.system.ChunkSystem.getVisibleChunkHolders(world)) { ++ if (chunk.getFullChunkNowUnchecked() == null) { ++ continue; ++ } ++ ++ ++total; ++ ++ ChunkHolder.FullChunkStatus state = chunk.getFullStatus(); ++ ++ switch (state) { ++ case INACCESSIBLE -> ++inactive; ++ case BORDER -> ++border; ++ case TICKING -> ++ticking; ++ case ENTITY_TICKING -> ++entityTicking; ++ } ++ } ++ ++ accumulatedTotal += total; ++ accumulatedInactive += inactive; ++ accumulatedBorder += border; ++ accumulatedTicking += ticking; ++ accumulatedEntityTicking += entityTicking; ++ ++ sender.sendMessage(text().append(text("Chunks in ", BLUE), text(bukkitWorld.getName(), GREEN), text(":"))); ++ sender.sendMessage(text().color(DARK_AQUA).append( ++ text("Total: ", BLUE), text(total), ++ text(" Inactive: ", BLUE), text(inactive), ++ text(" Border: ", BLUE), text(border), ++ text(" Ticking: ", BLUE), text(ticking), ++ text(" Entity: ", BLUE), text(entityTicking) ++ )); ++ } ++ if (worlds.size() > 1) { ++ sender.sendMessage(text().append(text("Chunks in ", BLUE), text("all listed worlds", GREEN), text(":", DARK_AQUA))); ++ sender.sendMessage(text().color(DARK_AQUA).append( ++ text("Total: ", BLUE), text(accumulatedTotal), ++ text(" Inactive: ", BLUE), text(accumulatedInactive), ++ text(" Border: ", BLUE), text(accumulatedBorder), ++ text(" Ticking: ", BLUE), text(accumulatedTicking), ++ text(" Entity: ", BLUE), text(accumulatedEntityTicking) ++ )); ++ } ++ } ++ ++ private void doHolderInfo(final CommandSender sender, final String[] args) { ++ List worlds; ++ if (args.length < 1 || args[0].equals("*")) { ++ worlds = Bukkit.getWorlds(); ++ } else { ++ worlds = new ArrayList<>(args.length); ++ for (final String arg : args) { ++ org.bukkit.@Nullable World world = Bukkit.getWorld(arg); ++ if (world == null) { ++ sender.sendMessage(text("World '" + arg + "' is invalid", RED)); ++ return; ++ } ++ worlds.add(world); ++ } ++ } ++ ++ int accumulatedTotal = 0; ++ int accumulatedCanUnload = 0; ++ int accumulatedNull = 0; ++ int accumulatedReadOnly = 0; ++ int accumulatedProtoChunk = 0; ++ int accumulatedFullChunk = 0; ++ ++ for (final org.bukkit.World bukkitWorld : worlds) { ++ final ServerLevel world = ((CraftWorld) bukkitWorld).getHandle(); ++ ++ int total = 0; ++ int canUnload = 0; ++ int nullChunks = 0; ++ int readOnly = 0; ++ int protoChunk = 0; ++ int fullChunk = 
0; ++ ++ for (final ChunkHolder chunk : world.chunkTaskScheduler.chunkHolderManager.getOldChunkHolders()) { // Paper - change updating chunks map ++ final ChunkAccess lastChunk = chunk.getAvailableChunkNow(); ++ ++ ++total; ++ ++ if (lastChunk == null) { ++ ++nullChunks; ++ } else if (lastChunk instanceof ImposterProtoChunk) { ++ ++readOnly; ++ } else if (lastChunk instanceof ProtoChunk) { ++ ++protoChunk; ++ } else if (lastChunk instanceof LevelChunk) { ++ ++fullChunk; ++ } ++ ++ if (chunk.newChunkHolder.isSafeToUnload() == null) { ++ ++canUnload; ++ } ++ } ++ ++ accumulatedTotal += total; ++ accumulatedCanUnload += canUnload; ++ accumulatedNull += nullChunks; ++ accumulatedReadOnly += readOnly; ++ accumulatedProtoChunk += protoChunk; ++ accumulatedFullChunk += fullChunk; ++ ++ sender.sendMessage(text().append(text("Chunks in ", BLUE), text(bukkitWorld.getName(), GREEN), text(":"))); ++ sender.sendMessage(text().color(DARK_AQUA).append( ++ text("Total: ", BLUE), text(total), ++ text(" Unloadable: ", BLUE), text(canUnload), ++ text(" Null: ", BLUE), text(nullChunks), ++ text(" ReadOnly: ", BLUE), text(readOnly), ++ text(" Proto: ", BLUE), text(protoChunk), ++ text(" Full: ", BLUE), text(fullChunk) ++ )); ++ } ++ if (worlds.size() > 1) { ++ sender.sendMessage(text().append(text("Chunks in ", BLUE), text("all listed worlds", GREEN), text(":", DARK_AQUA))); ++ sender.sendMessage(text().color(DARK_AQUA).append( ++ text("Total: ", BLUE), text(accumulatedTotal), ++ text(" Unloadable: ", BLUE), text(accumulatedCanUnload), ++ text(" Null: ", BLUE), text(accumulatedNull), ++ text(" ReadOnly: ", BLUE), text(accumulatedReadOnly), ++ text(" Proto: ", BLUE), text(accumulatedProtoChunk), ++ text(" Full: ", BLUE), text(accumulatedFullChunk) ++ )); ++ } ++ } ++ ++ private void doDebug(final CommandSender sender, final String[] args) { ++ if (args.length < 1) { ++ sender.sendMessage(text("Use /paper debug [chunks] help for more information on a specific command", RED)); ++ return; ++ } ++ ++ final String debugType = args[0].toLowerCase(Locale.ENGLISH); ++ switch (debugType) { ++ case "chunks" -> { ++ if (args.length >= 2 && args[1].toLowerCase(Locale.ENGLISH).equals("help")) { ++ sender.sendMessage(text("Use /paper debug chunks [world] to dump loaded chunk information to a file", RED)); ++ break; ++ } ++ File file = new File(new File(new File("."), "debug"), ++ "chunks-" + DateTimeFormatter.ofPattern("yyyy-MM-dd_HH.mm.ss").format(LocalDateTime.now()) + ".txt"); ++ sender.sendMessage(text("Writing chunk information dump to " + file, GREEN)); ++ try { ++ MCUtil.dumpChunks(file, false); ++ sender.sendMessage(text("Successfully written chunk information!", GREEN)); ++ } catch (Throwable thr) { ++ MinecraftServer.LOGGER.warn("Failed to dump chunk information to file " + file.toString(), thr); ++ sender.sendMessage(text("Failed to dump chunk information, see console", RED)); ++ } ++ } ++ // "help" & default ++ default -> sender.sendMessage(text("Use /paper debug [chunks] help for more information on a specific command", RED)); ++ } ++ } ++ ++} +diff --git a/src/main/java/io/papermc/paper/util/MCUtil.java b/src/main/java/io/papermc/paper/util/MCUtil.java +index 902317d2dc198a1cbfc679810bcb2173644354cb..d58d44faa40be2421f4cb54740a3abdbad72875c 100644 +--- a/src/main/java/io/papermc/paper/util/MCUtil.java ++++ b/src/main/java/io/papermc/paper/util/MCUtil.java +@@ -2,16 +2,29 @@ package io.papermc.paper.util; + + import com.google.common.util.concurrent.ThreadFactoryBuilder; + import io.papermc.paper.math.Position; 
++import com.google.gson.JsonArray; ++import com.google.gson.JsonObject; ++import com.google.gson.internal.Streams; ++import com.google.gson.stream.JsonWriter; ++import com.mojang.datafixers.util.Either; + import it.unimi.dsi.fastutil.objects.ObjectRBTreeSet; + import java.lang.ref.Cleaner; ++import it.unimi.dsi.fastutil.objects.ReferenceArrayList; + import net.minecraft.core.BlockPos; + import net.minecraft.core.Direction; + import net.minecraft.server.MinecraftServer; ++import net.minecraft.server.level.ChunkHolder; ++import net.minecraft.server.level.ChunkMap; ++import net.minecraft.server.level.DistanceManager; + import net.minecraft.server.level.ServerLevel; ++import net.minecraft.server.level.ServerPlayer; ++import net.minecraft.server.level.Ticket; + import net.minecraft.world.entity.Entity; + import net.minecraft.world.level.ChunkPos; + import net.minecraft.world.level.ClipContext; + import net.minecraft.world.level.Level; ++import net.minecraft.world.level.chunk.ChunkAccess; ++import net.minecraft.world.level.chunk.ChunkStatus; + import org.apache.commons.lang.exception.ExceptionUtils; + import org.bukkit.Location; + import org.bukkit.block.BlockFace; +@@ -21,8 +34,11 @@ import org.spigotmc.AsyncCatcher; + + import javax.annotation.Nonnull; + import javax.annotation.Nullable; ++import java.io.*; ++import java.nio.charset.StandardCharsets; + import java.util.List; + import java.util.Queue; ++import java.util.Set; + import java.util.concurrent.CompletableFuture; + import java.util.concurrent.ExecutionException; + import java.util.concurrent.LinkedBlockingQueue; +@@ -511,6 +527,100 @@ public final class MCUtil { + } + } + ++ public static ChunkStatus getChunkStatus(ChunkHolder chunk) { ++ return chunk.getChunkHolderStatus(); ++ } ++ ++ public static void dumpChunks(File file, boolean watchdog) throws IOException { ++ file.getParentFile().mkdirs(); ++ file.createNewFile(); ++ ReferenceArrayList worlds = new ReferenceArrayList<>(org.bukkit.Bukkit.getWorlds()); ++ ReferenceArrayList loadedWorlds = new ReferenceArrayList<>(worlds); ++ JsonObject data = new JsonObject(); ++ ++ data.addProperty("server-version", org.bukkit.Bukkit.getVersion()); ++ data.addProperty("data-version", 1); ++ ++ { ++ JsonArray players = new JsonArray(); ++ data.add("all-players", players); ++ List playerList = MinecraftServer.getServer().getPlayerList().players; ++ for (ServerPlayer player : playerList) { ++ JsonObject playerData = new JsonObject(); ++ players.add(playerData); ++ ++ Level playerWorld = player.getLevel(); ++ org.bukkit.World craftWorld = playerWorld.getWorld(); ++ Entity.RemovalReason removalReason = player.getRemovalReason(); ++ ++ playerData.addProperty("name", player.getScoreboardName()); ++ playerData.addProperty("x", player.getX()); ++ playerData.addProperty("y", player.getY()); ++ playerData.addProperty("z", player.getZ()); ++ playerData.addProperty("world", playerWorld == null ? "null world" : craftWorld.getName()); ++ playerData.addProperty("removalReason", removalReason == null ? 
"null" : removalReason.name()); ++ ++ if (!worlds.contains(craftWorld)) { ++ worlds.add(craftWorld); ++ } ++ } ++ } ++ ++ JsonArray chunkWaitInformation = new JsonArray(); ++ data.add("chunk-wait-infos", chunkWaitInformation); ++ ++ for (io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.ChunkInfo chunkInfo : io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.getChunkInfos()) { ++ chunkWaitInformation.add(chunkInfo.toString()); ++ } ++ ++ JsonArray worldsData = new JsonArray(); ++ ++ for (org.bukkit.World bukkitWorld : worlds) { ++ JsonObject worldData = new JsonObject(); ++ ++ ServerLevel world = ((org.bukkit.craftbukkit.CraftWorld)bukkitWorld).getHandle(); ++ List players = world.players(); ++ ++ worldData.addProperty("is-loaded", loadedWorlds.contains(bukkitWorld)); ++ worldData.addProperty("name", world.getWorld().getName()); ++ worldData.addProperty("view-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance()); // Paper - replace chunk loader system ++ worldData.addProperty("tick-view-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance()); // Paper - replace chunk loader system ++ worldData.addProperty("keep-spawn-loaded", world.keepSpawnInMemory); ++ worldData.addProperty("keep-spawn-loaded-range", world.paperConfig().spawn.keepSpawnLoadedRange * 16); ++ ++ JsonArray playersData = new JsonArray(); ++ ++ for (ServerPlayer player : players) { ++ JsonObject playerData = new JsonObject(); ++ ++ playerData.addProperty("name", player.getScoreboardName()); ++ playerData.addProperty("x", player.getX()); ++ playerData.addProperty("y", player.getY()); ++ playerData.addProperty("z", player.getZ()); ++ ++ playersData.add(playerData); ++ } ++ ++ worldData.add("players", playersData); ++ worldData.add("chunk-data", watchdog ? 
world.chunkTaskScheduler.chunkHolderManager.getDebugJsonForWatchdog() : world.chunkTaskScheduler.chunkHolderManager.getDebugJson()); ++ worldsData.add(worldData); ++ } ++ ++ data.add("worlds", worldsData); ++ ++ StringWriter stringWriter = new StringWriter(); ++ JsonWriter jsonWriter = new JsonWriter(stringWriter); ++ jsonWriter.setIndent(" "); ++ jsonWriter.setLenient(false); ++ Streams.write(data, jsonWriter); ++ ++ String fileData = stringWriter.toString(); ++ ++ try (PrintStream out = new PrintStream(new FileOutputStream(file), false, StandardCharsets.UTF_8)) { ++ out.print(fileData); ++ } ++ } ++ + public static int getTicketLevelFor(net.minecraft.world.level.chunk.ChunkStatus status) { + return net.minecraft.server.level.ChunkMap.MAX_VIEW_DISTANCE + net.minecraft.world.level.chunk.ChunkStatus.getDistance(status); + } +diff --git a/src/main/java/io/papermc/paper/util/TickThread.java b/src/main/java/io/papermc/paper/util/TickThread.java +index d59885ee9c8b29d5bac34dce0597e345e5358c77..fc57850b80303fcade89ca95794f63910404a407 100644 +--- a/src/main/java/io/papermc/paper/util/TickThread.java ++++ b/src/main/java/io/papermc/paper/util/TickThread.java +@@ -6,7 +6,7 @@ import net.minecraft.world.entity.Entity; + import org.bukkit.Bukkit; + import java.util.concurrent.atomic.AtomicInteger; + +-public final class TickThread extends Thread { ++public class TickThread extends Thread { + + public static final boolean STRICT_THREAD_CHECKS = Boolean.getBoolean("paper.strict-thread-checks"); + +@@ -16,6 +16,10 @@ public final class TickThread extends Thread { + } + } + ++ /** ++ * @deprecated ++ */ ++ @Deprecated + public static void softEnsureTickThread(final String reason) { + if (!STRICT_THREAD_CHECKS) { + return; +@@ -23,6 +27,10 @@ public final class TickThread extends Thread { + ensureTickThread(reason); + } + ++ /** ++ * @deprecated ++ */ ++ @Deprecated + public static void ensureTickThread(final String reason) { + if (!isTickThread()) { + MinecraftServer.LOGGER.error("Thread " + Thread.currentThread().getName() + " failed main thread check: " + reason, new Throwable()); +@@ -66,14 +74,14 @@ public final class TickThread extends Thread { + } + + public static boolean isTickThread() { +- return Bukkit.isPrimaryThread(); ++ return Thread.currentThread() instanceof TickThread; + } + + public static boolean isTickThreadFor(final ServerLevel world, final int chunkX, final int chunkZ) { +- return Bukkit.isPrimaryThread(); ++ return Thread.currentThread() instanceof TickThread; + } + + public static boolean isTickThreadFor(final Entity entity) { +- return Bukkit.isPrimaryThread(); ++ return Thread.currentThread() instanceof TickThread; + } + } +diff --git a/src/main/java/io/papermc/paper/world/ChunkEntitySlices.java b/src/main/java/io/papermc/paper/world/ChunkEntitySlices.java +new file mode 100644 +index 0000000000000000000000000000000000000000..f597d65d56964297eeeed6c7e77703764178fee0 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/world/ChunkEntitySlices.java +@@ -0,0 +1,601 @@ ++package io.papermc.paper.world; ++ ++import com.destroystokyo.paper.util.maplist.EntityList; ++import io.papermc.paper.chunk.system.entity.EntityLookup; ++import io.papermc.paper.util.TickThread; ++import it.unimi.dsi.fastutil.objects.Reference2ObjectMap; ++import it.unimi.dsi.fastutil.objects.Reference2ObjectOpenHashMap; ++import net.minecraft.nbt.CompoundTag; ++import net.minecraft.server.level.ChunkHolder; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.util.Mth; ++import 
net.minecraft.world.entity.Entity; ++import net.minecraft.world.entity.EntityType; ++import net.minecraft.world.entity.boss.EnderDragonPart; ++import net.minecraft.world.entity.boss.enderdragon.EnderDragon; ++import net.minecraft.world.level.ChunkPos; ++import net.minecraft.world.level.chunk.storage.EntityStorage; ++import net.minecraft.world.level.entity.Visibility; ++import net.minecraft.world.phys.AABB; ++import org.bukkit.craftbukkit.event.CraftEventFactory; ++import java.util.ArrayList; ++import java.util.Arrays; ++import java.util.Iterator; ++import java.util.List; ++import java.util.function.Predicate; ++ ++public final class ChunkEntitySlices { ++ ++ protected final int minSection; ++ protected final int maxSection; ++ public final int chunkX; ++ public final int chunkZ; ++ protected final ServerLevel world; ++ ++ protected final EntityCollectionBySection allEntities; ++ protected final EntityCollectionBySection hardCollidingEntities; ++ protected final Reference2ObjectOpenHashMap, EntityCollectionBySection> entitiesByClass; ++ protected final EntityList entities = new EntityList(); ++ ++ public ChunkHolder.FullChunkStatus status; ++ ++ protected boolean isTransient; ++ ++ public boolean isTransient() { ++ return this.isTransient; ++ } ++ ++ public void setTransient(final boolean value) { ++ this.isTransient = value; ++ } ++ ++ // TODO implement container search optimisations ++ ++ public ChunkEntitySlices(final ServerLevel world, final int chunkX, final int chunkZ, final ChunkHolder.FullChunkStatus status, ++ final int minSection, final int maxSection) { // inclusive, inclusive ++ this.minSection = minSection; ++ this.maxSection = maxSection; ++ this.chunkX = chunkX; ++ this.chunkZ = chunkZ; ++ this.world = world; ++ ++ this.allEntities = new EntityCollectionBySection(this); ++ this.hardCollidingEntities = new EntityCollectionBySection(this); ++ this.entitiesByClass = new Reference2ObjectOpenHashMap<>(); ++ ++ this.status = status; ++ } ++ ++ // Paper start - optimise CraftChunk#getEntities ++ public org.bukkit.entity.Entity[] getChunkEntities() { ++ List ret = new java.util.ArrayList<>(); ++ final Entity[] entities = this.entities.getRawData(); ++ for (int i = 0, size = Math.min(entities.length, this.entities.size()); i < size; ++i) { ++ final Entity entity = entities[i]; ++ if (entity == null) { ++ continue; ++ } ++ final org.bukkit.entity.Entity bukkit = entity.getBukkitEntity(); ++ if (bukkit != null && bukkit.isValid()) { ++ ret.add(bukkit); ++ } ++ } ++ ++ return ret.toArray(new org.bukkit.entity.Entity[0]); ++ } ++ ++ public CompoundTag save() { ++ final int len = this.entities.size(); ++ if (len == 0) { ++ return null; ++ } ++ ++ final Entity[] rawData = this.entities.getRawData(); ++ final List collectedEntities = new ArrayList<>(len); ++ for (int i = 0; i < len; ++i) { ++ final Entity entity = rawData[i]; ++ if (entity.shouldBeSaved()) { ++ collectedEntities.add(entity); ++ } ++ } ++ ++ if (collectedEntities.isEmpty()) { ++ return null; ++ } ++ ++ return EntityStorage.saveEntityChunk(collectedEntities, new ChunkPos(this.chunkX, this.chunkZ), this.world); ++ } ++ ++ // returns true if this chunk has transient entities remaining ++ public boolean unload() { ++ final int len = this.entities.size(); ++ final Entity[] collectedEntities = Arrays.copyOf(this.entities.getRawData(), len); ++ ++ for (int i = 0; i < len; ++i) { ++ final Entity entity = collectedEntities[i]; ++ if (entity.isRemoved()) { ++ // removed by us below ++ continue; ++ } ++ if (entity.shouldBeSaved()) { ++ 
entity.setRemoved(Entity.RemovalReason.UNLOADED_TO_CHUNK); ++ if (entity.isVehicle()) { ++ // we cannot assume that these entities are contained within this chunk, because entities can ++ // desync - so we need to remove them all ++ for (final Entity passenger : entity.getIndirectPassengers()) { ++ passenger.setRemoved(Entity.RemovalReason.UNLOADED_TO_CHUNK); ++ } ++ } ++ } ++ } ++ ++ return this.entities.size() != 0; ++ } ++ ++ private List getAllEntities() { ++ final int len = this.entities.size(); ++ if (len == 0) { ++ return new ArrayList<>(); ++ } ++ ++ final Entity[] rawData = this.entities.getRawData(); ++ final List collectedEntities = new ArrayList<>(len); ++ for (int i = 0; i < len; ++i) { ++ collectedEntities.add(rawData[i]); ++ } ++ ++ return collectedEntities; ++ } ++ ++ public void callEntitiesLoadEvent() { ++ CraftEventFactory.callEntitiesLoadEvent(this.world, new ChunkPos(this.chunkX, this.chunkZ), this.getAllEntities()); ++ } ++ ++ public void callEntitiesUnloadEvent() { ++ CraftEventFactory.callEntitiesUnloadEvent(this.world, new ChunkPos(this.chunkX, this.chunkZ), this.getAllEntities()); ++ } ++ // Paper end - optimise CraftChunk#getEntities ++ ++ public boolean isEmpty() { ++ return this.entities.size() == 0; ++ } ++ ++ public void mergeInto(final ChunkEntitySlices slices) { ++ final Entity[] entities = this.entities.getRawData(); ++ for (int i = 0, size = Math.min(entities.length, this.entities.size()); i < size; ++i) { ++ final Entity entity = entities[i]; ++ slices.addEntity(entity, entity.sectionY); ++ } ++ } ++ ++ private boolean preventStatusUpdates; ++ public boolean startPreventingStatusUpdates() { ++ final boolean ret = this.preventStatusUpdates; ++ this.preventStatusUpdates = true; ++ return ret; ++ } ++ ++ public void stopPreventingStatusUpdates(final boolean prev) { ++ this.preventStatusUpdates = prev; ++ } ++ ++ public void updateStatus(final ChunkHolder.FullChunkStatus status, final EntityLookup lookup) { ++ this.status = status; ++ ++ final Entity[] entities = this.entities.getRawData(); ++ ++ for (int i = 0, size = this.entities.size(); i < size; ++i) { ++ final Entity entity = entities[i]; ++ ++ final Visibility oldVisibility = EntityLookup.getEntityStatus(entity); ++ entity.chunkStatus = status; ++ final Visibility newVisibility = EntityLookup.getEntityStatus(entity); ++ ++ lookup.entityStatusChange(entity, this, oldVisibility, newVisibility, false, false, false); ++ } ++ } ++ ++ public boolean addEntity(final Entity entity, final int chunkSection) { ++ if (!this.entities.add(entity)) { ++ return false; ++ } ++ entity.chunkStatus = this.status; ++ final int sectionIndex = chunkSection - this.minSection; ++ ++ this.allEntities.addEntity(entity, sectionIndex); ++ ++ if (entity.hardCollides()) { ++ this.hardCollidingEntities.addEntity(entity, sectionIndex); ++ } ++ ++ for (final Iterator, EntityCollectionBySection>> iterator = ++ this.entitiesByClass.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) { ++ final Reference2ObjectMap.Entry, EntityCollectionBySection> entry = iterator.next(); ++ ++ if (entry.getKey().isInstance(entity)) { ++ entry.getValue().addEntity(entity, sectionIndex); ++ } ++ } ++ ++ return true; ++ } ++ ++ public boolean removeEntity(final Entity entity, final int chunkSection) { ++ if (!this.entities.remove(entity)) { ++ return false; ++ } ++ entity.chunkStatus = null; ++ final int sectionIndex = chunkSection - this.minSection; ++ ++ this.allEntities.removeEntity(entity, sectionIndex); ++ ++ if (entity.hardCollides()) { 
++ this.hardCollidingEntities.removeEntity(entity, sectionIndex); ++ } ++ ++ for (final Iterator, EntityCollectionBySection>> iterator = ++ this.entitiesByClass.reference2ObjectEntrySet().fastIterator(); iterator.hasNext();) { ++ final Reference2ObjectMap.Entry, EntityCollectionBySection> entry = iterator.next(); ++ ++ if (entry.getKey().isInstance(entity)) { ++ entry.getValue().removeEntity(entity, sectionIndex); ++ } ++ } ++ ++ return true; ++ } ++ ++ public void getHardCollidingEntities(final Entity except, final AABB box, final List into, final Predicate predicate) { ++ this.hardCollidingEntities.getEntities(except, box, into, predicate); ++ } ++ ++ public void getEntities(final Entity except, final AABB box, final List into, final Predicate predicate) { ++ this.allEntities.getEntitiesWithEnderDragonParts(except, box, into, predicate); ++ } ++ ++ public void getEntitiesWithoutDragonParts(final Entity except, final AABB box, final List into, final Predicate predicate) { ++ this.allEntities.getEntities(except, box, into, predicate); ++ } ++ ++ public void getEntities(final EntityType type, final AABB box, final List into, ++ final Predicate predicate) { ++ this.allEntities.getEntities(type, box, (List)into, (Predicate)predicate); ++ } ++ ++ protected EntityCollectionBySection initClass(final Class clazz) { ++ final EntityCollectionBySection ret = new EntityCollectionBySection(this); ++ ++ for (int sectionIndex = 0; sectionIndex < this.allEntities.entitiesBySection.length; ++sectionIndex) { ++ final BasicEntityList sectionEntities = this.allEntities.entitiesBySection[sectionIndex]; ++ if (sectionEntities == null) { ++ continue; ++ } ++ ++ final Entity[] storage = sectionEntities.storage; ++ ++ for (int i = 0, len = Math.min(storage.length, sectionEntities.size()); i < len; ++i) { ++ final Entity entity = storage[i]; ++ ++ if (clazz.isInstance(entity)) { ++ ret.addEntity(entity, sectionIndex); ++ } ++ } ++ } ++ ++ return ret; ++ } ++ ++ public void getEntities(final Class clazz, final Entity except, final AABB box, final List into, ++ final Predicate predicate) { ++ EntityCollectionBySection collection = this.entitiesByClass.get(clazz); ++ if (collection != null) { ++ collection.getEntitiesWithEnderDragonParts(except, clazz, box, (List)into, (Predicate)predicate); ++ } else { ++ this.entitiesByClass.putIfAbsent(clazz, collection = this.initClass(clazz)); ++ collection.getEntitiesWithEnderDragonParts(except, clazz, box, (List)into, (Predicate)predicate); ++ } ++ } ++ ++ protected static final class BasicEntityList { ++ ++ protected static final Entity[] EMPTY = new Entity[0]; ++ protected static final int DEFAULT_CAPACITY = 4; ++ ++ protected E[] storage; ++ protected int size; ++ ++ public BasicEntityList() { ++ this(0); ++ } ++ ++ public BasicEntityList(final int cap) { ++ this.storage = (E[])(cap <= 0 ? 
EMPTY : new Entity[cap]); ++ } ++ ++ public boolean isEmpty() { ++ return this.size == 0; ++ } ++ ++ public int size() { ++ return this.size; ++ } ++ ++ private void resize() { ++ if (this.storage == EMPTY) { ++ this.storage = (E[])new Entity[DEFAULT_CAPACITY]; ++ } else { ++ this.storage = Arrays.copyOf(this.storage, this.storage.length * 2); ++ } ++ } ++ ++ public void add(final E entity) { ++ final int idx = this.size++; ++ if (idx >= this.storage.length) { ++ this.resize(); ++ this.storage[idx] = entity; ++ } else { ++ this.storage[idx] = entity; ++ } ++ } ++ ++ public int indexOf(final E entity) { ++ final E[] storage = this.storage; ++ ++ for (int i = 0, len = Math.min(this.storage.length, this.size); i < len; ++i) { ++ if (storage[i] == entity) { ++ return i; ++ } ++ } ++ ++ return -1; ++ } ++ ++ public boolean remove(final E entity) { ++ final int idx = this.indexOf(entity); ++ if (idx == -1) { ++ return false; ++ } ++ ++ final int size = --this.size; ++ final E[] storage = this.storage; ++ if (idx != size) { ++ System.arraycopy(storage, idx + 1, storage, idx, size - idx); ++ } ++ ++ storage[size] = null; ++ ++ return true; ++ } ++ ++ public boolean has(final E entity) { ++ return this.indexOf(entity) != -1; ++ } ++ } ++ ++ protected static final class EntityCollectionBySection { ++ ++ protected final ChunkEntitySlices manager; ++ protected final long[] nonEmptyBitset; ++ protected final BasicEntityList[] entitiesBySection; ++ protected int count; ++ ++ public EntityCollectionBySection(final ChunkEntitySlices manager) { ++ this.manager = manager; ++ ++ final int sectionCount = manager.maxSection - manager.minSection + 1; ++ ++ this.nonEmptyBitset = new long[(sectionCount + (Long.SIZE - 1)) >>> 6]; // (sectionCount + (Long.SIZE - 1)) / Long.SIZE ++ this.entitiesBySection = new BasicEntityList[sectionCount]; ++ } ++ ++ public void addEntity(final Entity entity, final int sectionIndex) { ++ BasicEntityList list = this.entitiesBySection[sectionIndex]; ++ ++ if (list != null && list.has(entity)) { ++ return; ++ } ++ ++ if (list == null) { ++ this.entitiesBySection[sectionIndex] = list = new BasicEntityList<>(); ++ this.nonEmptyBitset[sectionIndex >>> 6] |= (1L << (sectionIndex & (Long.SIZE - 1))); ++ } ++ ++ list.add(entity); ++ ++this.count; ++ } ++ ++ public void removeEntity(final Entity entity, final int sectionIndex) { ++ final BasicEntityList list = this.entitiesBySection[sectionIndex]; ++ ++ if (list == null || !list.remove(entity)) { ++ return; ++ } ++ ++ --this.count; ++ ++ if (list.isEmpty()) { ++ this.entitiesBySection[sectionIndex] = null; ++ this.nonEmptyBitset[sectionIndex >>> 6] ^= (1L << (sectionIndex & (Long.SIZE - 1))); ++ } ++ } ++ ++ public void getEntities(final Entity except, final AABB box, final List into, final Predicate predicate) { ++ if (this.count == 0) { ++ return; ++ } ++ ++ final int minSection = this.manager.minSection; ++ final int maxSection = this.manager.maxSection; ++ ++ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection); ++ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 4, minSection, maxSection); ++ ++ final BasicEntityList[] entitiesBySection = this.entitiesBySection; ++ ++ for (int section = min; section <= max; ++section) { ++ final BasicEntityList list = entitiesBySection[section - minSection]; ++ ++ if (list == null) { ++ continue; ++ } ++ ++ final Entity[] storage = list.storage; ++ ++ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) { ++ final Entity entity = storage[i]; ++ ++ 
if (entity == null || entity == except || !entity.getBoundingBox().intersects(box)) { ++ continue; ++ } ++ ++ if (predicate != null && !predicate.test(entity)) { ++ continue; ++ } ++ ++ into.add(entity); ++ } ++ } ++ } ++ ++ public void getEntitiesWithEnderDragonParts(final Entity except, final AABB box, final List into, ++ final Predicate predicate) { ++ if (this.count == 0) { ++ return; ++ } ++ ++ final int minSection = this.manager.minSection; ++ final int maxSection = this.manager.maxSection; ++ ++ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection); ++ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 4, minSection, maxSection); ++ ++ final BasicEntityList[] entitiesBySection = this.entitiesBySection; ++ ++ for (int section = min; section <= max; ++section) { ++ final BasicEntityList list = entitiesBySection[section - minSection]; ++ ++ if (list == null) { ++ continue; ++ } ++ ++ final Entity[] storage = list.storage; ++ ++ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) { ++ final Entity entity = storage[i]; ++ ++ if (entity == null || entity == except || !entity.getBoundingBox().intersects(box)) { ++ continue; ++ } ++ ++ if (predicate == null || predicate.test(entity)) { ++ into.add(entity); ++ } // else: continue to test the ender dragon parts ++ ++ if (entity instanceof EnderDragon) { ++ for (final EnderDragonPart part : ((EnderDragon)entity).subEntities) { ++ if (part == except || !part.getBoundingBox().intersects(box)) { ++ continue; ++ } ++ ++ if (predicate != null && !predicate.test(part)) { ++ continue; ++ } ++ ++ into.add(part); ++ } ++ } ++ } ++ } ++ } ++ ++ public void getEntitiesWithEnderDragonParts(final Entity except, final Class clazz, final AABB box, final List into, ++ final Predicate predicate) { ++ if (this.count == 0) { ++ return; ++ } ++ ++ final int minSection = this.manager.minSection; ++ final int maxSection = this.manager.maxSection; ++ ++ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection); ++ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 4, minSection, maxSection); ++ ++ final BasicEntityList[] entitiesBySection = this.entitiesBySection; ++ ++ for (int section = min; section <= max; ++section) { ++ final BasicEntityList list = entitiesBySection[section - minSection]; ++ ++ if (list == null) { ++ continue; ++ } ++ ++ final Entity[] storage = list.storage; ++ ++ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) { ++ final Entity entity = storage[i]; ++ ++ if (entity == null || entity == except || !entity.getBoundingBox().intersects(box)) { ++ continue; ++ } ++ ++ if (predicate == null || predicate.test(entity)) { ++ into.add(entity); ++ } // else: continue to test the ender dragon parts ++ ++ if (entity instanceof EnderDragon) { ++ for (final EnderDragonPart part : ((EnderDragon)entity).subEntities) { ++ if (part == except || !part.getBoundingBox().intersects(box) || !clazz.isInstance(part)) { ++ continue; ++ } ++ ++ if (predicate != null && !predicate.test(part)) { ++ continue; ++ } ++ ++ into.add(part); ++ } ++ } ++ } ++ } ++ } ++ ++ public void getEntities(final EntityType type, final AABB box, final List into, ++ final Predicate predicate) { ++ if (this.count == 0) { ++ return; ++ } ++ ++ final int minSection = this.manager.minSection; ++ final int maxSection = this.manager.maxSection; ++ ++ final int min = Mth.clamp(Mth.floor(box.minY - 2.0) >> 4, minSection, maxSection); ++ final int max = Mth.clamp(Mth.floor(box.maxY + 2.0) >> 
4, minSection, maxSection); ++ ++ final BasicEntityList[] entitiesBySection = this.entitiesBySection; ++ ++ for (int section = min; section <= max; ++section) { ++ final BasicEntityList list = entitiesBySection[section - minSection]; ++ ++ if (list == null) { ++ continue; ++ } ++ ++ final Entity[] storage = list.storage; ++ ++ for (int i = 0, len = Math.min(storage.length, list.size()); i < len; ++i) { ++ final Entity entity = storage[i]; ++ ++ if (entity == null || (type != null && entity.getType() != type) || !entity.getBoundingBox().intersects(box)) { ++ continue; ++ } ++ ++ if (predicate != null && !predicate.test((T)entity)) { ++ continue; ++ } ++ ++ into.add((T)entity); ++ } ++ } ++ } ++ } ++} +diff --git a/src/main/java/net/minecraft/network/Connection.java b/src/main/java/net/minecraft/network/Connection.java +index 3ab557277cff7c8a43b0f3de45d17f2bf78f7747..c457bdcb93fa306a2d67b31c0abb53465d809862 100644 +--- a/src/main/java/net/minecraft/network/Connection.java ++++ b/src/main/java/net/minecraft/network/Connection.java +@@ -92,6 +92,28 @@ public class Connection extends SimpleChannelInboundHandler> { + private int tickCount; + private boolean handlingFault; + public String hostname = ""; // CraftBukkit - add field ++ // Paper start - add pending task queue ++ private final Queue pendingTasks = new java.util.concurrent.ConcurrentLinkedQueue<>(); ++ public void execute(final Runnable run) { ++ if (this.channel == null || !this.channel.isRegistered()) { ++ run.run(); ++ return; ++ } ++ final boolean queue = !this.queue.isEmpty(); ++ if (!queue) { ++ this.channel.eventLoop().execute(run); ++ } else { ++ this.pendingTasks.add(run); ++ if (this.queue.isEmpty()) { ++ // something flushed async, dump tasks now ++ Runnable r; ++ while ((r = this.pendingTasks.poll()) != null) { ++ this.channel.eventLoop().execute(r); ++ } ++ } ++ } ++ } ++ // Paper end - add pending task queue + + public Connection(PacketFlow side) { + this.receiving = side; +@@ -255,6 +277,7 @@ public class Connection extends SimpleChannelInboundHandler> { + } + + private void flushQueue() { ++ try { // Paper - add pending task queue + if (this.channel != null && this.channel.isOpen()) { + Queue queue = this.queue; + +@@ -267,6 +290,12 @@ public class Connection extends SimpleChannelInboundHandler> { + + } + } ++ } finally { // Paper start - add pending task queue ++ Runnable r; ++ while ((r = this.pendingTasks.poll()) != null) { ++ this.channel.eventLoop().execute(r); ++ } ++ } // Paper end - add pending task queue + } + + public void tick() { +diff --git a/src/main/java/net/minecraft/network/protocol/game/ServerboundCommandSuggestionPacket.java b/src/main/java/net/minecraft/network/protocol/game/ServerboundCommandSuggestionPacket.java +index a5e438a834826161c52ca9db57d234d9ff80a591..b8bc1b9b8e8a33df90a963f9f9769292bf595642 100644 +--- a/src/main/java/net/minecraft/network/protocol/game/ServerboundCommandSuggestionPacket.java ++++ b/src/main/java/net/minecraft/network/protocol/game/ServerboundCommandSuggestionPacket.java +@@ -14,7 +14,7 @@ public class ServerboundCommandSuggestionPacket implements Packet { + DedicatedServer dedicatedserver1 = new DedicatedServer(optionset, worldLoader.get(), thread, convertable_conversionsession, resourcepackrepository, worldstem, dedicatedserversettings, DataFixers.getDataFixer(), services, LoggerChunkProgressListener::new); + +diff --git a/src/main/java/net/minecraft/server/MinecraftServer.java b/src/main/java/net/minecraft/server/MinecraftServer.java +index 
96897d883671e018bae5f71545c5f7af205e309c..164ce278f2696d4be6b57404648cb0e856464589 100644 +--- a/src/main/java/net/minecraft/server/MinecraftServer.java ++++ b/src/main/java/net/minecraft/server/MinecraftServer.java +@@ -302,7 +302,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop S spin(Function serverFactory) { + AtomicReference atomicreference = new AtomicReference(); +- Thread thread = new Thread(() -> { ++ Thread thread = new io.papermc.paper.util.TickThread(() -> { // Paper - rewrite chunk system + ((MinecraftServer) atomicreference.get()).runServer(); + }, "Server thread"); + +@@ -585,7 +585,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop { +- return worldserver1.getChunkSource().chunkMap.hasWork(); +- })) { +- this.nextTickTime = Util.getMillis() + 1L; +- iterator = this.getAllLevels().iterator(); +- +- while (iterator.hasNext()) { +- worldserver = (ServerLevel) iterator.next(); +- worldserver.getChunkSource().removeTicketsOnClosing(); +- worldserver.getChunkSource().tick(() -> { +- return true; +- }, false); +- } +- +- this.waitUntilNextTick(); +- } +- +- this.saveAllChunks(false, true, false); +- iterator = this.getAllLevels().iterator(); +- +- while (iterator.hasNext()) { +- worldserver = (ServerLevel) iterator.next(); +- if (worldserver != null) { +- try { +- worldserver.close(); +- } catch (IOException ioexception) { +- MinecraftServer.LOGGER.error("Exception closing the level", ioexception); +- } +- } +- } ++ this.saveAllChunks(false, true, false, true); // Paper - rewrite chunk system - move closing into here + + this.isSaving = false; + this.resources.close(); +@@ -941,7 +922,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop 5000L && this.nextTickTime - this.lastOverloadWarning >= 30000L) { // CraftBukkit +@@ -1101,6 +1090,11 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop { ++ LOGGER.info("Async debug chunks executing"); ++ io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.dumpAllChunkLoadInfo(false); ++ CommandSender sender = MinecraftServer.getServer().console; ++ java.io.File file = new java.io.File(new java.io.File(new java.io.File("."), "debug"), ++ "chunks-" + java.time.format.DateTimeFormatter.ofPattern("yyyy-MM-dd_HH.mm.ss").format(java.time.LocalDateTime.now()) + ".txt"); ++ sender.sendMessage(net.kyori.adventure.text.Component.text("Writing chunk information dump to " + file, net.kyori.adventure.text.format.NamedTextColor.GREEN)); ++ try { ++ io.papermc.paper.util.MCUtil.dumpChunks(file, true); ++ sender.sendMessage(net.kyori.adventure.text.Component.text("Successfully written chunk information!", net.kyori.adventure.text.format.NamedTextColor.GREEN)); ++ } catch (Throwable thr) { ++ MinecraftServer.LOGGER.warn("Failed to dump chunk information to file " + file.toString(), thr); ++ sender.sendMessage(net.kyori.adventure.text.Component.text("Failed to dump chunk information, see console", net.kyori.adventure.text.format.NamedTextColor.RED)); ++ } ++ }; ++ Thread t = new Thread(run); ++ t.setName("Async debug thread #" + ASYNC_DEBUG_CHUNKS_COUNT.getAndIncrement()); ++ t.setDaemon(true); ++ t.start(); ++ return; ++ } ++ // Paper end - rewrite chunk system + this.consoleInput.add(new ConsoleInput(command, commandSource)); + } + +diff --git a/src/main/java/net/minecraft/server/level/ChunkHolder.java b/src/main/java/net/minecraft/server/level/ChunkHolder.java +index 
4620e64d8eb81520b75fbfbc64603e5887c7b016..6b6d31e76f48a0de33ac528f990ce841dbd666f1 100644 +--- a/src/main/java/net/minecraft/server/level/ChunkHolder.java ++++ b/src/main/java/net/minecraft/server/level/ChunkHolder.java +@@ -48,17 +48,15 @@ public class ChunkHolder { + private static final Either NOT_DONE_YET = Either.right(ChunkHolder.ChunkLoadingFailure.UNLOADED); + private static final CompletableFuture> UNLOADED_LEVEL_CHUNK_FUTURE = CompletableFuture.completedFuture(ChunkHolder.UNLOADED_LEVEL_CHUNK); + private static final List CHUNK_STATUSES = ChunkStatus.getStatusList(); +- private final AtomicReferenceArray>> futures; ++ // Paper - rewrite chunk system ++ private static final ChunkHolder.FullChunkStatus[] FULL_CHUNK_STATUSES = ChunkHolder.FullChunkStatus.values(); ++ private static final int BLOCKS_BEFORE_RESEND_FUDGE = 64; ++ // Paper - rewrite chunk system + private final LevelHeightAccessor levelHeightAccessor; +- private volatile CompletableFuture> fullChunkFuture; private int fullChunkCreateCount; private volatile boolean isFullChunkReady; // Paper - cache chunk ticking stage +- private volatile CompletableFuture> tickingChunkFuture; private volatile boolean isTickingReady; // Paper - cache chunk ticking stage +- private volatile CompletableFuture> entityTickingChunkFuture; private volatile boolean isEntityTickingReady; // Paper - cache chunk ticking stage +- public CompletableFuture chunkToSave; // Paper - public ++ // Paper - rewrite chunk system + @Nullable + private final DebugBuffer chunkToSaveHistory; +- public int oldTicketLevel; +- private int ticketLevel; +- private int queueLevel; ++ // Paper - rewrite chunk system + public final ChunkPos pos; + private boolean hasChangedSections; + private final ShortSet[] changedBlocksPerSection; +@@ -69,8 +67,22 @@ public class ChunkHolder { + public final ChunkHolder.PlayerProvider playerProvider; + private boolean wasAccessibleSinceLastSave; + private CompletableFuture pendingFullStateConfirmation; ++ // Paper - rewrite chunk system ++ private boolean resendLight; ++ // Paper - rewrite chunk system + + private final ChunkMap chunkMap; // Paper ++ // Paper start - no-tick view distance ++ public final LevelChunk getSendingChunk() { ++ // it's important that we use getChunkAtIfLoadedImmediately to mirror the chunk sending logic used ++ // in Chunk's neighbour callback ++ LevelChunk ret = this.chunkMap.level.getChunkSource().getChunkAtIfLoadedImmediately(this.pos.x, this.pos.z); ++ if (ret != null && ret.areNeighboursLoaded(1)) { ++ return ret; ++ } ++ return null; ++ } ++ // Paper end - no-tick view distance + + // Paper start + public void onChunkAdd() { +@@ -82,148 +94,106 @@ public class ChunkHolder { + } + // Paper end + +- public ChunkHolder(ChunkPos pos, int level, LevelHeightAccessor world, LevelLightEngine lightingProvider, ChunkHolder.LevelChangeListener levelUpdateListener, ChunkHolder.PlayerProvider playersWatchingChunkProvider) { +- this.futures = new AtomicReferenceArray(ChunkHolder.CHUNK_STATUSES.size()); +- this.fullChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE; +- this.tickingChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE; +- this.entityTickingChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE; +- this.chunkToSave = CompletableFuture.completedFuture(null); // CraftBukkit - decompile error ++ public final io.papermc.paper.chunk.system.scheduling.NewChunkHolder newChunkHolder; // Paper - rewrite chunk system ++ ++ public ChunkHolder(ChunkPos pos, LevelHeightAccessor world, LevelLightEngine 
lightingProvider, ChunkHolder.PlayerProvider playersWatchingChunkProvider, io.papermc.paper.chunk.system.scheduling.NewChunkHolder newChunkHolder) { // Paper - rewrite chunk system ++ this.newChunkHolder = newChunkHolder; // Paper - rewrite chunk system + this.chunkToSaveHistory = null; + this.blockChangedLightSectionFilter = new BitSet(); + this.skyChangedLightSectionFilter = new BitSet(); +- this.pendingFullStateConfirmation = CompletableFuture.completedFuture(null); // CraftBukkit - decompile error ++ // Paper - rewrite chunk system + this.pos = pos; + this.levelHeightAccessor = world; + this.lightEngine = lightingProvider; +- this.onLevelChange = levelUpdateListener; ++ this.onLevelChange = null; // Paper - rewrite chunk system + this.playerProvider = playersWatchingChunkProvider; +- this.oldTicketLevel = ChunkLevel.MAX_LEVEL + 1; +- this.ticketLevel = this.oldTicketLevel; +- this.queueLevel = this.oldTicketLevel; +- this.setTicketLevel(level); ++ // Paper - rewrite chunk system + this.changedBlocksPerSection = new ShortSet[world.getSectionsCount()]; + this.chunkMap = (ChunkMap)playersWatchingChunkProvider; // Paper + } + + // Paper start + public @Nullable ChunkAccess getAvailableChunkNow() { +- // TODO can we just getStatusFuture(EMPTY)? +- for (ChunkStatus curr = ChunkStatus.FULL, next = curr.getParent(); curr != next; curr = next, next = next.getParent()) { +- CompletableFuture> future = this.getFutureIfPresentUnchecked(curr); +- Either either = future.getNow(null); +- if (either == null || either.left().isEmpty()) { +- continue; +- } +- return either.left().get(); +- } +- return null; ++ return this.newChunkHolder.getCurrentChunk(); // Paper - rewrite chunk system + } + // Paper end + // CraftBukkit start + public LevelChunk getFullChunkNow() { +- // Note: We use the oldTicketLevel for isLoaded checks. +- if (!ChunkLevel.fullStatus(this.oldTicketLevel).isOrAfter(FullChunkStatus.FULL)) return null; +- return this.getFullChunkNowUnchecked(); ++ // Paper start - rewrite chunk system ++ ChunkAccess chunk = this.getAvailableChunkNow(); ++ if (!this.isFullChunkReady() || !(chunk instanceof LevelChunk)) return null; // instanceof to avoid a race condition on off-main threads ++ return (LevelChunk)chunk; ++ // Paper end - rewrite chunk system + } + + public LevelChunk getFullChunkNowUnchecked() { +- CompletableFuture> statusFuture = this.getFutureIfPresentUnchecked(ChunkStatus.FULL); +- Either either = (Either) statusFuture.getNow(null); +- return (either == null) ? null : (LevelChunk) either.left().orElse(null); ++ // Paper start - rewrite chunk system ++ ChunkAccess chunk = this.getAvailableChunkNow(); ++ return chunk instanceof LevelChunk ? (LevelChunk)chunk : null; ++ // Paper end - rewrite chunk system + } + // CraftBukkit end + + public CompletableFuture> getFutureIfPresentUnchecked(ChunkStatus leastStatus) { +- CompletableFuture> completablefuture = (CompletableFuture) this.futures.get(leastStatus.getIndex()); +- +- return completablefuture == null ? ChunkHolder.UNLOADED_CHUNK_FUTURE : completablefuture; ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public CompletableFuture> getFutureIfPresent(ChunkStatus leastStatus) { +- return ChunkLevel.generationStatus(this.ticketLevel).isOrAfter(leastStatus) ? 
this.getFutureIfPresentUnchecked(leastStatus) : ChunkHolder.UNLOADED_CHUNK_FUTURE; ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public final CompletableFuture> getTickingChunkFuture() { // Paper - final for inline +- return this.tickingChunkFuture; ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public final CompletableFuture> getEntityTickingChunkFuture() { // Paper - final for inline +- return this.entityTickingChunkFuture; ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public final CompletableFuture> getFullChunkFuture() { // Paper - final for inline +- return this.fullChunkFuture; ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + @Nullable + public final LevelChunk getTickingChunk() { // Paper - final for inline +- CompletableFuture> completablefuture = this.getTickingChunkFuture(); +- Either either = (Either) completablefuture.getNow(null); // CraftBukkit - decompile error +- +- return either == null ? null : (LevelChunk) either.left().orElse(null); // CraftBukkit - decompile error ++ // Paper start - rewrite chunk system ++ if (!this.isTickingReady()) { ++ return null; ++ } ++ return (LevelChunk)this.getAvailableChunkNow(); ++ // Paper end - rewrite chunk system + } + + @Nullable + public final LevelChunk getFullChunk() { // Paper - final for inline +- CompletableFuture> completablefuture = this.getFullChunkFuture(); +- Either either = (Either) completablefuture.getNow(null); // CraftBukkit - decompile error +- +- return either == null ? null : (LevelChunk) either.left().orElse(null); // CraftBukkit - decompile error ++ // Paper start - rewrite chunk system ++ if (!this.isFullChunkReady()) { ++ return null; ++ } ++ return (LevelChunk)this.getAvailableChunkNow(); ++ // Paper end - rewrite chunk system + } + + @Nullable + public ChunkStatus getLastAvailableStatus() { +- for (int i = ChunkHolder.CHUNK_STATUSES.size() - 1; i >= 0; --i) { +- ChunkStatus chunkstatus = (ChunkStatus) ChunkHolder.CHUNK_STATUSES.get(i); +- CompletableFuture> completablefuture = this.getFutureIfPresentUnchecked(chunkstatus); +- +- if (((Either) completablefuture.getNow(ChunkHolder.UNLOADED_CHUNK)).left().isPresent()) { +- return chunkstatus; +- } +- } +- +- return null; ++ return this.newChunkHolder.getCurrentGenStatus(); // Paper - rewrite chunk system + } + + // Paper start + public ChunkStatus getChunkHolderStatus() { +- for (ChunkStatus curr = ChunkStatus.FULL, next = curr.getParent(); curr != next; curr = next, next = next.getParent()) { +- CompletableFuture> future = this.getFutureIfPresentUnchecked(curr); +- Either either = future.getNow(null); +- if (either == null || !either.left().isPresent()) { +- continue; +- } +- return curr; +- } +- +- return null; ++ return this.newChunkHolder.getCurrentGenStatus(); // Paper - rewrite chunk system + } + // Paper end + + @Nullable + public ChunkAccess getLastAvailable() { +- for (int i = ChunkHolder.CHUNK_STATUSES.size() - 1; i >= 0; --i) { +- ChunkStatus chunkstatus = (ChunkStatus) ChunkHolder.CHUNK_STATUSES.get(i); +- CompletableFuture> completablefuture = this.getFutureIfPresentUnchecked(chunkstatus); +- +- if (!completablefuture.isCompletedExceptionally()) { +- Optional optional = ((Either) completablefuture.getNow(ChunkHolder.UNLOADED_CHUNK)).left(); +- +- if (optional.isPresent()) { +- return (ChunkAccess) optional.get(); +- } +- } +- } +- +- return null; ++ return this.newChunkHolder.getCurrentChunk(); // 
Paper - rewrite chunk system + } + +- public final CompletableFuture getChunkToSave() { // Paper - final for inline +- return this.chunkToSave; +- } ++ // Paper - rewrite chunk system + + public void blockChanged(BlockPos pos) { +- LevelChunk chunk = this.getTickingChunk(); ++ LevelChunk chunk = this.getSendingChunk(); // Paper - no-tick view distance + + if (chunk != null) { + int i = this.levelHeightAccessor.getSectionIndex(pos.getY()); +@@ -239,14 +209,15 @@ public class ChunkHolder { + } + + public void sectionLightChanged(LightLayer lightType, int y) { +- Either either = (Either) this.getFutureIfPresent(ChunkStatus.INITIALIZE_LIGHT).getNow(null); // CraftBukkit - decompile error ++ // Paper start - no-tick view distance + +- if (either != null) { +- ChunkAccess ichunkaccess = (ChunkAccess) either.left().orElse(null); // CraftBukkit - decompile error ++ if (true) { ++ ChunkAccess ichunkaccess = this.getAvailableChunkNow(); + + if (ichunkaccess != null) { + ichunkaccess.setUnsaved(true); +- LevelChunk chunk = this.getTickingChunk(); ++ LevelChunk chunk = this.getSendingChunk(); ++ // Paper end - no-tick view distance + + if (chunk != null) { + int j = this.lightEngine.getMinLightSection(); +@@ -273,7 +244,7 @@ public class ChunkHolder { + List list; + + if (!this.skyChangedLightSectionFilter.isEmpty() || !this.blockChangedLightSectionFilter.isEmpty()) { +- list = this.playerProvider.getPlayers(this.pos, true); ++ list = this.getPlayers(true); // Paper - rewrite chunk system + if (!list.isEmpty()) { + ClientboundLightUpdatePacket packetplayoutlightupdate = new ClientboundLightUpdatePacket(chunk.getPos(), this.lightEngine, this.skyChangedLightSectionFilter, this.blockChangedLightSectionFilter); + +@@ -285,7 +256,7 @@ public class ChunkHolder { + } + + if (this.hasChangedSections) { +- list = this.playerProvider.getPlayers(this.pos, false); ++ list = this.getPlayers(false); // Paper - rewrite chunk system + + for (int i = 0; i < this.changedBlocksPerSection.length; ++i) { + ShortSet shortset = this.changedBlocksPerSection[i]; +@@ -343,67 +314,48 @@ public class ChunkHolder { + + } + +- private void broadcast(List players, Packet packet) { +- players.forEach((entityplayer) -> { +- entityplayer.connection.send(packet); +- }); +- } +- +- public CompletableFuture> getOrScheduleFuture(ChunkStatus targetStatus, ChunkMap chunkStorage) { +- int i = targetStatus.getIndex(); +- CompletableFuture> completablefuture = (CompletableFuture) this.futures.get(i); +- +- if (completablefuture != null) { +- Either either = (Either) completablefuture.getNow(ChunkHolder.NOT_DONE_YET); +- +- if (either == null) { +- String s = "value in future for status: " + targetStatus + " was incorrectly set to null at chunk: " + this.pos; ++ // Paper start - rewrite chunk system ++ public List getPlayers(boolean onlyOnWatchDistanceEdge){ ++ // Paper start - per player view distance ++ List ret = new java.util.ArrayList<>(); ++ // there can be potential desync with player's last mapped section and the view distance map, so use the ++ // view distance map here. 
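For illustration, a minimal sketch of how a caller could push a block change through the helpers added in this hunk (getSendingChunk plus the getPlayers/broadcast pair). The chunkMap and blockPos variables and the choice of ClientboundBlockUpdatePacket are assumptions for the example, with the usual net.minecraft imports taken as given; this is not part of the patch itself.

    ChunkHolder holder = chunkMap.getVisibleChunkIfPresent(new ChunkPos(blockPos).toLong());
    LevelChunk sending = holder == null ? null : holder.getSendingChunk();
    if (sending != null) {
        // false = every player the broadcast map has this chunk sent to;
        // true would restrict the send to players on the watch-distance edge.
        holder.broadcast(new ClientboundBlockUpdatePacket(blockPos, sending.getBlockState(blockPos)), false);
    }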
++ com.destroystokyo.paper.util.misc.PlayerAreaMap viewDistanceMap = this.chunkMap.playerChunkManager.broadcastMap; // Paper - replace old player chunk manager ++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet players = viewDistanceMap.getObjectsInRange(this.pos); ++ if (players == null) { ++ return ret; ++ } + +- throw chunkStorage.debugFuturesAndCreateReportedException(new IllegalStateException("null value previously set for chunk status"), s); ++ Object[] backingSet = players.getBackingSet(); ++ for (int i = 0, len = backingSet.length; i < len; ++i) { ++ if (!(backingSet[i] instanceof ServerPlayer player)) { ++ continue; + } +- +- if (either == ChunkHolder.NOT_DONE_YET || either.right().isEmpty()) { +- return completablefuture; ++ if (!this.chunkMap.playerChunkManager.isChunkSent(player, this.pos.x, this.pos.z, onlyOnWatchDistanceEdge)) { ++ continue; + } +- } + +- if (ChunkLevel.generationStatus(this.ticketLevel).isOrAfter(targetStatus)) { +- CompletableFuture> completablefuture1 = chunkStorage.schedule(this, targetStatus); +- +- this.updateChunkToSave(completablefuture1, "schedule " + targetStatus); +- this.futures.set(i, completablefuture1); +- return completablefuture1; +- } else { +- return completablefuture == null ? ChunkHolder.UNLOADED_CHUNK_FUTURE : completablefuture; ++ ret.add(player); + } +- } + +- protected void addSaveDependency(String thenDesc, CompletableFuture then) { +- if (this.chunkToSaveHistory != null) { +- this.chunkToSaveHistory.push(new ChunkHolder.ChunkSaveDebug(Thread.currentThread(), then, thenDesc)); +- } +- +- this.chunkToSave = this.chunkToSave.thenCombine(then, (ichunkaccess, object) -> { +- return ichunkaccess; +- }); ++ return ret; + } + +- private void updateChunkToSave(CompletableFuture> then, String thenDesc) { +- if (this.chunkToSaveHistory != null) { +- this.chunkToSaveHistory.push(new ChunkHolder.ChunkSaveDebug(Thread.currentThread(), then, thenDesc)); +- } ++ public void broadcast(Packet packet, boolean onlyOnWatchDistanceEdge) { ++ this.broadcast(this.getPlayers(onlyOnWatchDistanceEdge), packet); ++ } ++ // Paper end - rewrite chunk system + +- this.chunkToSave = this.chunkToSave.thenCombine(then, (ichunkaccess, either) -> { +- return (ChunkAccess) either.map((ichunkaccess1) -> { +- return ichunkaccess1; +- }, (playerchunk_failure) -> { +- return ichunkaccess; +- }); ++ private void broadcast(List players, Packet packet) { ++ players.forEach((entityplayer) -> { ++ entityplayer.connection.send(packet); + }); + } + ++ // Paper - rewrite chunk system ++ + public FullChunkStatus getFullStatus() { +- return ChunkLevel.fullStatus(this.ticketLevel); ++ return this.newChunkHolder.getChunkStatus(); // Paper - rewrite chunk system) { + } + + public final ChunkPos getPos() { // Paper - final for inline +@@ -411,240 +363,27 @@ public class ChunkHolder { + } + + public final int getTicketLevel() { // Paper - final for inline +- return this.ticketLevel; ++ return this.newChunkHolder.getTicketLevel(); // Paper - rewrite chunk system + } + +- public int getQueueLevel() { +- return this.queueLevel; +- } ++ // Paper - rewrite chunk system + +- private void setQueueLevel(int level) { +- this.queueLevel = level; ++ public static ChunkStatus getStatus(int level) { ++ return level < 33 ? 
ChunkStatus.FULL : ChunkStatus.getStatusAroundFullChunk(level - 33); + } + +- public void setTicketLevel(int level) { +- this.ticketLevel = level; ++ public static ChunkHolder.FullChunkStatus getFullChunkStatus(int distance) { ++ return ChunkHolder.FULL_CHUNK_STATUSES[Mth.clamp(33 - distance + 1, 0, ChunkHolder.FULL_CHUNK_STATUSES.length - 1)]; + } + +- private void scheduleFullChunkPromotion(ChunkMap playerchunkmap, CompletableFuture> completablefuture, Executor executor, FullChunkStatus fullchunkstatus) { +- this.pendingFullStateConfirmation.cancel(false); +- CompletableFuture completablefuture1 = new CompletableFuture(); +- +- completablefuture1.thenRunAsync(() -> { +- playerchunkmap.onFullChunkStatusChange(this.pos, fullchunkstatus); +- }, executor); +- this.pendingFullStateConfirmation = completablefuture1; +- completablefuture.thenAccept((either) -> { +- either.ifLeft((chunk) -> { +- completablefuture1.complete(null); // CraftBukkit - decompile error +- }); +- }); +- } +- +- private void demoteFullChunk(ChunkMap playerchunkmap, FullChunkStatus fullchunkstatus) { +- this.pendingFullStateConfirmation.cancel(false); +- playerchunkmap.onFullChunkStatusChange(this.pos, fullchunkstatus); +- } +- +- protected void updateFutures(ChunkMap chunkStorage, Executor executor) { +- ChunkStatus chunkstatus = ChunkLevel.generationStatus(this.oldTicketLevel); +- ChunkStatus chunkstatus1 = ChunkLevel.generationStatus(this.ticketLevel); +- boolean flag = ChunkLevel.isLoaded(this.oldTicketLevel); +- boolean flag1 = ChunkLevel.isLoaded(this.ticketLevel); +- FullChunkStatus fullchunkstatus = ChunkLevel.fullStatus(this.oldTicketLevel); +- FullChunkStatus fullchunkstatus1 = ChunkLevel.fullStatus(this.ticketLevel); +- // CraftBukkit start +- // ChunkUnloadEvent: Called before the chunk is unloaded: isChunkLoaded is still true and chunk can still be modified by plugins. +- if (fullchunkstatus.isOrAfter(FullChunkStatus.FULL) && !fullchunkstatus1.isOrAfter(FullChunkStatus.FULL)) { +- this.getFutureIfPresentUnchecked(ChunkStatus.FULL).thenAccept((either) -> { +- LevelChunk chunk = (LevelChunk)either.left().orElse(null); +- if (chunk != null) { +- chunkStorage.callbackExecutor.execute(() -> { +- // Minecraft will apply the chunks tick lists to the world once the chunk got loaded, and then store the tick +- // lists again inside the chunk once the chunk becomes inaccessible and set the chunk's needsSaving flag. +- // These actions may however happen deferred, so we manually set the needsSaving flag already here. +- chunk.setUnsaved(true); +- chunk.unloadCallback(); +- }); +- } +- }).exceptionally((throwable) -> { +- // ensure exceptions are printed, by default this is not the case +- MinecraftServer.LOGGER.error("Failed to schedule unload callback for chunk " + ChunkHolder.this.pos, throwable); +- return null; +- }); +- +- // Run callback right away if the future was already done +- chunkStorage.callbackExecutor.run(); +- } +- // CraftBukkit end +- +- if (flag) { +- Either either = Either.right(new ChunkHolder.ChunkLoadingFailure() { +- public String toString() { +- return "Unloaded ticket level " + ChunkHolder.this.pos; +- } +- }); +- +- for (int i = flag1 ? 
chunkstatus1.getIndex() + 1 : 0; i <= chunkstatus.getIndex(); ++i) { +- CompletableFuture> completablefuture = (CompletableFuture) this.futures.get(i); +- +- if (completablefuture == null) { +- this.futures.set(i, CompletableFuture.completedFuture(either)); +- } +- } +- } +- +- boolean flag2 = fullchunkstatus.isOrAfter(FullChunkStatus.FULL); +- boolean flag3 = fullchunkstatus1.isOrAfter(FullChunkStatus.FULL); +- +- this.wasAccessibleSinceLastSave |= flag3; +- if (!flag2 && flag3) { +- int expectCreateCount = ++this.fullChunkCreateCount; // Paper +- this.fullChunkFuture = chunkStorage.prepareAccessibleChunk(this); +- this.scheduleFullChunkPromotion(chunkStorage, this.fullChunkFuture, executor, FullChunkStatus.FULL); +- // Paper start - cache ticking ready status +- this.fullChunkFuture.thenAccept(either -> { +- final Optional left = either.left(); +- if (left.isPresent() && ChunkHolder.this.fullChunkCreateCount == expectCreateCount) { +- LevelChunk fullChunk = either.left().get(); +- ChunkHolder.this.isFullChunkReady = true; +- io.papermc.paper.chunk.system.ChunkSystem.onChunkBorder(fullChunk, this); +- } +- }); +- this.updateChunkToSave(this.fullChunkFuture, "full"); +- } +- +- if (flag2 && !flag3) { +- // Paper start +- if (this.isFullChunkReady) { +- io.papermc.paper.chunk.system.ChunkSystem.onChunkNotBorder(this.fullChunkFuture.join().left().get(), this); // Paper +- } +- // Paper end +- this.fullChunkFuture.complete(ChunkHolder.UNLOADED_LEVEL_CHUNK); +- this.fullChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE; +- ++this.fullChunkCreateCount; // Paper - cache ticking ready status +- this.isFullChunkReady = false; // Paper - cache ticking ready status +- } +- +- boolean flag4 = fullchunkstatus.isOrAfter(FullChunkStatus.BLOCK_TICKING); +- boolean flag5 = fullchunkstatus1.isOrAfter(FullChunkStatus.BLOCK_TICKING); +- +- if (!flag4 && flag5) { +- this.tickingChunkFuture = chunkStorage.prepareTickingChunk(this); +- this.scheduleFullChunkPromotion(chunkStorage, this.tickingChunkFuture, executor, FullChunkStatus.BLOCK_TICKING); +- // Paper start - cache ticking ready status +- this.tickingChunkFuture.thenAccept(either -> { +- either.ifLeft(chunk -> { +- // note: Here is a very good place to add callbacks to logic waiting on this. 
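A worked reading of the two static mappings added above, assuming FullChunkStatus declares its four constants in the usual lowest-first order; the numbers follow directly from the clamp arithmetic shown in the hunk.

    // getFullChunkStatus(distance): index = Mth.clamp(33 - distance + 1, 0, 3)
    //   distance 33 -> index 1 -> FULL            (border chunk)
    //   distance 32 -> index 2 -> BLOCK_TICKING
    //   distance 31 -> index 3 -> ENTITY_TICKING
    //   distance 34 or more -> index 0 -> the lowest (not fully loaded) constant
    // getStatus(level): level < 33 -> ChunkStatus.FULL,
    //   otherwise ChunkStatus.getStatusAroundFullChunk(level - 33), i.e. a generation-stage status.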
+- ChunkHolder.this.isTickingReady = true; +- io.papermc.paper.chunk.system.ChunkSystem.onChunkTicking(chunk, this); +- }); +- }); +- // Paper end +- this.updateChunkToSave(this.tickingChunkFuture, "ticking"); +- } +- +- if (flag4 && !flag5) { +- // Paper start +- if (this.isTickingReady) { +- io.papermc.paper.chunk.system.ChunkSystem.onChunkNotTicking(this.tickingChunkFuture.join().left().get(), this); // Paper +- } +- // Paper end +- this.tickingChunkFuture.complete(ChunkHolder.UNLOADED_LEVEL_CHUNK); this.isTickingReady = false; // Paper - cache chunk ticking stage +- this.tickingChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE; +- } +- +- boolean flag6 = fullchunkstatus.isOrAfter(FullChunkStatus.ENTITY_TICKING); +- boolean flag7 = fullchunkstatus1.isOrAfter(FullChunkStatus.ENTITY_TICKING); +- +- if (!flag6 && flag7) { +- if (this.entityTickingChunkFuture != ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE) { +- throw (IllegalStateException) Util.pauseInIde(new IllegalStateException()); +- } +- +- this.entityTickingChunkFuture = chunkStorage.prepareEntityTickingChunk(this); +- this.scheduleFullChunkPromotion(chunkStorage, this.entityTickingChunkFuture, executor, FullChunkStatus.ENTITY_TICKING); +- // Paper start - cache ticking ready status +- this.entityTickingChunkFuture.thenAccept(either -> { +- either.ifLeft(chunk -> { +- ChunkHolder.this.isEntityTickingReady = true; +- io.papermc.paper.chunk.system.ChunkSystem.onChunkEntityTicking(chunk, this); +- }); +- }); +- // Paper end +- this.updateChunkToSave(this.entityTickingChunkFuture, "entity ticking"); +- } +- +- if (flag6 && !flag7) { +- // Paper start +- if (this.isEntityTickingReady) { +- io.papermc.paper.chunk.system.ChunkSystem.onChunkNotEntityTicking(this.entityTickingChunkFuture.join().left().get(), this); +- } +- // Paper end +- this.entityTickingChunkFuture.complete(ChunkHolder.UNLOADED_LEVEL_CHUNK); this.isEntityTickingReady = false; // Paper - cache chunk ticking stage +- this.entityTickingChunkFuture = ChunkHolder.UNLOADED_LEVEL_CHUNK_FUTURE; +- } +- +- if (!fullchunkstatus1.isOrAfter(fullchunkstatus)) { +- this.demoteFullChunk(chunkStorage, fullchunkstatus1); +- } +- +- this.onLevelChange.onLevelChange(this.pos, this::getQueueLevel, this.ticketLevel, this::setQueueLevel); +- this.oldTicketLevel = this.ticketLevel; +- // CraftBukkit start +- // ChunkLoadEvent: Called after the chunk is loaded: isChunkLoaded returns true and chunk is ready to be modified by plugins. 
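With the per-stage futures and the cached isTickingReady/isEntityTickingReady fields removed here, readiness is now answered by the NewChunkHolder-backed accessors instead. A minimal sketch of the replacement pattern, assuming a ChunkHolder obtained from the chunk map:

    // Old pattern: compose on holder.getTickingChunkFuture() - that now throws in the rewrite.
    // New pattern: check the readiness flag, then take the current chunk directly.
    if (holder.isTickingReady()) {
        LevelChunk chunk = holder.getTickingChunk(); // backed by getAvailableChunkNow()
        // block-ticking work may run against 'chunk' here
    }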
+- if (!fullchunkstatus.isOrAfter(FullChunkStatus.FULL) && fullchunkstatus1.isOrAfter(FullChunkStatus.FULL)) { +- this.getFutureIfPresentUnchecked(ChunkStatus.FULL).thenAccept((either) -> { +- LevelChunk chunk = (LevelChunk)either.left().orElse(null); +- if (chunk != null) { +- chunkStorage.callbackExecutor.execute(() -> { +- chunk.loadCallback(); +- }); +- } +- }).exceptionally((throwable) -> { +- // ensure exceptions are printed, by default this is not the case +- MinecraftServer.LOGGER.error("Failed to schedule load callback for chunk " + ChunkHolder.this.pos, throwable); +- return null; +- }); +- +- // Run callback right away if the future was already done +- chunkStorage.callbackExecutor.run(); +- } +- // CraftBukkit end +- } +- +- public boolean wasAccessibleSinceLastSave() { +- return this.wasAccessibleSinceLastSave; +- } +- +- public void refreshAccessibility() { +- this.wasAccessibleSinceLastSave = ChunkLevel.fullStatus(this.ticketLevel).isOrAfter(FullChunkStatus.FULL); +- } ++ // Paper - rewrite chunk system + + public void replaceProtoChunk(ImposterProtoChunk chunk) { +- for (int i = 0; i < this.futures.length(); ++i) { +- CompletableFuture> completablefuture = (CompletableFuture) this.futures.get(i); +- +- if (completablefuture != null) { +- Optional optional = ((Either) completablefuture.getNow(ChunkHolder.UNLOADED_CHUNK)).left(); +- +- if (!optional.isEmpty() && optional.get() instanceof ProtoChunk) { +- this.futures.set(i, CompletableFuture.completedFuture(Either.left(chunk))); +- } +- } +- } +- +- this.updateChunkToSave(CompletableFuture.completedFuture(Either.left(chunk.getWrapped())), "replaceProto"); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public List>>> getAllFutures() { +- List>>> list = new ArrayList(); +- +- for (int i = 0; i < ChunkHolder.CHUNK_STATUSES.size(); ++i) { +- list.add(Pair.of((ChunkStatus) ChunkHolder.CHUNK_STATUSES.get(i), (CompletableFuture) this.futures.get(i))); +- } +- +- return list; ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + @FunctionalInterface +@@ -682,15 +421,15 @@ public class ChunkHolder { + + // Paper start + public final boolean isEntityTickingReady() { +- return this.isEntityTickingReady; ++ return this.newChunkHolder.isEntityTickingReady(); // Paper - rewrite chunk system + } + + public final boolean isTickingReady() { +- return this.isTickingReady; ++ return this.newChunkHolder.isTickingReady(); // Paper - rewrite chunk system + } + + public final boolean isFullChunkReady() { +- return this.isFullChunkReady; ++ return this.newChunkHolder.isFullChunkReady(); // Paper - rewrite chunk system + } + // Paper end + } +diff --git a/src/main/java/net/minecraft/server/level/ChunkMap.java b/src/main/java/net/minecraft/server/level/ChunkMap.java +index 19bd6f9aee3ccb1af1b010ee51a54aa2d0bf9c84..284393fd4ae0a7562b6bc9b60cf2c141a2de3a58 100644 +--- a/src/main/java/net/minecraft/server/level/ChunkMap.java ++++ b/src/main/java/net/minecraft/server/level/ChunkMap.java +@@ -127,10 +127,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + private static final int MIN_VIEW_DISTANCE = 2; + public static final int MAX_VIEW_DISTANCE = 32; + public static final int FORCED_TICKET_LEVEL = ChunkLevel.byStatus(FullChunkStatus.ENTITY_TICKING); +- public final Long2ObjectLinkedOpenHashMap updatingChunkMap = new Long2ObjectLinkedOpenHashMap(); +- public volatile Long2ObjectLinkedOpenHashMap visibleChunkMap; +- private final 
Long2ObjectLinkedOpenHashMap pendingUnloads; +- private final LongSet entitiesInLevel; ++ // Paper - rewrite chunk system + public final ServerLevel level; + private final ThreadedLevelLightEngine lightEngine; + public final BlockableEventLoop mainThreadExecutor; // Paper - public +@@ -139,16 +136,14 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + private final ChunkGeneratorStructureState chunkGeneratorState; + public final Supplier overworldDataStorage; + private final PoiManager poiManager; +- public final LongSet toDrop; ++ // Paper - rewrite chunk system + private boolean modified; +- private final ChunkTaskPriorityQueueSorter queueSorter; +- private final ProcessorHandle> worldgenMailbox; +- private final ProcessorHandle> mainThreadMailbox; ++ // Paper - rewrite chunk system + public final ChunkProgressListener progressListener; + private final ChunkStatusUpdateListener chunkStatusListener; + public final ChunkMap.ChunkDistanceManager distanceManager; + private final AtomicInteger tickingGenerated; +- private final StructureTemplateManager structureTemplateManager; ++ public final StructureTemplateManager structureTemplateManager; // Paper - rewrite chunk system + private final String storageName; + private final PlayerMap playerMap; + public final Int2ObjectMap entityMap; +@@ -157,37 +152,21 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + private final Queue unloadQueue; + int viewDistance; + +- // CraftBukkit start - recursion-safe executor for Chunk loadCallback() and unloadCallback() +- public final CallbackExecutor callbackExecutor = new CallbackExecutor(); +- public static final class CallbackExecutor implements java.util.concurrent.Executor, Runnable { +- +- private final java.util.Queue queue = new java.util.ArrayDeque<>(); +- +- @Override +- public void execute(Runnable runnable) { +- this.queue.add(runnable); +- } +- +- @Override +- public void run() { +- Runnable task; +- while ((task = this.queue.poll()) != null) { +- task.run(); +- } +- } +- }; +- // CraftBukkit end ++ // Paper - rewrite chunk system + + // Paper start - distance maps + private final com.destroystokyo.paper.util.misc.PooledLinkedHashSets pooledLinkedPlayerHashSets = new com.destroystokyo.paper.util.misc.PooledLinkedHashSets<>(); ++ public final io.papermc.paper.chunk.PlayerChunkLoader playerChunkManager = new io.papermc.paper.chunk.PlayerChunkLoader(this, this.pooledLinkedPlayerHashSets); // Paper - replace chunk loader + + void addPlayerToDistanceMaps(ServerPlayer player) { ++ this.playerChunkManager.addPlayer(player); // Paper - replace chunk loader + int chunkX = MCUtil.getChunkCoordinate(player.getX()); + int chunkZ = MCUtil.getChunkCoordinate(player.getZ()); + // Note: players need to be explicitly added to distance maps before they can be updated + } + + void removePlayerFromDistanceMaps(ServerPlayer player) { ++ this.playerChunkManager.removePlayer(player); // Paper - replace chunk loader + + } + +@@ -195,6 +174,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + int chunkX = MCUtil.getChunkCoordinate(player.getX()); + int chunkZ = MCUtil.getChunkCoordinate(player.getZ()); + // Note: players need to be explicitly added to distance maps before they can be updated ++ this.playerChunkManager.updatePlayer(player); // Paper - replace chunk loader + } + // Paper end + // Paper start +@@ -224,16 +204,13 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + } + + 
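A short sketch of the player lifecycle against the replacement loader wired up above; chunkMap and player are assumed to be in scope, and the join/move/quit call sites are inferred from the hook names rather than stated by the patch.

    chunkMap.addPlayerToDistanceMaps(player);       // join: delegates to playerChunkManager.addPlayer
    chunkMap.updateMaps(player);                    // movement: delegates to playerChunkManager.updatePlayer
    chunkMap.removePlayerFromDistanceMaps(player);  // quit: delegates to playerChunkManager.removePlayer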
public final ChunkHolder getUnloadingChunkHolder(int chunkX, int chunkZ) { +- return this.pendingUnloads.get(io.papermc.paper.util.CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ return null; // Paper - rewrite chunk system + } + // Paper end + + public ChunkMap(ServerLevel world, LevelStorageSource.LevelStorageAccess session, DataFixer dataFixer, StructureTemplateManager structureTemplateManager, Executor executor, BlockableEventLoop mainThreadExecutor, LightChunkGetter chunkProvider, ChunkGenerator chunkGenerator, ChunkProgressListener worldGenerationProgressListener, ChunkStatusUpdateListener chunkStatusChangeListener, Supplier persistentStateManagerFactory, int viewDistance, boolean dsync) { + super(session.getDimensionPath(world.dimension()).resolve("region"), dataFixer, dsync); +- this.visibleChunkMap = this.updatingChunkMap.clone(); +- this.pendingUnloads = new Long2ObjectLinkedOpenHashMap(); +- this.entitiesInLevel = new LongOpenHashSet(); +- this.toDrop = new LongOpenHashSet(); ++ // Paper - rewrite chunk system + this.tickingGenerated = new AtomicInteger(); + this.playerMap = new PlayerMap(); + this.entityMap = new Int2ObjectOpenHashMap(); +@@ -264,19 +241,17 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + + this.chunkGeneratorState = chunkGenerator.createState(iregistrycustom.lookupOrThrow(Registries.STRUCTURE_SET), this.randomState, j, world.spigotConfig); // Spigot + this.mainThreadExecutor = mainThreadExecutor; +- ProcessorMailbox threadedmailbox = ProcessorMailbox.create(executor, "worldgen"); ++ // Paper - rewrite chunk system + + Objects.requireNonNull(mainThreadExecutor); +- ProcessorHandle mailbox = ProcessorHandle.of("main", mainThreadExecutor::tell); ++ // Paper - rewrite chunk system + + this.progressListener = worldGenerationProgressListener; + this.chunkStatusListener = chunkStatusChangeListener; +- ProcessorMailbox threadedmailbox1 = ProcessorMailbox.create(executor, "light"); ++ // Paper - rewrite chunk system + +- this.queueSorter = new ChunkTaskPriorityQueueSorter(ImmutableList.of(threadedmailbox, mailbox, threadedmailbox1), executor, Integer.MAX_VALUE); +- this.worldgenMailbox = this.queueSorter.getProcessor(threadedmailbox, false); +- this.mainThreadMailbox = this.queueSorter.getProcessor(mailbox, false); +- this.lightEngine = new ThreadedLevelLightEngine(chunkProvider, this, this.level.dimensionType().hasSkyLight(), threadedmailbox1, this.queueSorter.getProcessor(threadedmailbox1, false)); ++ // Paper - rewrite chunk system ++ this.lightEngine = new ThreadedLevelLightEngine(chunkProvider, this, this.level.dimensionType().hasSkyLight(), null, null); // Paper - rewrite chunk system + this.distanceManager = new ChunkMap.ChunkDistanceManager(executor, mainThreadExecutor); + this.overworldDataStorage = persistentStateManagerFactory; + this.poiManager = new PoiManager(path.resolve("poi"), dataFixer, dsync, iregistrycustom, world); +@@ -340,20 +315,22 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + + @Nullable + protected ChunkHolder getUpdatingChunkIfPresent(long pos) { +- return (ChunkHolder) this.updatingChunkMap.get(pos); ++ // Paper start - rewrite chunk system ++ io.papermc.paper.chunk.system.scheduling.NewChunkHolder holder = this.level.chunkTaskScheduler.chunkHolderManager.getChunkHolder(pos); ++ return holder == null ? 
null : holder.vanillaChunkHolder; ++ // Paper end - rewrite chunk system + } + + @Nullable + public ChunkHolder getVisibleChunkIfPresent(long pos) { +- return (ChunkHolder) this.visibleChunkMap.get(pos); ++ // Paper start - rewrite chunk system ++ io.papermc.paper.chunk.system.scheduling.NewChunkHolder holder = this.level.chunkTaskScheduler.chunkHolderManager.getChunkHolder(pos); ++ return holder == null ? null : holder.vanillaChunkHolder; ++ // Paper end - rewrite chunk system + } + + protected IntSupplier getChunkQueueLevel(long pos) { +- return () -> { +- ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos); +- +- return playerchunk == null ? ChunkTaskPriorityQueue.PRIORITY_LEVEL_COUNT - 1 : Math.min(playerchunk.getQueueLevel(), ChunkTaskPriorityQueue.PRIORITY_LEVEL_COUNT - 1); +- }; ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public String getChunkDebugData(ChunkPos chunkPos) { +@@ -391,84 +368,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + // Paper end + + private CompletableFuture, ChunkHolder.ChunkLoadingFailure>> getChunkRangeFuture(ChunkHolder centerChunk, int margin, IntFunction distanceToStatus) { +- if (margin == 0) { +- ChunkStatus chunkstatus = (ChunkStatus) distanceToStatus.apply(0); +- +- return centerChunk.getOrScheduleFuture(chunkstatus, this).thenApply((either) -> { +- return either.mapLeft(List::of); +- }); +- } else { +- List>> list = new ArrayList(); +- List list1 = new ArrayList(); +- ChunkPos chunkcoordintpair = centerChunk.getPos(); +- int j = chunkcoordintpair.x; +- int k = chunkcoordintpair.z; +- +- for (int l = -margin; l <= margin; ++l) { +- for (int i1 = -margin; i1 <= margin; ++i1) { +- int j1 = Math.max(Math.abs(i1), Math.abs(l)); +- final ChunkPos chunkcoordintpair1 = new ChunkPos(j + i1, k + l); +- long k1 = chunkcoordintpair1.toLong(); +- ChunkHolder playerchunk1 = this.getUpdatingChunkIfPresent(k1); +- +- if (playerchunk1 == null) { +- return CompletableFuture.completedFuture(Either.right(new ChunkHolder.ChunkLoadingFailure() { +- public String toString() { +- return "Unloaded " + chunkcoordintpair1; +- } +- })); +- } +- +- ChunkStatus chunkstatus1 = (ChunkStatus) distanceToStatus.apply(j1); +- CompletableFuture> completablefuture = playerchunk1.getOrScheduleFuture(chunkstatus1, this); +- +- list1.add(playerchunk1); +- list.add(completablefuture); +- } +- } +- +- CompletableFuture>> completablefuture1 = Util.sequence(list); +- CompletableFuture, ChunkHolder.ChunkLoadingFailure>> completablefuture2 = completablefuture1.thenApply((list2) -> { +- List list3 = Lists.newArrayList(); +- // CraftBukkit start - decompile error +- int cnt = 0; +- +- for (Iterator iterator = list2.iterator(); iterator.hasNext(); ++cnt) { +- final int l1 = cnt; +- // CraftBukkit end +- final Either either = (Either) iterator.next(); +- +- if (either == null) { +- throw this.debugFuturesAndCreateReportedException(new IllegalStateException("At least one of the chunk futures were null"), "n/a"); +- } +- +- Optional optional = either.left(); +- +- if (!optional.isPresent()) { +- return Either.right(new ChunkHolder.ChunkLoadingFailure() { +- public String toString() { +- ChunkPos chunkcoordintpair2 = new ChunkPos(j + l1 % (margin * 2 + 1), k + l1 / (margin * 2 + 1)); +- +- return "Unloaded " + chunkcoordintpair2 + " " + either.right().get(); +- } +- }); +- } +- +- list3.add((ChunkAccess) optional.get()); +- } +- +- return Either.left(list3); +- }); +- Iterator iterator = list1.iterator(); +- +- 
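Both lookups above now resolve through the Paper scheduler rather than the removed updating/visible maps. A minimal sketch of the equivalent direct lookup, assuming level, chunkX and chunkZ are in scope:

    io.papermc.paper.chunk.system.scheduling.NewChunkHolder newHolder =
            level.chunkTaskScheduler.chunkHolderManager.getChunkHolder(ChunkPos.asLong(chunkX, chunkZ));
    ChunkHolder vanillaView = newHolder == null ? null : newHolder.vanillaChunkHolder;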
while (iterator.hasNext()) { +- ChunkHolder playerchunk2 = (ChunkHolder) iterator.next(); +- +- playerchunk2.addSaveDependency("getChunkRangeFuture " + chunkcoordintpair + " " + margin, completablefuture2); +- } +- +- return completablefuture2; +- } ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public ReportedException debugFuturesAndCreateReportedException(IllegalStateException exception, String details) { +@@ -498,263 +398,72 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + } + + public CompletableFuture> prepareEntityTickingChunk(ChunkHolder chunk) { +- return this.getChunkRangeFuture(chunk, 2, (i) -> { +- return ChunkStatus.FULL; +- }).thenApplyAsync((either) -> { +- return either.mapLeft((list) -> { +- return (LevelChunk) list.get(list.size() / 2); +- }); +- }, this.mainThreadExecutor); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + @Nullable + ChunkHolder updateChunkScheduling(long pos, int level, @Nullable ChunkHolder holder, int k) { +- if (!ChunkLevel.isLoaded(k) && !ChunkLevel.isLoaded(level)) { +- return holder; +- } else { +- if (holder != null) { +- holder.setTicketLevel(level); +- } +- +- if (holder != null) { +- if (!ChunkLevel.isLoaded(level)) { +- this.toDrop.add(pos); +- } else { +- this.toDrop.remove(pos); +- } +- } +- +- if (ChunkLevel.isLoaded(level) && holder == null) { +- holder = (ChunkHolder) this.pendingUnloads.remove(pos); +- if (holder != null) { +- holder.setTicketLevel(level); +- } else { +- holder = new ChunkHolder(new ChunkPos(pos), level, this.level, this.lightEngine, this.queueSorter, this); +- // Paper start +- io.papermc.paper.chunk.system.ChunkSystem.onChunkHolderCreate(this.level, holder); +- // Paper end +- } +- +- // Paper start +- holder.onChunkAdd(); +- // Paper end +- this.updatingChunkMap.put(pos, holder); +- this.modified = true; +- } +- +- return holder; +- } ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + @Override + public void close() throws IOException { +- try { +- this.queueSorter.close(); +- this.poiManager.close(); +- } finally { +- super.close(); +- } ++ throw new UnsupportedOperationException("Use ServerChunkCache#close"); // Paper - rewrite chunk system ++ } + ++ // Paper start - rewrite chunk system ++ protected void saveIncrementally() { ++ this.level.chunkTaskScheduler.chunkHolderManager.autoSave(); // Paper - rewrite chunk system + } ++ // Paper end - - rewrite chunk system + + protected void saveAllChunks(boolean flush) { +- if (flush) { +- List list = (List) io.papermc.paper.chunk.system.ChunkSystem.getVisibleChunkHolders(this.level).stream().filter(ChunkHolder::wasAccessibleSinceLastSave).peek(ChunkHolder::refreshAccessibility).collect(Collectors.toList()); // Paper +- MutableBoolean mutableboolean = new MutableBoolean(); +- +- do { +- mutableboolean.setFalse(); +- list.stream().map((playerchunk) -> { +- CompletableFuture completablefuture; +- +- do { +- completablefuture = playerchunk.getChunkToSave(); +- BlockableEventLoop iasynctaskhandler = this.mainThreadExecutor; +- +- Objects.requireNonNull(completablefuture); +- iasynctaskhandler.managedBlock(completablefuture::isDone); +- } while (completablefuture != playerchunk.getChunkToSave()); +- +- return (ChunkAccess) completablefuture.join(); +- }).filter((ichunkaccess) -> { +- return ichunkaccess instanceof ImposterProtoChunk || ichunkaccess instanceof LevelChunk; +- }).filter(this::save).forEach((ichunkaccess) -> { +- 
mutableboolean.setTrue(); +- }); +- } while (mutableboolean.isTrue()); +- +- this.processUnloads(() -> { +- return true; +- }); +- this.flushWorker(); +- } else { +- io.papermc.paper.chunk.system.ChunkSystem.getVisibleChunkHolders(this.level).forEach(this::saveChunkIfNeeded); +- } +- ++ this.level.chunkTaskScheduler.chunkHolderManager.saveAllChunks(flush, false, false); // Paper - rewrite chunk system + } + + protected void tick(BooleanSupplier shouldKeepTicking) { + ProfilerFiller gameprofilerfiller = this.level.getProfiler(); + ++ try (Timing ignored = this.level.timings.poiUnload.startTiming()) { // Paper + gameprofilerfiller.push("poi"); + this.poiManager.tick(shouldKeepTicking); ++ } // Paper + gameprofilerfiller.popPush("chunk_unload"); + if (!this.level.noSave()) { ++ try (Timing ignored = this.level.timings.chunkUnload.startTiming()) { // Paper + this.processUnloads(shouldKeepTicking); ++ } // Paper + } + + gameprofilerfiller.pop(); + } + + public boolean hasWork() { +- return this.lightEngine.hasLightWork() || !this.pendingUnloads.isEmpty() || io.papermc.paper.chunk.system.ChunkSystem.hasAnyChunkHolders(this.level) || this.poiManager.hasWork() || !this.toDrop.isEmpty() || !this.unloadQueue.isEmpty() || this.queueSorter.hasWork() || this.distanceManager.hasTickets(); // Paper ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + private void processUnloads(BooleanSupplier shouldKeepTicking) { +- LongIterator longiterator = this.toDrop.iterator(); +- +- for (int i = 0; longiterator.hasNext() && (shouldKeepTicking.getAsBoolean() || i < 200 || this.toDrop.size() > 2000); longiterator.remove()) { +- long j = longiterator.nextLong(); +- ChunkHolder playerchunk = (ChunkHolder) this.updatingChunkMap.remove(j); +- +- if (playerchunk != null) { +- playerchunk.onChunkRemove(); // Paper +- this.pendingUnloads.put(j, playerchunk); +- this.modified = true; +- ++i; +- this.scheduleUnload(j, playerchunk); +- } +- } +- +- int k = Math.max(0, this.unloadQueue.size() - 2000); +- +- Runnable runnable; +- +- while ((shouldKeepTicking.getAsBoolean() || k > 0) && (runnable = (Runnable) this.unloadQueue.poll()) != null) { +- --k; +- runnable.run(); +- } +- +- int l = 0; +- Iterator objectiterator = io.papermc.paper.chunk.system.ChunkSystem.getVisibleChunkHolders(this.level).iterator(); // Paper +- +- while (l < 20 && shouldKeepTicking.getAsBoolean() && objectiterator.hasNext()) { +- if (this.saveChunkIfNeeded((ChunkHolder) objectiterator.next())) { +- ++l; +- } +- } ++ this.level.chunkTaskScheduler.chunkHolderManager.processUnloads(); // Paper - rewrite chunk system + + } + + private void scheduleUnload(long pos, ChunkHolder holder) { +- CompletableFuture completablefuture = holder.getChunkToSave(); +- Consumer consumer = (ichunkaccess) -> { // CraftBukkit - decompile error +- CompletableFuture completablefuture1 = holder.getChunkToSave(); +- +- if (completablefuture1 != completablefuture) { +- this.scheduleUnload(pos, holder); +- } else { +- // Paper start +- boolean removed; +- if ((removed = this.pendingUnloads.remove(pos, holder)) && ichunkaccess != null) { +- io.papermc.paper.chunk.system.ChunkSystem.onChunkHolderDelete(this.level, holder); +- // Paper end +- if (ichunkaccess instanceof LevelChunk) { +- ((LevelChunk) ichunkaccess).setLoaded(false); +- } +- +- this.save(ichunkaccess); +- if (this.entitiesInLevel.remove(pos) && ichunkaccess instanceof LevelChunk) { +- LevelChunk chunk = (LevelChunk) ichunkaccess; +- +- this.level.unload(chunk); +- } +- +- 
this.lightEngine.updateChunkStatus(ichunkaccess.getPos()); +- this.lightEngine.tryScheduleUpdate(); +- this.progressListener.onStatusChange(ichunkaccess.getPos(), (ChunkStatus) null); +- this.chunkSaveCooldowns.remove(ichunkaccess.getPos().toLong()); +- } else if (removed) { // Paper start +- io.papermc.paper.chunk.system.ChunkSystem.onChunkHolderDelete(this.level, holder); +- } // Paper end +- +- } +- }; +- Queue queue = this.unloadQueue; +- +- Objects.requireNonNull(this.unloadQueue); +- completablefuture.thenAcceptAsync(consumer, queue::add).whenComplete((ovoid, throwable) -> { +- if (throwable != null) { +- ChunkMap.LOGGER.error("Failed to save chunk {}", holder.getPos(), throwable); +- } +- +- }); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + protected boolean promoteChunkMap() { +- if (!this.modified) { +- return false; +- } else { +- this.visibleChunkMap = this.updatingChunkMap.clone(); +- this.modified = false; +- return true; +- } ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public CompletableFuture> schedule(ChunkHolder holder, ChunkStatus requiredStatus) { +- ChunkPos chunkcoordintpair = holder.getPos(); +- +- if (requiredStatus == ChunkStatus.EMPTY) { +- return this.scheduleChunkLoad(chunkcoordintpair); +- } else { +- if (requiredStatus == ChunkStatus.LIGHT) { +- this.distanceManager.addTicket(TicketType.LIGHT, chunkcoordintpair, ChunkLevel.byStatus(ChunkStatus.LIGHT), chunkcoordintpair); +- } +- +- if (!requiredStatus.hasLoadDependencies()) { +- Optional optional = ((Either) holder.getOrScheduleFuture(requiredStatus.getParent(), this).getNow(ChunkHolder.UNLOADED_CHUNK)).left(); +- +- if (optional.isPresent() && ((ChunkAccess) optional.get()).getStatus().isOrAfter(requiredStatus)) { +- CompletableFuture> completablefuture = requiredStatus.load(this.level, this.structureTemplateManager, this.lightEngine, (ichunkaccess) -> { +- return this.protoChunkToFullChunk(holder); +- }, (ChunkAccess) optional.get()); +- +- this.progressListener.onStatusChange(chunkcoordintpair, requiredStatus); +- return completablefuture; +- } +- } +- +- return this.scheduleChunkGeneration(holder, requiredStatus); +- } ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + private CompletableFuture> scheduleChunkLoad(ChunkPos pos) { +- return this.readChunk(pos).thenApply((optional) -> { +- return optional.filter((nbttagcompound) -> { +- boolean flag = ChunkMap.isChunkDataValid(nbttagcompound); +- +- if (!flag) { +- ChunkMap.LOGGER.error("Chunk file at {} is missing level data, skipping", pos); +- } +- +- return flag; +- }); +- }).thenApplyAsync((optional) -> { +- this.level.getProfiler().incrementCounter("chunkLoad"); +- if (optional.isPresent()) { +- ProtoChunk protochunk = ChunkSerializer.read(this.level, this.poiManager, pos, (CompoundTag) optional.get()); +- +- this.markPosition(pos, protochunk.getStatus().getChunkType()); +- return Either.left(protochunk); // CraftBukkit - decompile error +- } else { +- return Either.left(this.createEmptyChunk(pos)); // CraftBukkit - decompile error +- } +- }, this.mainThreadExecutor).exceptionallyAsync((throwable) -> { +- return this.handleChunkLoadFailure(throwable, pos); +- }, this.mainThreadExecutor); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + +- private static boolean isChunkDataValid(CompoundTag nbt) { ++ public static boolean isChunkDataValid(CompoundTag nbt) { // Paper - async chunk loading + return 
nbt.contains("Status", 8); + } + +@@ -790,54 +499,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + } + + private CompletableFuture> scheduleChunkGeneration(ChunkHolder holder, ChunkStatus requiredStatus) { +- ChunkPos chunkcoordintpair = holder.getPos(); +- CompletableFuture, ChunkHolder.ChunkLoadingFailure>> completablefuture = this.getChunkRangeFuture(holder, requiredStatus.getRange(), (i) -> { +- return this.getDependencyStatus(requiredStatus, i); +- }); +- +- this.level.getProfiler().incrementCounter(() -> { +- return "chunkGenerate " + requiredStatus; +- }); +- Executor executor = (runnable) -> { +- this.worldgenMailbox.tell(ChunkTaskPriorityQueueSorter.message(holder, runnable)); +- }; +- +- return completablefuture.thenComposeAsync((either) -> { +- return (CompletionStage) either.map((list) -> { +- try { +- ChunkAccess ichunkaccess = (ChunkAccess) list.get(list.size() / 2); +- CompletableFuture completablefuture1; +- +- if (ichunkaccess.getStatus().isOrAfter(requiredStatus)) { +- completablefuture1 = requiredStatus.load(this.level, this.structureTemplateManager, this.lightEngine, (ichunkaccess1) -> { +- return this.protoChunkToFullChunk(holder); +- }, ichunkaccess); +- } else { +- completablefuture1 = requiredStatus.generate(executor, this.level, this.generator, this.structureTemplateManager, this.lightEngine, (ichunkaccess1) -> { +- return this.protoChunkToFullChunk(holder); +- }, list); +- } +- +- this.progressListener.onStatusChange(chunkcoordintpair, requiredStatus); +- return completablefuture1; +- } catch (Exception exception) { +- exception.getStackTrace(); +- CrashReport crashreport = CrashReport.forThrowable(exception, "Exception generating new chunk"); +- CrashReportCategory crashreportsystemdetails = crashreport.addCategory("Chunk to be generated"); +- +- crashreportsystemdetails.setDetail("Location", (Object) String.format(Locale.ROOT, "%d,%d", chunkcoordintpair.x, chunkcoordintpair.z)); +- crashreportsystemdetails.setDetail("Position hash", (Object) ChunkPos.asLong(chunkcoordintpair.x, chunkcoordintpair.z)); +- crashreportsystemdetails.setDetail("Generator", (Object) this.generator); +- this.mainThreadExecutor.execute(() -> { +- throw new ReportedException(crashreport); +- }); +- throw new ReportedException(crashreport); +- } +- }, (playerchunk_failure) -> { +- this.releaseLightTicket(chunkcoordintpair); +- return CompletableFuture.completedFuture(Either.right(playerchunk_failure)); +- }); +- }, executor); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + protected void releaseLightTicket(ChunkPos pos) { +@@ -848,7 +510,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + })); + } + +- private ChunkStatus getDependencyStatus(ChunkStatus centerChunkTargetStatus, int distance) { ++ public static ChunkStatus getDependencyStatus(ChunkStatus centerChunkTargetStatus, int distance) { // Paper -> public, static + ChunkStatus chunkstatus1; + + if (distance == 0) { +@@ -860,7 +522,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + return chunkstatus1; + } + +- private static void postLoadProtoChunk(ServerLevel world, List nbt) { ++ public static void postLoadProtoChunk(ServerLevel world, List nbt) { // Paper - public + if (!nbt.isEmpty()) { + // CraftBukkit start - these are spawned serialized (DefinedStructure) and we don't call an add event below at the moment due to ordering complexities + 
world.addWorldGenChunkEntities(EntityType.loadEntitiesRecursive(nbt, world).filter((entity) -> { +@@ -882,93 +544,15 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + } + + private CompletableFuture> protoChunkToFullChunk(ChunkHolder chunkHolder) { +- CompletableFuture> completablefuture = chunkHolder.getFutureIfPresentUnchecked(ChunkStatus.FULL.getParent()); +- +- return completablefuture.thenApplyAsync((either) -> { +- ChunkStatus chunkstatus = ChunkLevel.generationStatus(chunkHolder.getTicketLevel()); +- +- return !chunkstatus.isOrAfter(ChunkStatus.FULL) ? ChunkHolder.UNLOADED_CHUNK : either.mapLeft((ichunkaccess) -> { +- try (Timing ignored = level.timings.chunkPostLoad.startTimingIfSync()) { // Paper +- ChunkPos chunkcoordintpair = chunkHolder.getPos(); +- ProtoChunk protochunk = (ProtoChunk) ichunkaccess; +- LevelChunk chunk; +- +- if (protochunk instanceof ImposterProtoChunk) { +- chunk = ((ImposterProtoChunk) protochunk).getWrapped(); +- } else { +- chunk = new LevelChunk(this.level, protochunk, (chunk1) -> { +- ChunkMap.postLoadProtoChunk(this.level, protochunk.getEntities()); +- }); +- chunkHolder.replaceProtoChunk(new ImposterProtoChunk(chunk, false)); +- } +- +- chunk.setFullStatus(() -> { +- return ChunkLevel.fullStatus(chunkHolder.getTicketLevel()); +- }); +- chunk.runPostLoad(); +- if (this.entitiesInLevel.add(chunkcoordintpair.toLong())) { +- chunk.setLoaded(true); +- chunk.registerAllBlockEntitiesAfterLevelLoad(); +- chunk.registerTickContainerInLevel(this.level); +- } +- +- return chunk; +- } // Paper +- }); +- }, (runnable) -> { +- ProcessorHandle mailbox = this.mainThreadMailbox; +- long i = chunkHolder.getPos().toLong(); +- +- Objects.requireNonNull(chunkHolder); +- mailbox.tell(ChunkTaskPriorityQueueSorter.message(runnable, i, chunkHolder::getTicketLevel)); +- }); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public CompletableFuture> prepareTickingChunk(ChunkHolder holder) { +- CompletableFuture, ChunkHolder.ChunkLoadingFailure>> completablefuture = this.getChunkRangeFuture(holder, 1, (i) -> { +- return ChunkStatus.FULL; +- }); +- CompletableFuture> completablefuture1 = completablefuture.thenApplyAsync((either) -> { +- return either.mapLeft((list) -> { +- return (LevelChunk) list.get(list.size() / 2); +- }); +- }, (runnable) -> { +- this.mainThreadMailbox.tell(ChunkTaskPriorityQueueSorter.message(holder, runnable)); +- }).thenApplyAsync((either) -> { +- return either.ifLeft((chunk) -> { +- chunk.postProcessGeneration(); +- this.level.startTickingChunk(chunk); +- }); +- }, this.mainThreadExecutor); +- +- completablefuture1.handle((either, throwable) -> { +- this.tickingGenerated.getAndIncrement(); +- return null; +- }); +- completablefuture1.thenAcceptAsync((either) -> { +- either.ifLeft((chunk) -> { +- MutableObject mutableobject = new MutableObject(); +- +- this.getPlayers(holder.getPos(), false).forEach((entityplayer) -> { +- this.playerLoadedChunk(entityplayer, mutableobject, chunk); +- }); +- }); +- }, (runnable) -> { +- this.mainThreadMailbox.tell(ChunkTaskPriorityQueueSorter.message(holder, runnable)); +- }); +- return completablefuture1; ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public CompletableFuture> prepareAccessibleChunk(ChunkHolder holder) { +- return this.getChunkRangeFuture(holder, 1, ChunkStatus::getStatusAroundFullChunk).thenApplyAsync((either) -> { +- return either.mapLeft((list) -> { +- LevelChunk chunk = (LevelChunk) 
list.get(list.size() / 2); +- +- return chunk; +- }); +- }, (runnable) -> { +- this.mainThreadMailbox.tell(ChunkTaskPriorityQueueSorter.message(holder, runnable)); +- }); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public int getTickingGenerated() { +@@ -976,94 +560,22 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + } + + private boolean saveChunkIfNeeded(ChunkHolder chunkHolder) { +- if (!chunkHolder.wasAccessibleSinceLastSave()) { +- return false; +- } else { +- ChunkAccess ichunkaccess = (ChunkAccess) chunkHolder.getChunkToSave().getNow(null); // CraftBukkit - decompile error +- +- if (!(ichunkaccess instanceof ImposterProtoChunk) && !(ichunkaccess instanceof LevelChunk)) { +- return false; +- } else { +- long i = ichunkaccess.getPos().toLong(); +- long j = this.chunkSaveCooldowns.getOrDefault(i, -1L); +- long k = System.currentTimeMillis(); +- +- if (k < j) { +- return false; +- } else { +- boolean flag = this.save(ichunkaccess); +- +- chunkHolder.refreshAccessibility(); +- if (flag) { +- this.chunkSaveCooldowns.put(i, k + 10000L); +- } +- +- return flag; +- } +- } +- } ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public boolean save(ChunkAccess chunk) { +- this.poiManager.flush(chunk.getPos()); +- if (!chunk.isUnsaved()) { +- return false; +- } else { +- chunk.setUnsaved(false); +- ChunkPos chunkcoordintpair = chunk.getPos(); +- +- try { +- ChunkStatus chunkstatus = chunk.getStatus(); +- +- if (chunkstatus.getChunkType() != ChunkStatus.ChunkType.LEVELCHUNK) { +- if (this.isExistingChunkFull(chunkcoordintpair)) { +- return false; +- } +- +- if (chunkstatus == ChunkStatus.EMPTY && chunk.getAllStarts().values().stream().noneMatch(StructureStart::isValid)) { +- return false; +- } +- } +- +- this.level.getProfiler().incrementCounter("chunkSave"); +- CompoundTag nbttagcompound = ChunkSerializer.write(this.level, chunk); +- +- this.write(chunkcoordintpair, nbttagcompound); +- this.markPosition(chunkcoordintpair, chunkstatus.getChunkType()); +- return true; +- } catch (Exception exception) { +- ChunkMap.LOGGER.error("Failed to save chunk {},{}", new Object[]{chunkcoordintpair.x, chunkcoordintpair.z, exception}); +- return false; +- } +- } ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + private boolean isExistingChunkFull(ChunkPos pos) { +- byte b0 = this.chunkTypeCache.get(pos.toLong()); +- +- if (b0 != 0) { +- return b0 == 1; +- } else { +- CompoundTag nbttagcompound; +- +- try { +- nbttagcompound = (CompoundTag) ((Optional) this.readChunk(pos).join()).orElse((Object) null); +- if (nbttagcompound == null) { +- this.markPositionReplaceable(pos); +- return false; +- } +- } catch (Exception exception) { +- ChunkMap.LOGGER.error("Failed to read chunk {}", pos, exception); +- this.markPositionReplaceable(pos); +- return false; +- } +- +- ChunkStatus.ChunkType chunkstatus_type = ChunkSerializer.getChunkTypeFromTag(nbttagcompound); +- +- return this.markPosition(pos, chunkstatus_type) == 1; +- } ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + ++ // Paper start - replace player loader system ++ public void setTickViewDistance(int distance) { ++ this.playerChunkManager.setTickDistance(distance); ++ } ++ // Paper end - replace player loader system + public void setViewDistance(int watchDistance) { + int j = Mth.clamp(watchDistance, 2, 32); + +@@ -1071,33 +583,18 @@ public class ChunkMap extends ChunkStorage 
implements ChunkHolder.PlayerProvider + int k = this.viewDistance; + + this.viewDistance = j; +- this.distanceManager.updatePlayerTickets(this.viewDistance); +- Iterator objectiterator = io.papermc.paper.chunk.system.ChunkSystem.getUpdatingChunkHolders(this.level).iterator(); // Paper +- +- while (objectiterator.hasNext()) { +- ChunkHolder playerchunk = (ChunkHolder) objectiterator.next(); +- ChunkPos chunkcoordintpair = playerchunk.getPos(); +- MutableObject mutableobject = new MutableObject(); +- +- this.getPlayers(chunkcoordintpair, false).forEach((entityplayer) -> { +- SectionPos sectionposition = entityplayer.getLastSectionPos(); +- boolean flag = ChunkMap.isChunkInRange(chunkcoordintpair.x, chunkcoordintpair.z, sectionposition.x(), sectionposition.z(), k); +- boolean flag1 = ChunkMap.isChunkInRange(chunkcoordintpair.x, chunkcoordintpair.z, sectionposition.x(), sectionposition.z(), this.viewDistance); +- +- this.updateChunkTracking(entityplayer, chunkcoordintpair, mutableobject, flag, flag1); +- }); +- } ++ this.playerChunkManager.setLoadDistance(this.viewDistance); // Paper - replace player loader system + } + + } + +- protected void updateChunkTracking(ServerPlayer player, ChunkPos pos, MutableObject packet, boolean oldWithinViewDistance, boolean newWithinViewDistance) { ++ public void updateChunkTracking(ServerPlayer player, ChunkPos pos, MutableObject packet, boolean oldWithinViewDistance, boolean newWithinViewDistance) { // Paper - public + if (player.level() == this.level) { + if (newWithinViewDistance && !oldWithinViewDistance) { + ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos.toLong()); + + if (playerchunk != null) { +- LevelChunk chunk = playerchunk.getTickingChunk(); ++ LevelChunk chunk = playerchunk.getSendingChunk(); // Paper - replace chunk loader system + + if (chunk != null) { + this.playerLoadedChunk(player, packet, chunk); +@@ -1127,30 +624,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + } + + void dumpChunks(Writer writer) throws IOException { +- CsvOutput csvwriter = CsvOutput.builder().addColumn("x").addColumn("z").addColumn("level").addColumn("in_memory").addColumn("status").addColumn("full_status").addColumn("accessible_ready").addColumn("ticking_ready").addColumn("entity_ticking_ready").addColumn("ticket").addColumn("spawning").addColumn("block_entity_count").addColumn("ticking_ticket").addColumn("ticking_level").addColumn("block_ticks").addColumn("fluid_ticks").build(writer); +- TickingTracker tickingtracker = this.distanceManager.tickingTracker(); +- Iterator objectbidirectionaliterator = io.papermc.paper.chunk.system.ChunkSystem.getVisibleChunkHolders(this.level).iterator(); // Paper +- +- while (objectbidirectionaliterator.hasNext()) { +- ChunkHolder playerchunk = objectbidirectionaliterator.next(); // Paper +- long i = playerchunk.pos.toLong(); // Paper +- ChunkPos chunkcoordintpair = new ChunkPos(i); +- // Paper +- Optional optional = Optional.ofNullable(playerchunk.getLastAvailable()); +- Optional optional1 = optional.flatMap((ichunkaccess) -> { +- return ichunkaccess instanceof LevelChunk ? 
Optional.of((LevelChunk) ichunkaccess) : Optional.empty(); +- }); +- +- // CraftBukkit - decompile error +- csvwriter.writeRow(chunkcoordintpair.x, chunkcoordintpair.z, playerchunk.getTicketLevel(), optional.isPresent(), optional.map(ChunkAccess::getStatus).orElse(null), optional1.map(LevelChunk::getFullStatus).orElse(null), ChunkMap.printFuture(playerchunk.getFullChunkFuture()), ChunkMap.printFuture(playerchunk.getTickingChunkFuture()), ChunkMap.printFuture(playerchunk.getEntityTickingChunkFuture()), this.distanceManager.getTicketDebugString(i), this.anyPlayerCloseEnoughForSpawning(chunkcoordintpair), optional1.map((chunk) -> { +- return chunk.getBlockEntities().size(); +- }).orElse(0), tickingtracker.getTicketDebugString(i), tickingtracker.getLevel(i), optional1.map((chunk) -> { +- return chunk.getBlockTicks().count(); +- }).orElse(0), optional1.map((chunk) -> { +- return chunk.getFluidTicks().count(); +- }).orElse(0)); +- } +- ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + private static String printFuture(CompletableFuture> future) { +@@ -1169,6 +643,35 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + } + } + ++ // Paper start - Asynchronous chunk io ++ @Nullable ++ @Override ++ public CompoundTag readSync(ChunkPos chunkcoordintpair) throws IOException { ++ // Paper start - rewrite chunk system ++ if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) { ++ return io.papermc.paper.chunk.system.io.RegionFileIOThread.loadData( ++ this.level, chunkcoordintpair.x, chunkcoordintpair.z, io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA, ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.getIOBlockingPriorityForCurrentThread() ++ ); ++ } ++ // Paper end - rewrite chunk system ++ return super.readSync(chunkcoordintpair); ++ } ++ ++ @Override ++ public void write(ChunkPos chunkcoordintpair, CompoundTag nbttagcompound) throws IOException { ++ // Paper start - rewrite chunk system ++ if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) { ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.scheduleSave( ++ this.level, chunkcoordintpair.x, chunkcoordintpair.z, nbttagcompound, ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA); ++ return; ++ } ++ // Paper end - rewrite chunk system ++ super.write(chunkcoordintpair, nbttagcompound); ++ } ++ // Paper end ++ + private CompletableFuture> readChunk(ChunkPos chunkPos) { + return this.read(chunkPos).thenApplyAsync((optional) -> { + return optional.map((nbttagcompound) -> this.upgradeChunkTag(nbttagcompound, chunkPos)); // CraftBukkit +@@ -1272,15 +775,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + this.removePlayerFromDistanceMaps(player); // Paper - distance maps + } + +- for (int k = i - this.viewDistance - 1; k <= i + this.viewDistance + 1; ++k) { +- for (int l = j - this.viewDistance - 1; l <= j + this.viewDistance + 1; ++l) { +- if (ChunkMap.isChunkInRange(k, l, i, j, this.viewDistance)) { +- ChunkPos chunkcoordintpair = new ChunkPos(k, l); +- +- this.updateChunkTracking(player, chunkcoordintpair, new MutableObject(), !added, added); +- } +- } +- } ++ // Paper - handled by player chunk loader + + } + +@@ -1288,7 +783,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + SectionPos sectionposition = SectionPos.of((EntityAccess) player); + + player.setLastSectionPos(sectionposition); +- 
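The readSync/write overrides added above route chunk NBT through the RegionFileIOThread whenever the caller is not already on that thread. A hedged sketch of an off-thread load and save using the same calls; serverLevel, chunkX and chunkZ are assumed variables.

    CompoundTag data = io.papermc.paper.chunk.system.io.RegionFileIOThread.loadData(
            serverLevel, chunkX, chunkZ,
            io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA,
            io.papermc.paper.chunk.system.io.RegionFileIOThread.getIOBlockingPriorityForCurrentThread());
    // ... mutate 'data' as needed, then queue an asynchronous save of the same chunk:
    io.papermc.paper.chunk.system.io.RegionFileIOThread.scheduleSave(
            serverLevel, chunkX, chunkZ, data,
            io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA);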
player.connection.send(new ClientboundSetChunkCacheCenterPacket(sectionposition.x(), sectionposition.z())); ++ //player.connection.send(new ClientboundSetChunkCacheCenterPacket(sectionposition.x(), sectionposition.z())); // Paper - handled by player chunk loader + return sectionposition; + } + +@@ -1344,65 +839,38 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + int l1; + int i2; + +- if (Math.abs(i1 - i) <= k1 * 2 && Math.abs(j1 - j) <= k1 * 2) { +- l1 = Math.min(i, i1) - k1; +- i2 = Math.min(j, j1) - k1; +- int j2 = Math.max(i, i1) + k1; +- int k2 = Math.max(j, j1) + k1; +- +- for (int l2 = l1; l2 <= j2; ++l2) { +- for (int i3 = i2; i3 <= k2; ++i3) { +- boolean flag3 = ChunkMap.isChunkInRange(l2, i3, i1, j1, this.viewDistance); +- boolean flag4 = ChunkMap.isChunkInRange(l2, i3, i, j, this.viewDistance); +- +- this.updateChunkTracking(player, new ChunkPos(l2, i3), new MutableObject(), flag3, flag4); +- } +- } +- } else { +- boolean flag5; +- boolean flag6; +- +- for (l1 = i1 - k1; l1 <= i1 + k1; ++l1) { +- for (i2 = j1 - k1; i2 <= j1 + k1; ++i2) { +- if (ChunkMap.isChunkInRange(l1, i2, i1, j1, this.viewDistance)) { +- flag5 = true; +- flag6 = false; +- this.updateChunkTracking(player, new ChunkPos(l1, i2), new MutableObject(), true, false); +- } +- } +- } +- +- for (l1 = i - k1; l1 <= i + k1; ++l1) { +- for (i2 = j - k1; i2 <= j + k1; ++i2) { +- if (ChunkMap.isChunkInRange(l1, i2, i, j, this.viewDistance)) { +- flag5 = false; +- flag6 = true; +- this.updateChunkTracking(player, new ChunkPos(l1, i2), new MutableObject(), false, true); +- } +- } +- } +- } ++ // Paper - replaced by PlayerChunkLoader + + this.updateMaps(player); // Paper - distance maps ++ this.playerChunkManager.updatePlayer(player); // Paper - respond to movement immediately + + } + + @Override + public List getPlayers(ChunkPos chunkPos, boolean onlyOnWatchDistanceEdge) { +- Set set = this.playerMap.getPlayers(chunkPos.toLong()); +- Builder builder = ImmutableList.builder(); +- Iterator iterator = set.iterator(); ++ // Paper start - per player view distance ++ // there can be potential desync with player's last mapped section and the view distance map, so use the ++ // view distance map here. 
++ com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet players = this.playerChunkManager.broadcastMap.getObjectsInRange(chunkPos); ++ if (players == null) { ++ return java.util.Collections.emptyList(); ++ } + +- while (iterator.hasNext()) { +- ServerPlayer entityplayer = (ServerPlayer) iterator.next(); +- SectionPos sectionposition = entityplayer.getLastSectionPos(); ++ List ret = new java.util.ArrayList<>(players.size()); + +- if (onlyOnWatchDistanceEdge && ChunkMap.isChunkOnRangeBorder(chunkPos.x, chunkPos.z, sectionposition.x(), sectionposition.z(), this.viewDistance) || !onlyOnWatchDistanceEdge && ChunkMap.isChunkInRange(chunkPos.x, chunkPos.z, sectionposition.x(), sectionposition.z(), this.viewDistance)) { +- builder.add(entityplayer); ++ Object[] backingSet = players.getBackingSet(); ++ for (int i = 0, len = backingSet.length; i < len; ++i) { ++ if (!(backingSet[i] instanceof ServerPlayer player)) { ++ continue; ++ } ++ if (!this.playerChunkManager.isChunkSent(player, chunkPos.x, chunkPos.z, onlyOnWatchDistanceEdge)) { ++ continue; + } ++ ret.add(player); + } + +- return builder.build(); ++ return ret; ++ // Paper end - per player view distance + } + + public void addEntity(Entity entity) { +@@ -1630,7 +1098,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + + @Override + protected boolean isChunkToRemove(long pos) { +- return ChunkMap.this.toDrop.contains(pos); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + @Nullable +@@ -1711,7 +1179,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + org.spigotmc.AsyncCatcher.catchOp("player tracker update"); // Spigot + if (player != this.entity) { + Vec3 vec3d = player.position().subtract(this.entity.position()); +- double d0 = (double) Math.min(this.getEffectiveRange(), ChunkMap.this.viewDistance * 16); ++ double d0 = (double) Math.min(this.getEffectiveRange(), io.papermc.paper.chunk.PlayerChunkLoader.getSendViewDistance(player) * 16); // Paper - per player view distance + double d1 = vec3d.x * vec3d.x + vec3d.z * vec3d.z; + double d2 = d0 * d0; + boolean flag = d1 <= d2 && this.entity.broadcastToPlayer(player); +diff --git a/src/main/java/net/minecraft/server/level/DistanceManager.java b/src/main/java/net/minecraft/server/level/DistanceManager.java +index f3c9a3dbb6f0e6f825b7477c89ed72ed52845419..32a07573ee23f01c98e684aefb45ff72802c9db6 100644 +--- a/src/main/java/net/minecraft/server/level/DistanceManager.java ++++ b/src/main/java/net/minecraft/server/level/DistanceManager.java +@@ -39,65 +39,28 @@ import org.slf4j.Logger; + + public abstract class DistanceManager { + ++ // Paper start - rewrite chunk system ++ public io.papermc.paper.chunk.system.scheduling.ChunkHolderManager getChunkHolderManager() { ++ return this.chunkMap.level.chunkTaskScheduler.chunkHolderManager; ++ } ++ // Paper end - rewrite chunk system ++ + static final Logger LOGGER = LogUtils.getLogger(); + static final int PLAYER_TICKET_LEVEL = ChunkLevel.byStatus(FullChunkStatus.ENTITY_TICKING); + private static final int INITIAL_TICKET_LIST_CAPACITY = 4; + final Long2ObjectMap> playersPerChunk = new Long2ObjectOpenHashMap(); +- public final Long2ObjectOpenHashMap>> tickets = new Long2ObjectOpenHashMap(); +- private final DistanceManager.ChunkTicketTracker ticketTracker = new DistanceManager.ChunkTicketTracker(); ++ // Paper - rewrite chunk system + private final DistanceManager.FixedPlayerDistanceChunkTracker naturalSpawnChunkCounter 
= new DistanceManager.FixedPlayerDistanceChunkTracker(8); +- private final TickingTracker tickingTicketsTracker = new TickingTracker(); +- private final DistanceManager.PlayerTicketTracker playerTicketManager = new DistanceManager.PlayerTicketTracker(32); +- final Set chunksToUpdateFutures = Sets.newHashSet(); +- final ChunkTaskPriorityQueueSorter ticketThrottler; +- final ProcessorHandle> ticketThrottlerInput; +- final ProcessorHandle ticketThrottlerReleaser; +- final LongSet ticketsToRelease = new LongOpenHashSet(); +- final Executor mainThreadExecutor; +- private long ticketTickCounter; +- private int simulationDistance = 10; ++ // Paper - rewrite chunk system + private final ChunkMap chunkMap; // Paper + + protected DistanceManager(Executor workerExecutor, Executor mainThreadExecutor, ChunkMap chunkMap) { +- Objects.requireNonNull(mainThreadExecutor); +- ProcessorHandle mailbox = ProcessorHandle.of("player ticket throttler", mainThreadExecutor::execute); +- ChunkTaskPriorityQueueSorter chunktaskqueuesorter = new ChunkTaskPriorityQueueSorter(ImmutableList.of(mailbox), workerExecutor, 4); +- +- this.ticketThrottler = chunktaskqueuesorter; +- this.ticketThrottlerInput = chunktaskqueuesorter.getProcessor(mailbox, true); +- this.ticketThrottlerReleaser = chunktaskqueuesorter.getReleaseProcessor(mailbox); +- this.mainThreadExecutor = mainThreadExecutor; ++ // Paper - rewrite chunk system + this.chunkMap = chunkMap; // Paper + } + + protected void purgeStaleTickets() { +- ++this.ticketTickCounter; +- ObjectIterator objectiterator = this.tickets.long2ObjectEntrySet().fastIterator(); +- +- while (objectiterator.hasNext()) { +- Entry>> entry = (Entry) objectiterator.next(); +- Iterator> iterator = ((SortedArraySet) entry.getValue()).iterator(); +- boolean flag = false; +- +- while (iterator.hasNext()) { +- Ticket ticket = (Ticket) iterator.next(); +- +- if (ticket.timedOut(this.ticketTickCounter)) { +- iterator.remove(); +- flag = true; +- this.tickingTicketsTracker.removeTicket(entry.getLongKey(), ticket); +- } +- } +- +- if (flag) { +- this.ticketTracker.update(entry.getLongKey(), DistanceManager.getTicketLevelAt((SortedArraySet) entry.getValue()), false); +- } +- +- if (((SortedArraySet) entry.getValue()).isEmpty()) { +- objectiterator.remove(); +- } +- } +- ++ this.getChunkHolderManager().tick(); // Paper - rewrite chunk system + } + + private static int getTicketLevelAt(SortedArraySet> tickets) { +@@ -113,108 +76,25 @@ public abstract class DistanceManager { + protected abstract ChunkHolder updateChunkScheduling(long pos, int level, @Nullable ChunkHolder holder, int k); + + public boolean runAllUpdates(ChunkMap chunkStorage) { +- this.naturalSpawnChunkCounter.runAllUpdates(); +- this.tickingTicketsTracker.runAllUpdates(); +- this.playerTicketManager.runAllUpdates(); +- int i = Integer.MAX_VALUE - this.ticketTracker.runDistanceUpdates(Integer.MAX_VALUE); +- boolean flag = i != 0; +- +- if (flag) { +- ; +- } +- +- if (!this.chunksToUpdateFutures.isEmpty()) { +- // CraftBukkit start +- // Iterate pending chunk updates with protection against concurrent modification exceptions +- java.util.Iterator iter = this.chunksToUpdateFutures.iterator(); +- int expectedSize = this.chunksToUpdateFutures.size(); +- do { +- ChunkHolder playerchunk = iter.next(); +- iter.remove(); +- expectedSize--; +- +- playerchunk.updateFutures(chunkStorage, this.mainThreadExecutor); +- +- // Reset iterator if set was modified using add() +- if (this.chunksToUpdateFutures.size() != expectedSize) { +- expectedSize = 
this.chunksToUpdateFutures.size(); +- iter = this.chunksToUpdateFutures.iterator(); +- } +- } while (iter.hasNext()); +- // CraftBukkit end +- +- return true; +- } else { +- if (!this.ticketsToRelease.isEmpty()) { +- LongIterator longiterator = this.ticketsToRelease.iterator(); +- +- while (longiterator.hasNext()) { +- long j = longiterator.nextLong(); +- +- if (this.getTickets(j).stream().anyMatch((ticket) -> { +- return ticket.getType() == TicketType.PLAYER; +- })) { +- ChunkHolder playerchunk = chunkStorage.getUpdatingChunkIfPresent(j); +- +- if (playerchunk == null) { +- throw new IllegalStateException(); +- } +- +- CompletableFuture> completablefuture = playerchunk.getEntityTickingChunkFuture(); +- +- completablefuture.thenAccept((either) -> { +- this.mainThreadExecutor.execute(() -> { +- this.ticketThrottlerReleaser.tell(ChunkTaskPriorityQueueSorter.release(() -> { +- }, j, false)); +- }); +- }); +- } +- } +- +- this.ticketsToRelease.clear(); +- } +- +- return flag; +- } ++ return this.getChunkHolderManager().processTicketUpdates(); // Paper - rewrite chunk system + } + + boolean addTicket(long i, Ticket ticket) { // CraftBukkit - void -> boolean +- SortedArraySet> arraysetsorted = this.getTickets(i); +- int j = DistanceManager.getTicketLevelAt(arraysetsorted); +- Ticket ticket1 = (Ticket) arraysetsorted.addOrGet(ticket); +- +- ticket1.setCreatedTick(this.ticketTickCounter); +- if (ticket.getTicketLevel() < j) { +- this.ticketTracker.update(i, ticket.getTicketLevel(), true); +- } +- +- return ticket == ticket1; // CraftBukkit ++ org.spigotmc.AsyncCatcher.catchOp("ChunkMapDistance::addTicket"); // Paper ++ return this.getChunkHolderManager().addTicketAtLevel((TicketType)ticket.getType(), i, ticket.getTicketLevel(), ticket.key); // Paper - rewrite chunk system + } + + boolean removeTicket(long i, Ticket ticket) { // CraftBukkit - void -> boolean +- SortedArraySet> arraysetsorted = this.getTickets(i); +- +- boolean removed = false; // CraftBukkit +- if (arraysetsorted.remove(ticket)) { +- removed = true; // CraftBukkit +- } +- +- if (arraysetsorted.isEmpty()) { +- this.tickets.remove(i); +- } +- +- this.ticketTracker.update(i, DistanceManager.getTicketLevelAt(arraysetsorted), false); +- return removed; // CraftBukkit ++ org.spigotmc.AsyncCatcher.catchOp("ChunkMapDistance::removeTicket"); // Paper ++ return this.getChunkHolderManager().removeTicketAtLevel((TicketType)ticket.getType(), i, ticket.getTicketLevel(), ticket.key); // Paper - rewrite chunk system + } + + public void addTicket(TicketType type, ChunkPos pos, int level, T argument) { +- this.addTicket(pos.toLong(), new Ticket<>(type, level, argument)); ++ this.getChunkHolderManager().addTicketAtLevel(type, pos, level, argument); // Paper - rewrite chunk system + } + + public void removeTicket(TicketType type, ChunkPos pos, int level, T argument) { +- Ticket ticket = new Ticket<>(type, level, argument); +- +- this.removeTicket(pos.toLong(), ticket); ++ this.getChunkHolderManager().removeTicketAtLevel(type, pos, level, argument); // Paper - rewrite chunk system + } + + public void addRegionTicket(TicketType type, ChunkPos pos, int radius, T argument) { +@@ -223,13 +103,7 @@ public abstract class DistanceManager { + } + + public boolean addRegionTicketAtDistance(TicketType tickettype, ChunkPos chunkcoordintpair, int i, T t0) { +- // CraftBukkit end +- Ticket ticket = new Ticket<>(tickettype, ChunkLevel.byStatus(FullChunkStatus.FULL) - i, t0); +- long j = chunkcoordintpair.toLong(); +- +- boolean added = this.addTicket(j, ticket); // 
CraftBukkit +- this.tickingTicketsTracker.addTicket(j, ticket); +- return added; // CraftBukkit ++ return this.getChunkHolderManager().addTicketAtLevel(tickettype, chunkcoordintpair, ChunkLevel.byStatus(FullChunkStatus.FULL) - i, t0); // Paper - rewrite chunk system + } + + public void removeRegionTicket(TicketType type, ChunkPos pos, int radius, T argument) { +@@ -238,31 +112,21 @@ public abstract class DistanceManager { + } + + public boolean removeRegionTicketAtDistance(TicketType tickettype, ChunkPos chunkcoordintpair, int i, T t0) { +- // CraftBukkit end +- Ticket ticket = new Ticket<>(tickettype, ChunkLevel.byStatus(FullChunkStatus.FULL) - i, t0); +- long j = chunkcoordintpair.toLong(); +- +- boolean removed = this.removeTicket(j, ticket); // CraftBukkit +- this.tickingTicketsTracker.removeTicket(j, ticket); +- return removed; // CraftBukkit ++ return this.getChunkHolderManager().removeTicketAtLevel(tickettype, chunkcoordintpair, ChunkLevel.byStatus(FullChunkStatus.FULL) - i, t0); // Paper - rewrite chunk system + } + +- private SortedArraySet> getTickets(long position) { +- return (SortedArraySet) this.tickets.computeIfAbsent(position, (j) -> { +- return SortedArraySet.create(4); +- }); +- } ++ // Paper - rewrite chunk system + + protected void updateChunkForced(ChunkPos pos, boolean forced) { +- Ticket ticket = new Ticket<>(TicketType.FORCED, ChunkMap.FORCED_TICKET_LEVEL, pos); ++ Ticket ticket = new Ticket<>(TicketType.FORCED, ChunkMap.FORCED_TICKET_LEVEL, pos, 0L); // Paper - rewrite chunk system + long i = pos.toLong(); + + if (forced) { + this.addTicket(i, ticket); +- this.tickingTicketsTracker.addTicket(i, ticket); ++ //this.tickingTicketsTracker.addTicket(i, ticket); // Paper - no longer used + } else { + this.removeTicket(i, ticket); +- this.tickingTicketsTracker.removeTicket(i, ticket); ++ //this.tickingTicketsTracker.removeTicket(i, ticket); // Paper - no longer used + } + + } +@@ -271,12 +135,10 @@ public abstract class DistanceManager { + ChunkPos chunkcoordintpair = pos.chunk(); + long i = chunkcoordintpair.toLong(); + +- ((ObjectSet) this.playersPerChunk.computeIfAbsent(i, (j) -> { +- return new ObjectOpenHashSet(); +- })).add(player); ++ // Paper - no longer used + this.naturalSpawnChunkCounter.update(i, 0, true); +- this.playerTicketManager.update(i, 0, true); +- this.tickingTicketsTracker.addTicket(TicketType.PLAYER, chunkcoordintpair, this.getPlayerTicketLevel(), chunkcoordintpair); ++ //this.playerTicketManager.update(i, 0, true); // Paper - no longer used ++ //this.tickingTicketsTracker.addTicket(TicketType.PLAYER, chunkcoordintpair, this.getPlayerTicketLevel(), chunkcoordintpair); // Paper - no longer used + } + + public void removePlayer(SectionPos pos, ServerPlayer player) { +@@ -289,46 +151,44 @@ public abstract class DistanceManager { + if (objectset.isEmpty()) { + this.playersPerChunk.remove(i); + this.naturalSpawnChunkCounter.update(i, Integer.MAX_VALUE, false); +- this.playerTicketManager.update(i, Integer.MAX_VALUE, false); +- this.tickingTicketsTracker.removeTicket(TicketType.PLAYER, chunkcoordintpair, this.getPlayerTicketLevel(), chunkcoordintpair); ++ //this.playerTicketManager.update(i, Integer.MAX_VALUE, false); // Paper - no longer used ++ //this.tickingTicketsTracker.removeTicket(TicketType.PLAYER, chunkcoordintpair, this.getPlayerTicketLevel(), chunkcoordintpair); // Paper - no longer used + } + + } + +- private int getPlayerTicketLevel() { +- return Math.max(0, ChunkLevel.byStatus(FullChunkStatus.ENTITY_TICKING) - this.simulationDistance); +- } 
++ // Paper - rewrite chunk system + + public boolean inEntityTickingRange(long chunkPos) { +- return ChunkLevel.isEntityTicking(this.tickingTicketsTracker.getLevel(chunkPos)); ++ // Paper start - replace player chunk loader system ++ ChunkHolder holder = this.chunkMap.getVisibleChunkIfPresent(chunkPos); ++ return holder != null && holder.isEntityTickingReady(); ++ // Paper end - replace player chunk loader system + } + + public boolean inBlockTickingRange(long chunkPos) { +- return ChunkLevel.isBlockTicking(this.tickingTicketsTracker.getLevel(chunkPos)); ++ // Paper start - replace player chunk loader system ++ ChunkHolder holder = this.chunkMap.getVisibleChunkIfPresent(chunkPos); ++ return holder != null && holder.isTickingReady(); ++ // Paper end - replace player chunk loader system + } + + protected String getTicketDebugString(long pos) { +- SortedArraySet> arraysetsorted = (SortedArraySet) this.tickets.get(pos); +- +- return arraysetsorted != null && !arraysetsorted.isEmpty() ? ((Ticket) arraysetsorted.first()).toString() : "no_ticket"; ++ return this.getChunkHolderManager().getTicketDebugString(pos); // Paper - rewrite chunk system + } + + protected void updatePlayerTickets(int viewDistance) { +- this.playerTicketManager.updateViewDistance(viewDistance); ++ this.chunkMap.playerChunkManager.setTargetNoTickViewDistance(viewDistance); // Paper - route to player chunk manager + } + + // Paper start + public int getSimulationDistance() { +- return this.simulationDistance; ++ return this.chunkMap.playerChunkManager.getTargetTickViewDistance(); // Paper - route to player chunk manager + } + // Paper end + + public void updateSimulationDistance(int simulationDistance) { +- if (simulationDistance != this.simulationDistance) { +- this.simulationDistance = simulationDistance; +- this.tickingTicketsTracker.replacePlayerTicketsLevel(this.getPlayerTicketLevel()); +- } +- ++ this.chunkMap.playerChunkManager.setTargetTickViewDistance(simulationDistance); // Paper - route to player chunk manager + } + + public int getNaturalSpawnChunkCount() { +@@ -342,103 +202,28 @@ public abstract class DistanceManager { + } + + public String getDebugStatus() { +- return this.ticketThrottler.getDebugStatus(); ++ return "No DistanceManager stats available"; // Paper - rewrite chunk system + } + +- private void dumpTickets(String path) { +- try { +- FileOutputStream fileoutputstream = new FileOutputStream(new File(path)); +- +- try { +- ObjectIterator objectiterator = this.tickets.long2ObjectEntrySet().iterator(); +- +- while (objectiterator.hasNext()) { +- Entry>> entry = (Entry) objectiterator.next(); +- ChunkPos chunkcoordintpair = new ChunkPos(entry.getLongKey()); +- Iterator iterator = ((SortedArraySet) entry.getValue()).iterator(); +- +- while (iterator.hasNext()) { +- Ticket ticket = (Ticket) iterator.next(); +- +- fileoutputstream.write((chunkcoordintpair.x + "\t" + chunkcoordintpair.z + "\t" + ticket.getType() + "\t" + ticket.getTicketLevel() + "\t\n").getBytes(StandardCharsets.UTF_8)); +- } +- } +- } catch (Throwable throwable) { +- try { +- fileoutputstream.close(); +- } catch (Throwable throwable1) { +- throwable.addSuppressed(throwable1); +- } ++ // Paper - rewrite chunk system + +- throw throwable; +- } +- +- fileoutputstream.close(); +- } catch (IOException ioexception) { +- DistanceManager.LOGGER.error("Failed to dump tickets to {}", path, ioexception); +- } +- +- } +- +- @VisibleForTesting +- TickingTracker tickingTracker() { +- return this.tickingTicketsTracker; +- } ++ // Paper - replace player 
chunk loader + + public void removeTicketsOnClosing() { +- ImmutableSet> immutableset = ImmutableSet.of(TicketType.UNKNOWN, TicketType.POST_TELEPORT, TicketType.LIGHT, TicketType.FUTURE_AWAIT, TicketType.CHUNK_RELIGHT, ca.spottedleaf.starlight.common.light.StarLightInterface.CHUNK_WORK_TICKET); // Paper - add additional tickets to preserve +- ObjectIterator objectiterator = this.tickets.long2ObjectEntrySet().fastIterator(); +- +- while (objectiterator.hasNext()) { +- Entry>> entry = (Entry) objectiterator.next(); +- Iterator> iterator = ((SortedArraySet) entry.getValue()).iterator(); +- boolean flag = false; +- +- while (iterator.hasNext()) { +- Ticket ticket = (Ticket) iterator.next(); +- +- if (!immutableset.contains(ticket.getType())) { +- iterator.remove(); +- flag = true; +- this.tickingTicketsTracker.removeTicket(entry.getLongKey(), ticket); +- } +- } +- +- if (flag) { +- this.ticketTracker.update(entry.getLongKey(), DistanceManager.getTicketLevelAt((SortedArraySet) entry.getValue()), false); +- } +- +- if (((SortedArraySet) entry.getValue()).isEmpty()) { +- objectiterator.remove(); +- } +- } +- ++ // Paper - rewrite chunk system - this stupid hack ain't needed anymore + } + + public boolean hasTickets() { +- return !this.tickets.isEmpty(); ++ return this.getChunkHolderManager().hasTickets(); // Paper - rewrite chunk system + } + + // CraftBukkit start + public void removeAllTicketsFor(TicketType ticketType, int ticketLevel, T ticketIdentifier) { +- Ticket target = new Ticket<>(ticketType, ticketLevel, ticketIdentifier); +- +- for (java.util.Iterator>>> iterator = this.tickets.long2ObjectEntrySet().fastIterator(); iterator.hasNext();) { +- Entry>> entry = iterator.next(); +- SortedArraySet> tickets = entry.getValue(); +- if (tickets.remove(target)) { +- // copied from removeTicket +- this.ticketTracker.update(entry.getLongKey(), DistanceManager.getTicketLevelAt(tickets), false); +- +- // can't use entry after it's removed +- if (tickets.isEmpty()) { +- iterator.remove(); +- } +- } +- } ++ this.getChunkHolderManager().removeAllTicketsFor(ticketType, ticketLevel, ticketIdentifier); // Paper - rewrite chunk system + } + // CraftBukkit end + ++ /* Paper - rewrite chunk system + private class ChunkTicketTracker extends ChunkTracker { + + private static final int MAX_LEVEL = ChunkLevel.MAX_LEVEL + 1; +@@ -485,6 +270,7 @@ public abstract class DistanceManager { + return this.runUpdates(distance); + } + } ++ */ // Paper - rewrite chunk system + + private class FixedPlayerDistanceChunkTracker extends ChunkTracker { + +@@ -564,6 +350,7 @@ public abstract class DistanceManager { + } + } + ++ /* Paper - rewrite chunk system + private class PlayerTicketTracker extends DistanceManager.FixedPlayerDistanceChunkTracker { + + private int viewDistance = 0; +@@ -659,4 +446,5 @@ public abstract class DistanceManager { + return distance <= this.viewDistance; + } + } ++ */ // Paper - rewrite chunk system + } +diff --git a/src/main/java/net/minecraft/server/level/ServerChunkCache.java b/src/main/java/net/minecraft/server/level/ServerChunkCache.java +index fc1afda5a60ab0f3c275f849d8af08d308b36c3f..bb7b7f904639ac964f9c2d1bd5a660d8b397f647 100644 +--- a/src/main/java/net/minecraft/server/level/ServerChunkCache.java ++++ b/src/main/java/net/minecraft/server/level/ServerChunkCache.java +@@ -367,7 +367,7 @@ public class ServerChunkCache extends ChunkSource { + public LevelChunk getChunkAtIfLoadedImmediately(int x, int z) { + long k = ChunkPos.asLong(x, z); + +- if (Thread.currentThread() == this.mainThread) { ++ 
if (io.papermc.paper.util.TickThread.isTickThread()) { // Paper - rewrite chunk system + return this.getChunkAtIfLoadedMainThread(x, z); + } + +@@ -389,11 +389,34 @@ public class ServerChunkCache extends ChunkSource { + return ret; + } + // Paper end ++ // Paper start - async chunk io ++ public CompletableFuture> getChunkAtAsynchronously(int x, int z, boolean gen, boolean isUrgent) { ++ CompletableFuture> ret = new CompletableFuture<>(); ++ ++ ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority priority; ++ if (isUrgent) { ++ priority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.HIGHER; ++ } else { ++ priority = ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.NORMAL; ++ } ++ ++ io.papermc.paper.chunk.system.ChunkSystem.scheduleChunkLoad(this.level, x, z, gen, ChunkStatus.FULL, true, priority, (chunk) -> { ++ if (chunk == null) { ++ ret.complete(ChunkHolder.UNLOADED_CHUNK); ++ } else { ++ ret.complete(Either.left(chunk)); ++ } ++ }); ++ ++ return ret; ++ } ++ // Paper end - async chunk io + + @Nullable + @Override + public ChunkAccess getChunk(int x, int z, ChunkStatus leastStatus, boolean create) { +- if (Thread.currentThread() != this.mainThread) { ++ final int x1 = x; final int z1 = z; // Paper - conflict on variable change ++ if (!io.papermc.paper.util.TickThread.isTickThread()) { // Paper - rewrite chunk system + return (ChunkAccess) CompletableFuture.supplyAsync(() -> { + return this.getChunk(x, z, leastStatus, create); + }, this.mainThreadProcessor).join(); +@@ -405,23 +428,20 @@ public class ServerChunkCache extends ChunkSource { + + ChunkAccess ichunkaccess; + +- for (int l = 0; l < 4; ++l) { +- if (k == this.lastChunkPos[l] && leastStatus == this.lastChunkStatus[l]) { +- ichunkaccess = this.lastChunk[l]; +- if (ichunkaccess != null) { // CraftBukkit - the chunk can become accessible in the meantime TODO for non-null chunks it might also make sense to check that the chunk's state hasn't changed in the meantime +- return ichunkaccess; +- } +- } +- } ++ // Paper - rewrite chunk system - there are no correct callbacks to remove items from cache in the new chunk system + + gameprofilerfiller.incrementCounter("getChunkCacheMiss"); +- CompletableFuture> completablefuture = this.getChunkFutureMainThread(x, z, leastStatus, create); ++ CompletableFuture> completablefuture = this.getChunkFutureMainThread(x, z, leastStatus, create, true); // Paper + ServerChunkCache.MainThreadExecutor chunkproviderserver_b = this.mainThreadProcessor; + + Objects.requireNonNull(completablefuture); + if (!completablefuture.isDone()) { // Paper ++ // Paper start - async chunk io/loading ++ io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.pushChunkWait(this.level, x1, z1); // Paper - rewrite chunk system ++ // Paper end + this.level.timings.syncChunkLoad.startTiming(); // Paper + chunkproviderserver_b.managedBlock(completablefuture::isDone); ++ io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.popChunkWait(); // Paper - async chunk debug // Paper - rewrite chunk system + this.level.timings.syncChunkLoad.stopTiming(); // Paper + } // Paper + ichunkaccess = (ChunkAccess) ((Either) completablefuture.join()).map((ichunkaccess1) -> { +@@ -441,7 +461,7 @@ public class ServerChunkCache extends ChunkSource { + @Nullable + @Override + public LevelChunk getChunkNow(int chunkX, int chunkZ) { +- if (Thread.currentThread() != this.mainThread) { ++ if (!io.papermc.paper.util.TickThread.isTickThread()) { // Paper - 
rewrite chunk system + return null; + } else { + this.level.getProfiler().incrementCounter("getChunkNow"); +@@ -487,7 +507,7 @@ public class ServerChunkCache extends ChunkSource { + } + + public CompletableFuture> getChunkFuture(int chunkX, int chunkZ, ChunkStatus leastStatus, boolean create) { +- boolean flag1 = Thread.currentThread() == this.mainThread; ++ boolean flag1 = io.papermc.paper.util.TickThread.isTickThread(); // Paper - rewrite chunk system + CompletableFuture completablefuture; + + if (flag1) { +@@ -508,47 +528,52 @@ public class ServerChunkCache extends ChunkSource { + } + + private CompletableFuture> getChunkFutureMainThread(int chunkX, int chunkZ, ChunkStatus leastStatus, boolean create) { +- ChunkPos chunkcoordintpair = new ChunkPos(chunkX, chunkZ); +- long k = chunkcoordintpair.toLong(); +- int l = ChunkLevel.byStatus(leastStatus); +- ChunkHolder playerchunk = this.getVisibleChunkIfPresent(k); ++ // Paper start - add isUrgent - old sig left in place for dirty nms plugins ++ return getChunkFutureMainThread(chunkX, chunkZ, leastStatus, create, false); ++ } ++ private CompletableFuture> getChunkFutureMainThread(int chunkX, int chunkZ, ChunkStatus leastStatus, boolean create, boolean isUrgent) { ++ // Paper start - rewrite chunk system ++ io.papermc.paper.util.TickThread.ensureTickThread(this.level, chunkX, chunkZ, "Scheduling chunk load off-main"); ++ int minLevel = ChunkLevel.byStatus(leastStatus); ++ io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder = this.level.chunkTaskScheduler.chunkHolderManager.getChunkHolder(chunkX, chunkZ); + +- // CraftBukkit start - don't add new ticket for currently unloading chunk +- boolean currentlyUnloading = false; +- if (playerchunk != null) { +- FullChunkStatus oldChunkState = ChunkLevel.fullStatus(playerchunk.oldTicketLevel); +- FullChunkStatus currentChunkState = ChunkLevel.fullStatus(playerchunk.getTicketLevel()); +- currentlyUnloading = (oldChunkState.isOrAfter(FullChunkStatus.FULL) && !currentChunkState.isOrAfter(FullChunkStatus.FULL)); +- } +- if (create && !currentlyUnloading) { +- // CraftBukkit end +- this.distanceManager.addTicket(TicketType.UNKNOWN, chunkcoordintpair, l, chunkcoordintpair); +- if (this.chunkAbsent(playerchunk, l)) { +- ProfilerFiller gameprofilerfiller = this.level.getProfiler(); +- +- gameprofilerfiller.push("chunkLoad"); +- this.runDistanceManagerUpdates(); +- playerchunk = this.getVisibleChunkIfPresent(k); +- gameprofilerfiller.pop(); +- if (this.chunkAbsent(playerchunk, l)) { +- throw (IllegalStateException) Util.pauseInIde(new IllegalStateException("No chunk holder after ticket has been added")); +- } +- } ++ boolean needsFullScheduling = leastStatus == ChunkStatus.FULL && (chunkHolder == null || !chunkHolder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER)); ++ ++ if ((chunkHolder == null || chunkHolder.getTicketLevel() > minLevel || needsFullScheduling) && !create) { ++ return ChunkHolder.UNLOADED_CHUNK_FUTURE; + } + +- return this.chunkAbsent(playerchunk, l) ? ChunkHolder.UNLOADED_CHUNK_FUTURE : playerchunk.getOrScheduleFuture(leastStatus, this.chunkMap); +- } ++ io.papermc.paper.chunk.system.scheduling.NewChunkHolder.ChunkCompletion chunkCompletion = chunkHolder == null ? 
null : chunkHolder.getLastChunkCompletion(); ++ if (needsFullScheduling || chunkCompletion == null || !chunkCompletion.genStatus().isOrAfter(leastStatus)) { ++ // schedule ++ CompletableFuture> ret = new CompletableFuture<>(); ++ Consumer complete = (ChunkAccess chunk) -> { ++ if (chunk == null) { ++ ret.complete(Either.right(ChunkHolder.ChunkLoadingFailure.UNLOADED)); ++ } else { ++ ret.complete(Either.left(chunk)); ++ } ++ }; + +- private boolean chunkAbsent(@Nullable ChunkHolder holder, int maxLevel) { +- return holder == null || holder.oldTicketLevel > maxLevel; // CraftBukkit using oldTicketLevel for isLoaded checks ++ this.level.chunkTaskScheduler.scheduleChunkLoad( ++ chunkX, chunkZ, leastStatus, true, ++ isUrgent ? ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.BLOCKING : ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor.Priority.NORMAL, ++ complete ++ ); ++ ++ return ret; ++ } else { ++ // can return now ++ return CompletableFuture.completedFuture(Either.left(chunkCompletion.chunk())); ++ } ++ // Paper end - rewrite chunk system + } + ++ // Paper - rewrite chunk system ++ + @Override + public boolean hasChunk(int x, int z) { +- ChunkHolder playerchunk = this.getVisibleChunkIfPresent((new ChunkPos(x, z)).toLong()); +- int k = ChunkLevel.byStatus(ChunkStatus.FULL); +- +- return !this.chunkAbsent(playerchunk, k); ++ return this.getChunkAtIfLoadedImmediately(x, z) != null; // Paper - rewrite chunk system + } + + @Nullable +@@ -560,22 +585,13 @@ public class ServerChunkCache extends ChunkSource { + if (playerchunk == null) { + return null; + } else { +- int l = ServerChunkCache.CHUNK_STATUSES.size() - 1; +- +- while (true) { +- ChunkStatus chunkstatus = (ChunkStatus) ServerChunkCache.CHUNK_STATUSES.get(l); +- Optional optional = ((Either) playerchunk.getFutureIfPresentUnchecked(chunkstatus).getNow(ChunkHolder.UNLOADED_CHUNK)).left(); +- +- if (optional.isPresent()) { +- return (LightChunk) optional.get(); +- } +- +- if (chunkstatus == ChunkStatus.INITIALIZE_LIGHT.getParent()) { +- return null; +- } +- +- --l; ++ // Paper start - rewrite chunk system ++ ChunkStatus status = playerchunk.getChunkHolderStatus(); ++ if (status != null && !status.isOrAfter(ChunkStatus.LIGHT.getParent())) { ++ return null; + } ++ return playerchunk.getAvailableChunkNow(); ++ // Paper end - rewrite chunk system + } + } + +@@ -589,15 +605,7 @@ public class ServerChunkCache extends ChunkSource { + } + + boolean runDistanceManagerUpdates() { +- boolean flag = this.distanceManager.runAllUpdates(this.chunkMap); +- boolean flag1 = this.chunkMap.promoteChunkMap(); +- +- if (!flag && !flag1) { +- return false; +- } else { +- this.clearCache(); +- return true; +- } ++ return this.level.chunkTaskScheduler.chunkHolderManager.processTicketUpdates(); // Paper - rewrite chunk system + } + + // Paper start +@@ -607,17 +615,10 @@ public class ServerChunkCache extends ChunkSource { + // Paper end + + public boolean isPositionTicking(long pos) { +- ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos); +- +- if (playerchunk == null) { +- return false; +- } else if (!this.level.shouldTickBlocksAt(pos)) { +- return false; +- } else { +- Either either = (Either) playerchunk.getTickingChunkFuture().getNow(null); // CraftBukkit - decompile error +- +- return either != null && either.left().isPresent(); +- } ++ // Paper start - replace player chunk loader system ++ ChunkHolder holder = this.chunkMap.getVisibleChunkIfPresent(pos); ++ return holder != null && 
holder.isTickingReady(); ++ // Paper end - replace player chunk loader system + } + + public void save(boolean flush) { +@@ -633,17 +634,13 @@ public class ServerChunkCache extends ChunkSource { + this.close(true); + } + +- public void close(boolean save) throws IOException { +- if (save) { +- this.save(true); +- } +- // CraftBukkit end +- this.lightEngine.close(); +- this.chunkMap.close(); ++ public void close(boolean save) { // Paper - rewrite chunk system ++ this.level.chunkTaskScheduler.chunkHolderManager.close(save, true); // Paper - rewrite chunk system + } + + // CraftBukkit start - modelled on below + public void purgeUnload() { ++ if (true) return; // Paper - tickets will be removed later, this behavior isn't really well accounted for by the chunk system + this.level.getProfiler().push("purge"); + this.distanceManager.purgeStaleTickets(); + this.runDistanceManagerUpdates(); +@@ -664,6 +661,7 @@ public class ServerChunkCache extends ChunkSource { + this.level.getProfiler().popPush("chunks"); + if (tickChunks) { + this.level.timings.chunks.startTiming(); // Paper - timings ++ this.chunkMap.playerChunkManager.tick(); // Paper - this is mostly is to account for view distance changes + this.tickChunks(); + this.level.timings.chunks.stopTiming(); // Paper - timings + } +@@ -760,7 +758,12 @@ public class ServerChunkCache extends ChunkSource { + ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos); + + if (playerchunk != null) { +- ((Either) playerchunk.getFullChunkFuture().getNow(ChunkHolder.UNLOADED_LEVEL_CHUNK)).left().ifPresent(chunkConsumer); ++ // Paper start - rewrite chunk system ++ LevelChunk chunk = playerchunk.getFullChunk(); ++ if (chunk != null) { ++ chunkConsumer.accept(chunk); ++ } ++ // Paper end - rewrite chunk system + } + + } +@@ -926,17 +929,11 @@ public class ServerChunkCache extends ChunkSource { + @Override + // CraftBukkit start - process pending Chunk loadCallback() and unloadCallback() after each run task + public boolean pollTask() { +- try { ++ ServerChunkCache.this.chunkMap.playerChunkManager.tickMidTick(); + if (ServerChunkCache.this.runDistanceManagerUpdates()) { + return true; +- } else { +- ServerChunkCache.this.lightEngine.tryScheduleUpdate(); +- return super.pollTask(); + } +- } finally { +- chunkMap.callbackExecutor.run(); +- } +- // CraftBukkit end ++ return super.pollTask() | ServerChunkCache.this.level.chunkTaskScheduler.executeMainThreadTask(); // Paper - rewrite chunk system + } + } + +diff --git a/src/main/java/net/minecraft/server/level/ServerLevel.java b/src/main/java/net/minecraft/server/level/ServerLevel.java +index 7cb5abfa89f842194325d26c6e95b49460c5968f..d7172df2489f2eb325120d950dcff32cc483db56 100644 +--- a/src/main/java/net/minecraft/server/level/ServerLevel.java ++++ b/src/main/java/net/minecraft/server/level/ServerLevel.java +@@ -194,7 +194,7 @@ public class ServerLevel extends Level implements WorldGenLevel { + private final MinecraftServer server; + public final PrimaryLevelData serverLevelData; // CraftBukkit - type + final EntityTickList entityTickList; +- public final PersistentEntitySectionManager entityManager; ++ //public final PersistentEntitySectionManager entityManager; // Paper - rewrite chunk system + private final GameEventDispatcher gameEventDispatcher; + public boolean noSave; + private final SleepStatus sleepStatus; +@@ -320,7 +320,108 @@ public class ServerLevel extends Level implements WorldGenLevel { + } + } + } +- // Paper end ++ ++ // Paper start - rewrite chunk system ++ public final 
io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler chunkTaskScheduler; ++ public final io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController chunkDataControllerNew ++ = new io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.CHUNK_DATA) { ++ ++ @Override ++ public net.minecraft.world.level.chunk.storage.RegionFileStorage getCache() { ++ return ServerLevel.this.getChunkSource().chunkMap.regionFileCache; ++ } ++ ++ @Override ++ public void writeData(int chunkX, int chunkZ, net.minecraft.nbt.CompoundTag compound) throws IOException { ++ ServerLevel.this.getChunkSource().chunkMap.write(new ChunkPos(chunkX, chunkZ), compound); ++ } ++ ++ @Override ++ public net.minecraft.nbt.CompoundTag readData(int chunkX, int chunkZ) throws IOException { ++ return ServerLevel.this.getChunkSource().chunkMap.readSync(new ChunkPos(chunkX, chunkZ)); ++ } ++ }; ++ public final io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController poiDataControllerNew ++ = new io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA) { ++ ++ @Override ++ public net.minecraft.world.level.chunk.storage.RegionFileStorage getCache() { ++ return ServerLevel.this.getChunkSource().chunkMap.getPoiManager(); ++ } ++ ++ @Override ++ public void writeData(int chunkX, int chunkZ, net.minecraft.nbt.CompoundTag compound) throws IOException { ++ ServerLevel.this.getChunkSource().chunkMap.getPoiManager().write(new ChunkPos(chunkX, chunkZ), compound); ++ } ++ ++ @Override ++ public net.minecraft.nbt.CompoundTag readData(int chunkX, int chunkZ) throws IOException { ++ return ServerLevel.this.getChunkSource().chunkMap.getPoiManager().read(new ChunkPos(chunkX, chunkZ)); ++ } ++ }; ++ public final io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController entityDataControllerNew ++ = new io.papermc.paper.chunk.system.io.RegionFileIOThread.ChunkDataController(io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.ENTITY_DATA) { ++ ++ @Override ++ public net.minecraft.world.level.chunk.storage.RegionFileStorage getCache() { ++ return ServerLevel.this.entityStorage; ++ } ++ ++ @Override ++ public void writeData(int chunkX, int chunkZ, net.minecraft.nbt.CompoundTag compound) throws IOException { ++ ServerLevel.this.writeEntityChunk(chunkX, chunkZ, compound); ++ } ++ ++ @Override ++ public net.minecraft.nbt.CompoundTag readData(int chunkX, int chunkZ) throws IOException { ++ return ServerLevel.this.readEntityChunk(chunkX, chunkZ); ++ } ++ }; ++ private final EntityRegionFileStorage entityStorage; ++ ++ private static final class EntityRegionFileStorage extends net.minecraft.world.level.chunk.storage.RegionFileStorage { ++ ++ public EntityRegionFileStorage(Path directory, boolean dsync) { ++ super(directory, dsync); ++ } ++ ++ protected void write(ChunkPos pos, net.minecraft.nbt.CompoundTag nbt) throws IOException { ++ ChunkPos nbtPos = nbt == null ? 
null : EntityStorage.readChunkPos(nbt); ++ if (nbtPos != null && !pos.equals(nbtPos)) { ++ throw new IllegalArgumentException( ++ "Entity chunk coordinate and serialized data do not have matching coordinates, trying to serialize coordinate " + pos.toString() ++ + " but compound says coordinate is " + nbtPos + " for world: " + this ++ ); ++ } ++ super.write(pos, nbt); ++ } ++ } ++ ++ private void writeEntityChunk(int chunkX, int chunkZ, net.minecraft.nbt.CompoundTag compound) throws IOException { ++ if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) { ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.scheduleSave( ++ this, chunkX, chunkZ, compound, ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.ENTITY_DATA); ++ return; ++ } ++ this.entityStorage.write(new ChunkPos(chunkX, chunkZ), compound); ++ } ++ ++ private net.minecraft.nbt.CompoundTag readEntityChunk(int chunkX, int chunkZ) throws IOException { ++ if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) { ++ return io.papermc.paper.chunk.system.io.RegionFileIOThread.loadData( ++ this, chunkX, chunkZ, io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.ENTITY_DATA, ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.getIOBlockingPriorityForCurrentThread() ++ ); ++ } ++ return this.entityStorage.read(new ChunkPos(chunkX, chunkZ)); ++ } ++ ++ private final io.papermc.paper.chunk.system.entity.EntityLookup entityLookup; ++ public final io.papermc.paper.chunk.system.entity.EntityLookup getEntityLookup() { ++ return this.entityLookup; ++ } ++ // Paper end - rewrite chunk system + + // Add env and gen to constructor, IWorldDataServer -> WorldDataServer + public ServerLevel(MinecraftServer minecraftserver, Executor executor, LevelStorageSource.LevelStorageAccess convertable_conversionsession, PrimaryLevelData iworlddataserver, ResourceKey resourcekey, LevelStem worlddimension, ChunkProgressListener worldloadlistener, boolean flag, long i, List list, boolean flag1, @Nullable RandomSequences randomsequences, org.bukkit.World.Environment env, org.bukkit.generator.ChunkGenerator gen, org.bukkit.generator.BiomeProvider biomeProvider) { +@@ -364,16 +465,16 @@ public class ServerLevel extends Level implements WorldGenLevel { + // CraftBukkit end + boolean flag2 = minecraftserver.forceSynchronousWrites(); + DataFixer datafixer = minecraftserver.getFixerUpper(); +- EntityPersistentStorage entitypersistentstorage = new EntityStorage(this, convertable_conversionsession.getDimensionPath(resourcekey).resolve("entities"), datafixer, flag2, minecraftserver); ++ this.entityStorage = new EntityRegionFileStorage(convertable_conversionsession.getDimensionPath(resourcekey).resolve("entities"), flag2); // Paper - rewrite chunk system //EntityPersistentStorage entitypersistentstorage = new EntityStorage(this, convertable_conversionsession.getDimensionPath(resourcekey).resolve("entities"), datafixer, flag2, minecraftserver); + +- this.entityManager = new PersistentEntitySectionManager<>(Entity.class, new ServerLevel.EntityCallbacks(), entitypersistentstorage); ++ // this.entityManager = new PersistentEntitySectionManager<>(Entity.class, new ServerLevel.EntityCallbacks(), entitypersistentstorage, this.entitySliceManager); // Paper // Paper - rewrite chunk system + StructureTemplateManager structuretemplatemanager = minecraftserver.getStructureManager(); + int j = this.spigotConfig.viewDistance; // Spigot + int k = this.spigotConfig.simulationDistance; // Spigot +- 
PersistentEntitySectionManager persistententitysectionmanager = this.entityManager; ++ //PersistentEntitySectionManager persistententitysectionmanager = this.entityManager; // Paper - rewrite chunk system + +- Objects.requireNonNull(this.entityManager); +- this.chunkSource = new ServerChunkCache(this, convertable_conversionsession, datafixer, structuretemplatemanager, executor, chunkgenerator, j, k, flag2, worldloadlistener, persistententitysectionmanager::updateChunkStatus, () -> { ++ //Objects.requireNonNull(this.entityManager); // Paper - rewrite chunk system ++ this.chunkSource = new ServerChunkCache(this, convertable_conversionsession, datafixer, structuretemplatemanager, executor, chunkgenerator, j, k, flag2, worldloadlistener, null, () -> { // Paper - rewrite chunk system + return minecraftserver.overworld().getDataStorage(); + }); + this.chunkSource.getGeneratorState().ensureStructuresGenerated(); +@@ -410,6 +511,9 @@ public class ServerLevel extends Level implements WorldGenLevel { + }, "random_sequences"); + }); + this.getCraftServer().addWorld(this.getWorld()); // CraftBukkit ++ ++ this.chunkTaskScheduler = new io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler(this, io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.workerThreads); // Paper - rewrite chunk system ++ this.entityLookup = new io.papermc.paper.chunk.system.entity.EntityLookup(this, new EntityCallbacks()); // Paper - rewrite chunk system + } + + /** @deprecated */ +@@ -520,7 +624,7 @@ public class ServerLevel extends Level implements WorldGenLevel { + gameprofilerfiller.push("checkDespawn"); + entity.checkDespawn(); + gameprofilerfiller.pop(); +- if (this.chunkSource.chunkMap.getDistanceManager().inEntityTickingRange(entity.chunkPosition().toLong())) { ++ if (true || this.chunkSource.chunkMap.getDistanceManager().inEntityTickingRange(entity.chunkPosition().toLong())) { // Paper - now always true if in the ticking list + Entity entity1 = entity.getVehicle(); + + if (entity1 != null) { +@@ -545,13 +649,16 @@ public class ServerLevel extends Level implements WorldGenLevel { + } + + gameprofilerfiller.push("entityManagement"); +- this.entityManager.tick(); ++ //this.entityManager.tick(); // Paper - rewrite chunk system + gameprofilerfiller.pop(); + } + + @Override + public boolean shouldTickBlocksAt(long chunkPos) { +- return this.chunkSource.chunkMap.getDistanceManager().inBlockTickingRange(chunkPos); ++ // Paper start - replace player chunk loader system ++ ChunkHolder holder = this.chunkSource.chunkMap.getVisibleChunkIfPresent(chunkPos); ++ return holder != null && holder.isTickingReady(); ++ // Paper end - replace player chunk loader system + } + + protected void tickTime() { +@@ -1012,6 +1119,11 @@ public class ServerLevel extends Level implements WorldGenLevel { + } + + public void save(@Nullable ProgressListener progressListener, boolean flush, boolean savingDisabled) { ++ // Paper start - rewrite chunk system - add close param ++ this.save(progressListener, flush, savingDisabled, false); ++ } ++ public void save(@Nullable ProgressListener progressListener, boolean flush, boolean savingDisabled, boolean close) { ++ // Paper end - rewrite chunk system - add close param + ServerChunkCache chunkproviderserver = this.getChunkSource(); + + if (!savingDisabled) { +@@ -1027,16 +1139,13 @@ public class ServerLevel extends Level implements WorldGenLevel { + } + + timings.worldSaveChunks.startTiming(); // Paper +- chunkproviderserver.save(flush); ++ if (!close) chunkproviderserver.save(flush); // Paper - 
rewrite chunk system ++ if (close) chunkproviderserver.close(true); // Paper - rewrite chunk system + timings.worldSaveChunks.stopTiming(); // Paper + }// Paper +- if (flush) { +- this.entityManager.saveAll(); +- } else { +- this.entityManager.autoSave(); +- } ++ // Paper - rewrite chunk system - entity saving moved into ChunkHolder + +- } ++ } else if (close) { chunkproviderserver.close(false); } // Paper - rewrite chunk system + + // CraftBukkit start - moved from MinecraftServer.saveChunks + ServerLevel worldserver1 = this; +@@ -1172,7 +1281,7 @@ public class ServerLevel extends Level implements WorldGenLevel { + this.removePlayerImmediately((ServerPlayer) entity, Entity.RemovalReason.DISCARDED); + } + +- this.entityManager.addNewEntity(player); ++ this.entityLookup.addNewEntity(player); // Paper - rewite chunk system + } + + // CraftBukkit start +@@ -1188,7 +1297,7 @@ public class ServerLevel extends Level implements WorldGenLevel { + } + // CraftBukkit end + +- return this.entityManager.addNewEntity(entity); ++ return this.entityLookup.addNewEntity(entity); // Paper - rewrite chunk system + } + } + +@@ -1200,10 +1309,10 @@ public class ServerLevel extends Level implements WorldGenLevel { + public boolean tryAddFreshEntityWithPassengers(Entity entity, org.bukkit.event.entity.CreatureSpawnEvent.SpawnReason reason) { + // CraftBukkit end + Stream stream = entity.getSelfAndPassengers().map(Entity::getUUID); // CraftBukkit - decompile error +- PersistentEntitySectionManager persistententitysectionmanager = this.entityManager; ++ //PersistentEntitySectionManager persistententitysectionmanager = this.entityManager; // Paper - rewrite chunk system + +- Objects.requireNonNull(this.entityManager); +- if (stream.anyMatch(persistententitysectionmanager::isLoaded)) { ++ //Objects.requireNonNull(this.entityManager); // Paper - rewrite chunk system ++ if (stream.anyMatch(this.entityLookup::hasEntity)) { // Paper - rewrite chunk system + return false; + } else { + this.addFreshEntityWithPassengers(entity, reason); // CraftBukkit +@@ -1723,7 +1832,7 @@ public class ServerLevel extends Level implements WorldGenLevel { + } + } + +- bufferedwriter.write(String.format(Locale.ROOT, "entities: %s\n", this.entityManager.gatherStats())); ++ bufferedwriter.write(String.format(Locale.ROOT, "entities: %s\n", this.entityLookup.getDebugInfo())); // Paper - rewrite chunk system + bufferedwriter.write(String.format(Locale.ROOT, "block_entity_tickers: %d\n", this.blockEntityTickers.size())); + bufferedwriter.write(String.format(Locale.ROOT, "block_ticks: %d\n", this.getBlockTicks().count())); + bufferedwriter.write(String.format(Locale.ROOT, "fluid_ticks: %d\n", this.getFluidTicks().count())); +@@ -1772,7 +1881,7 @@ public class ServerLevel extends Level implements WorldGenLevel { + BufferedWriter bufferedwriter2 = Files.newBufferedWriter(path1); + + try { +- playerchunkmap.dumpChunks(bufferedwriter2); ++ //playerchunkmap.dumpChunks(bufferedwriter2); // Paper - rewrite chunk system + } catch (Throwable throwable4) { + if (bufferedwriter2 != null) { + try { +@@ -1793,7 +1902,7 @@ public class ServerLevel extends Level implements WorldGenLevel { + BufferedWriter bufferedwriter3 = Files.newBufferedWriter(path2); + + try { +- this.entityManager.dumpSections(bufferedwriter3); ++ //this.entityManager.dumpSections(bufferedwriter3); // Paper - rewrite chunk system + } catch (Throwable throwable6) { + if (bufferedwriter3 != null) { + try { +@@ -1935,7 +2044,7 @@ public class ServerLevel extends Level implements WorldGenLevel 
{ + + @VisibleForTesting + public String getWatchdogStats() { +- return String.format(Locale.ROOT, "players: %s, entities: %s [%s], block_entities: %d [%s], block_ticks: %d, fluid_ticks: %d, chunk_source: %s", this.players.size(), this.entityManager.gatherStats(), ServerLevel.getTypeCount(this.entityManager.getEntityGetter().getAll(), (entity) -> { ++ return String.format(Locale.ROOT, "players: %s, entities: %s [%s], block_entities: %d [%s], block_ticks: %d, fluid_ticks: %d, chunk_source: %s", this.players.size(), this.entityLookup.getDebugInfo(), ServerLevel.getTypeCount(this.entityLookup.getAll(), (entity) -> { // Paper - rewrite chunk system + return BuiltInRegistries.ENTITY_TYPE.getKey(entity.getType()).toString(); + }), this.blockEntityTickers.size(), ServerLevel.getTypeCount(this.blockEntityTickers, TickingBlockEntity::getType), this.getBlockTicks().count(), this.getFluidTicks().count(), this.gatherChunkSourceStats()); + } +@@ -1995,15 +2104,15 @@ public class ServerLevel extends Level implements WorldGenLevel { + @Override + public LevelEntityGetter getEntities() { + org.spigotmc.AsyncCatcher.catchOp("Chunk getEntities call"); // Spigot +- return this.entityManager.getEntityGetter(); ++ return this.entityLookup; // Paper - rewrite chunk system + } + + public void addLegacyChunkEntities(Stream entities) { +- this.entityManager.addLegacyChunkEntities(entities); ++ this.entityLookup.addLegacyChunkEntities(entities.toList()); // Paper - rewrite chunk system + } + + public void addWorldGenChunkEntities(Stream entities) { +- this.entityManager.addWorldGenChunkEntities(entities); ++ this.entityLookup.addWorldGenChunkEntities(entities.toList()); // Paper - rewrite chunk system + } + + public void startTickingChunk(LevelChunk chunk) { +@@ -2019,34 +2128,49 @@ public class ServerLevel extends Level implements WorldGenLevel { + @Override + public void close() throws IOException { + super.close(); +- this.entityManager.close(); ++ //this.entityManager.close(); // Paper - rewrite chunk system + } + + @Override + public String gatherChunkSourceStats() { + String s = this.chunkSource.gatherStats(); + +- return "Chunks[S] W: " + s + " E: " + this.entityManager.gatherStats(); ++ return "Chunks[S] W: " + s + " E: " + this.entityLookup.getDebugInfo(); // Paper - rewrite chunk system + } + + public boolean areEntitiesLoaded(long chunkPos) { +- return this.entityManager.areEntitiesLoaded(chunkPos); ++ // Paper start - rewrite chunk system ++ return this.getChunkIfLoadedImmediately(ChunkPos.getX(chunkPos), ChunkPos.getZ(chunkPos)) != null; ++ // Paper end - rewrite chunk system + } + + private boolean isPositionTickingWithEntitiesLoaded(long chunkPos) { +- return this.areEntitiesLoaded(chunkPos) && this.chunkSource.isPositionTicking(chunkPos); ++ // Paper start - optimize is ticking ready type functions ++ io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder = this.chunkTaskScheduler.chunkHolderManager.getChunkHolder(chunkPos); ++ // isTicking implies the chunk is loaded, and the chunk is loaded now implies the entities are loaded ++ return chunkHolder != null && chunkHolder.isTickingReady(); ++ // Paper end + } + + public boolean isPositionEntityTicking(BlockPos pos) { +- return this.entityManager.canPositionTick(pos) && this.chunkSource.chunkMap.getDistanceManager().inEntityTickingRange(ChunkPos.asLong(pos)); ++ // Paper start - rewrite chunk system ++ io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder = 
this.chunkTaskScheduler.chunkHolderManager.getChunkHolder(io.papermc.paper.util.CoordinateUtils.getChunkKey(pos)); ++ return chunkHolder != null && chunkHolder.isEntityTickingReady(); ++ // Paper end - rewrite chunk system + } + + public boolean isNaturalSpawningAllowed(BlockPos pos) { +- return this.entityManager.canPositionTick(pos); ++ // Paper start - rewrite chunk system ++ io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder = this.chunkTaskScheduler.chunkHolderManager.getChunkHolder(io.papermc.paper.util.CoordinateUtils.getChunkKey(pos)); ++ return chunkHolder != null && chunkHolder.isEntityTickingReady(); ++ // Paper end - rewrite chunk system + } + + public boolean isNaturalSpawningAllowed(ChunkPos pos) { +- return this.entityManager.canPositionTick(pos); ++ // Paper start - rewrite chunk system ++ io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder = this.chunkTaskScheduler.chunkHolderManager.getChunkHolder(io.papermc.paper.util.CoordinateUtils.getChunkKey(pos)); ++ return chunkHolder != null && chunkHolder.isEntityTickingReady(); ++ // Paper end - rewrite chunk system + } + + @Override +diff --git a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java +index 481272124b7589cff0aa05b6df5b7e6f1d539414..c709e27a00d8617f9a3346f85bd88ce47baa9c76 100644 +--- a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java ++++ b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java +@@ -37,15 +37,12 @@ import net.minecraft.world.level.chunk.ChunkStatus; + public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCloseable { + public static final int DEFAULT_BATCH_SIZE = 1000; + private static final Logger LOGGER = LogUtils.getLogger(); +- private final ProcessorMailbox taskMailbox; +- private final ObjectList> lightTasks = new ObjectArrayList<>(); ++ // Paper - rewrite chunk system + private final ChunkMap chunkMap; +- private final ProcessorHandle> sorterMailbox; +- private final int taskPerBatch = 1000; +- private final AtomicBoolean scheduled = new AtomicBoolean(); ++ // Paper - rewrite chunk system + + // Paper start - replace light engine impl +- protected final ca.spottedleaf.starlight.common.light.StarLightInterface theLightEngine; ++ public final ca.spottedleaf.starlight.common.light.StarLightInterface theLightEngine; + public final boolean hasBlockLight; + public final boolean hasSkyLight; + // Paper end - replace light engine impl +@@ -53,8 +50,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl + public ThreadedLevelLightEngine(LightChunkGetter chunkProvider, ChunkMap chunkStorage, boolean hasBlockLight, ProcessorMailbox processor, ProcessorHandle> executor) { + super(chunkProvider, false, false); // Paper - destroy vanilla light engine state + this.chunkMap = chunkStorage; +- this.sorterMailbox = executor; +- this.taskMailbox = processor; ++ // Paper - rewrite chunk system + // Paper start - replace light engine impl + this.hasBlockLight = true; + this.hasSkyLight = hasBlockLight; // Nice variable name. 
+@@ -98,7 +94,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl + ++totalChunks; + } + +- this.taskMailbox.tell(() -> { ++ this.chunkMap.level.chunkTaskScheduler.lightExecutor.queueRunnable(() -> { // Paper - rewrite chunk system + this.theLightEngine.relightChunks(chunks, (ChunkPos chunkPos) -> { + chunkLightCallback.accept(chunkPos); + ((java.util.concurrent.Executor)((ServerLevel)this.theLightEngine.getWorld()).getChunkSource().mainThreadProcessor).execute(() -> { +@@ -275,17 +271,11 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl + } + + private void addTask(int x, int z, ThreadedLevelLightEngine.TaskType stage, Runnable task) { +- this.addTask(x, z, this.chunkMap.getChunkQueueLevel(ChunkPos.asLong(x, z)), stage, task); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + private void addTask(int x, int z, IntSupplier completedLevelSupplier, ThreadedLevelLightEngine.TaskType stage, Runnable task) { +- this.sorterMailbox.tell(ChunkTaskPriorityQueueSorter.message(() -> { +- this.lightTasks.add(Pair.of(stage, task)); +- if (this.lightTasks.size() >= 1000) { +- this.runUpdate(); +- } +- +- }, ChunkPos.asLong(x, z), completedLevelSupplier)); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + @Override +@@ -327,90 +317,15 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl + } + + public CompletableFuture lightChunk(ChunkAccess chunk, boolean excludeBlocks) { +- // Paper start - replace light engine impl +- if (true) { +- boolean lit = excludeBlocks; +- final ChunkPos chunkPos = chunk.getPos(); +- +- return CompletableFuture.supplyAsync(() -> { +- final Boolean[] emptySections = StarLightEngine.getEmptySectionsForChunk(chunk); +- if (!lit) { +- chunk.setLightCorrect(false); +- this.theLightEngine.lightChunk(chunk, emptySections); +- chunk.setLightCorrect(true); +- } else { +- this.theLightEngine.forceLoadInChunk(chunk, emptySections); +- // can't really force the chunk to be edged checked, as we need neighbouring chunks - but we don't have +- // them, so if it's not loaded then i guess we can't do edge checks. later loads of the chunk should +- // catch what we miss here. 
+- this.theLightEngine.checkChunkEdges(chunkPos.x, chunkPos.z); +- } +- +- this.chunkMap.releaseLightTicket(chunkPos); +- return chunk; +- }, (runnable) -> { +- this.theLightEngine.scheduleChunkLight(chunkPos, runnable); +- this.tryScheduleUpdate(); +- }).whenComplete((final ChunkAccess c, final Throwable throwable) -> { +- if (throwable != null) { +- LOGGER.error("Failed to light chunk " + chunkPos, throwable); +- } +- }); +- } +- // Paper end - replace light engine impl +- ChunkPos chunkPos = chunk.getPos(); +- chunk.setLightCorrect(false); +- this.addTask(chunkPos.x, chunkPos.z, ThreadedLevelLightEngine.TaskType.PRE_UPDATE, Util.name(() -> { +- if (!excludeBlocks) { +- super.propagateLightSources(chunkPos); +- } +- +- }, () -> { +- return "lightChunk " + chunkPos + " " + excludeBlocks; +- })); +- return CompletableFuture.supplyAsync(() -> { +- chunk.setLightCorrect(true); +- this.chunkMap.releaseLightTicket(chunkPos); +- return chunk; +- }, (task) -> { +- this.addTask(chunkPos.x, chunkPos.z, ThreadedLevelLightEngine.TaskType.POST_UPDATE, task); +- }); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + public void tryScheduleUpdate() { +- if (this.hasLightWork() && this.scheduled.compareAndSet(false, true)) { // Paper // Paper - rewrite light engine +- this.taskMailbox.tell(() -> { +- this.runUpdate(); +- this.scheduled.set(false); +- }); +- } +- ++ // Paper - rewrite chunk system + } + + private void runUpdate() { +- int i = Math.min(this.lightTasks.size(), 1000); +- ObjectListIterator> objectListIterator = this.lightTasks.iterator(); +- +- int j; +- for(j = 0; objectListIterator.hasNext() && j < i; ++j) { +- Pair pair = objectListIterator.next(); +- if (pair.getFirst() == ThreadedLevelLightEngine.TaskType.PRE_UPDATE) { +- pair.getSecond().run(); +- } +- } +- +- objectListIterator.back(j); +- this.theLightEngine.propagateChanges(); // Paper - rewrite light engine +- +- for(int var5 = 0; objectListIterator.hasNext() && var5 < i; ++var5) { +- Pair pair2 = objectListIterator.next(); +- if (pair2.getFirst() == ThreadedLevelLightEngine.TaskType.POST_UPDATE) { +- pair2.getSecond().run(); +- } +- +- objectListIterator.remove(); +- } +- ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + static enum TaskType { +diff --git a/src/main/java/net/minecraft/server/level/Ticket.java b/src/main/java/net/minecraft/server/level/Ticket.java +index b346fa94b23d81da7da073f71dd12e672e0f079c..768a2667f950a635a562fa8a0c75b31a3ae9190e 100644 +--- a/src/main/java/net/minecraft/server/level/Ticket.java ++++ b/src/main/java/net/minecraft/server/level/Ticket.java +@@ -6,9 +6,12 @@ public final class Ticket implements Comparable> { + private final TicketType type; + private final int ticketLevel; + public final T key; +- private long createdTick; ++ // Paper start - rewrite chunk system ++ public final long removalTick; + +- protected Ticket(TicketType type, int level, T argument) { ++ public Ticket(TicketType type, int level, T argument, long removalTick) { ++ this.removalTick = removalTick; ++ // Paper end - rewrite chunk system + this.type = type; + this.ticketLevel = level; + this.key = argument; +@@ -44,7 +47,7 @@ public final class Ticket implements Comparable> { + + @Override + public String toString() { +- return "Ticket[" + this.type + " " + this.ticketLevel + " (" + this.key + ")] at " + this.createdTick; ++ return "Ticket[" + this.type + " " + this.ticketLevel + " (" + this.key + ")] to die on " + this.removalTick; // Paper - rewrite 
chunk system + } + + public TicketType getType() { +@@ -56,11 +59,10 @@ public final class Ticket implements Comparable> { + } + + protected void setCreatedTick(long tickCreated) { +- this.createdTick = tickCreated; ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + + protected boolean timedOut(long currentTick) { +- long l = this.type.timeout(); +- return l != 0L && currentTick - this.createdTick > l; ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + } +diff --git a/src/main/java/net/minecraft/server/level/TicketType.java b/src/main/java/net/minecraft/server/level/TicketType.java +index 6051e5f272838ef23276a90e21c2fc821ca155d1..97d1ff2af23bac14e67bca5896843325aaa5bfc1 100644 +--- a/src/main/java/net/minecraft/server/level/TicketType.java ++++ b/src/main/java/net/minecraft/server/level/TicketType.java +@@ -8,6 +8,7 @@ import net.minecraft.world.level.ChunkPos; + + public class TicketType { + public static final TicketType FUTURE_AWAIT = create("future_await", Long::compareTo); // Paper ++ public static final TicketType ASYNC_LOAD = create("async_load", Long::compareTo); // Paper + + private final String name; + private final Comparator comparator; +@@ -27,6 +28,13 @@ public class TicketType { + public static final TicketType PLUGIN = TicketType.create("plugin", (a, b) -> 0); // CraftBukkit + public static final TicketType PLUGIN_TICKET = TicketType.create("plugin_ticket", (plugin1, plugin2) -> plugin1.getClass().getName().compareTo(plugin2.getClass().getName())); // CraftBukkit + public static final TicketType CHUNK_RELIGHT = create("light_update", Long::compareTo); // Paper - ensure chunks stay loaded for lighting ++ // Paper start - rewrite chunk system ++ public static final TicketType CHUNK_LOAD = create("chunk_load", Long::compareTo); ++ public static final TicketType STATUS_UPGRADE = create("status_upgrade", Long::compareTo); ++ public static final TicketType ENTITY_LOAD = create("entity_load", Long::compareTo); ++ public static final TicketType POI_LOAD = create("poi_load", Long::compareTo); ++ public static final TicketType UNLOAD_COOLDOWN = create("unload_cooldown", (u1, u2) -> 0, 5 * 20); ++ // Paper end - rewrite chunk system + + public static TicketType create(String name, Comparator argumentComparator) { + return new TicketType<>(name, argumentComparator, 0L); +diff --git a/src/main/java/net/minecraft/server/level/WorldGenRegion.java b/src/main/java/net/minecraft/server/level/WorldGenRegion.java +index e96a0ca47e4701ba187555bd92c968345bc85677..73b96f804079288e9c5fcc11da54e61e89a6782a 100644 +--- a/src/main/java/net/minecraft/server/level/WorldGenRegion.java ++++ b/src/main/java/net/minecraft/server/level/WorldGenRegion.java +@@ -504,4 +504,21 @@ public class WorldGenRegion implements WorldGenLevel { + public long nextSubTickCount() { + return this.subTickCount.getAndIncrement(); + } ++ ++ // Paper start ++ // No-op, this class doesn't provide entity access ++ @Override ++ public List getHardCollidingEntities(Entity except, AABB box, Predicate predicate) { ++ return Collections.emptyList(); ++ } ++ ++ @Override ++ public void getEntities(Entity except, AABB box, Predicate predicate, List into) {} ++ ++ @Override ++ public void getHardCollidingEntities(Entity except, AABB box, Predicate predicate, List into) {} ++ ++ @Override ++ public void getEntitiesByClass(Class clazz, Entity except, AABB box, List into, Predicate predicate) {} ++ // Paper end + } +diff --git 
a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java +index 403f7c7f31e0072b0cad0706bc981ece24733a1d..d72e8df4f99b6219ea305742f0cf8d1c1985ffd6 100644 +--- a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java ++++ b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java +@@ -788,6 +788,13 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic + this.disconnect(Component.translatable("disconnect.spam")); + return; + } ++ // Paper start ++ String str = packet.getCommand(); int index = -1; ++ if (str.length() > 64 && ((index = str.indexOf(' ')) == -1 || index >= 64)) { ++ server.scheduleOnMain(() -> this.disconnect(Component.translatable("disconnect.spam", new Object[0]))); // Paper ++ return; ++ } ++ // Paper end + // CraftBukkit end + StringReader stringreader = new StringReader(packet.getCommand()); + +diff --git a/src/main/java/net/minecraft/server/players/PlayerList.java b/src/main/java/net/minecraft/server/players/PlayerList.java +index 90c73b9075489242556a7ba749618e20c0ed0c4d..d4f44635b3cb7ac890ea89b6f4454fa9d4375c08 100644 +--- a/src/main/java/net/minecraft/server/players/PlayerList.java ++++ b/src/main/java/net/minecraft/server/players/PlayerList.java +@@ -262,7 +262,7 @@ public abstract class PlayerList { + boolean flag1 = gamerules.getBoolean(GameRules.RULE_REDUCEDDEBUGINFO); + + // Spigot - view distance +- playerconnection.send(new ClientboundLoginPacket(player.getId(), worlddata.isHardcore(), player.gameMode.getGameModeForPlayer(), player.gameMode.getPreviousGameModeForPlayer(), this.server.levelKeys(), this.synchronizedRegistries, worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), this.getMaxPlayers(), worldserver1.spigotConfig.viewDistance, worldserver1.spigotConfig.simulationDistance, flag1, !flag, worldserver1.isDebug(), worldserver1.isFlat(), player.getLastDeathLocation(), player.getPortalCooldown())); ++ playerconnection.send(new ClientboundLoginPacket(player.getId(), worlddata.isHardcore(), player.gameMode.getGameModeForPlayer(), player.gameMode.getPreviousGameModeForPlayer(), this.server.levelKeys(), this.synchronizedRegistries, worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), this.getMaxPlayers(), worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance(), worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance(), flag1, !flag, worldserver1.isDebug(), worldserver1.isFlat(), player.getLastDeathLocation(), player.getPortalCooldown())); // Paper - replace old player chunk management + player.getBukkitEntity().sendSupportedChannels(); // CraftBukkit + playerconnection.send(new ClientboundUpdateEnabledFeaturesPacket(FeatureFlags.REGISTRY.toNames(worldserver1.enabledFeatures()))); + playerconnection.send(new ClientboundCustomPayloadPacket(ClientboundCustomPayloadPacket.BRAND, (new FriendlyByteBuf(Unpooled.buffer())).writeUtf(this.getServer().getServerModName()))); +@@ -800,8 +800,8 @@ public abstract class PlayerList { + // CraftBukkit start + LevelData worlddata = worldserver1.getLevelData(); + entityplayer1.connection.send(new ClientboundRespawnPacket(worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), entityplayer1.gameMode.getGameModeForPlayer(), 
entityplayer1.gameMode.getPreviousGameModeForPlayer(), worldserver1.isDebug(), worldserver1.isFlat(), (byte) i, entityplayer1.getLastDeathLocation(), entityplayer1.getPortalCooldown())); +- entityplayer1.connection.send(new ClientboundSetChunkCacheRadiusPacket(worldserver1.spigotConfig.viewDistance)); // Spigot +- entityplayer1.connection.send(new ClientboundSetSimulationDistancePacket(worldserver1.spigotConfig.simulationDistance)); // Spigot ++ entityplayer1.connection.send(new ClientboundSetChunkCacheRadiusPacket(worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance())); // Spigot // Paper - replace old player chunk management ++ entityplayer1.connection.send(new ClientboundSetSimulationDistancePacket(worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance())); // Spigot // Paper - replace old player chunk management + entityplayer1.spawnIn(worldserver1); + entityplayer1.unsetRemoved(); + entityplayer1.connection.teleport(CraftLocation.toBukkit(entityplayer1.position(), worldserver1.getWorld(), entityplayer1.getYRot(), entityplayer1.getXRot())); +@@ -1297,7 +1297,7 @@ public abstract class PlayerList { + + public void setViewDistance(int viewDistance) { + this.viewDistance = viewDistance; +- this.broadcastAll(new ClientboundSetChunkCacheRadiusPacket(viewDistance)); ++ //this.broadcastAll(new ClientboundSetChunkCacheRadiusPacket(viewDistance)); // Paper - move into setViewDistance + Iterator iterator = this.server.getAllLevels().iterator(); + + while (iterator.hasNext()) { +@@ -1312,7 +1312,7 @@ public abstract class PlayerList { + + public void setSimulationDistance(int simulationDistance) { + this.simulationDistance = simulationDistance; +- this.broadcastAll(new ClientboundSetSimulationDistancePacket(simulationDistance)); ++ //this.broadcastAll(new ClientboundSetSimulationDistancePacket(simulationDistance)); // Paper - handled by playerchunkloader + Iterator iterator = this.server.getAllLevels().iterator(); + + while (iterator.hasNext()) { +diff --git a/src/main/java/net/minecraft/util/SortedArraySet.java b/src/main/java/net/minecraft/util/SortedArraySet.java +index ca788f0dcec4a117b410fe8348969e056b138b1e..4f5f2c25e12ee6d977bc98d9118650cfe91e6c0e 100644 +--- a/src/main/java/net/minecraft/util/SortedArraySet.java ++++ b/src/main/java/net/minecraft/util/SortedArraySet.java +@@ -22,6 +22,41 @@ public class SortedArraySet extends AbstractSet { + this.contents = (T[])castRawArray(new Object[initialCapacity]); + } + } ++ // Paper start - optimise removeIf ++ @Override ++ public boolean removeIf(java.util.function.Predicate filter) { ++ // prev. 
impl used an iterator, which could be n^2 and creates garbage ++ int i = 0, len = this.size; ++ T[] backingArray = this.contents; ++ ++ for (;;) { ++ if (i >= len) { ++ return false; ++ } ++ if (!filter.test(backingArray[i])) { ++ ++i; ++ continue; ++ } ++ break; ++ } ++ ++ // we only want to write back to backingArray if we really need to ++ ++ int lastIndex = i; // this is where new elements are shifted to ++ ++ for (; i < len; ++i) { ++ T curr = backingArray[i]; ++ if (!filter.test(curr)) { // if test throws we're screwed ++ backingArray[lastIndex++] = curr; ++ } ++ } ++ ++ // cleanup end ++ Arrays.fill(backingArray, lastIndex, len, null); ++ this.size = lastIndex; ++ return true; ++ } ++ // Paper end - optimise removeIf + + public static > SortedArraySet create() { + return create(10); +@@ -110,6 +145,31 @@ public class SortedArraySet extends AbstractSet { + } + } + ++ // Paper start - rewrite chunk system ++ public T replace(T object) { ++ int i = this.findIndex(object); ++ if (i >= 0) { ++ T old = this.contents[i]; ++ this.contents[i] = object; ++ return old; ++ } else { ++ this.addInternal(object, getInsertionPosition(i)); ++ return object; ++ } ++ } ++ ++ public T removeAndGet(T object) { ++ int i = this.findIndex(object); ++ if (i >= 0) { ++ final T ret = this.contents[i]; ++ this.removeInternal(i); ++ return ret; ++ } else { ++ return null; ++ } ++ } ++ // Paper end - rewrite chunk system ++ + @Override + public boolean remove(Object object) { + int i = this.findIndex((T)object); +diff --git a/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java b/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java +index 12e72ad737b1219fcdf88d344d41621d9fd5feec..e0bfeebeaac1aaea64bc07cdfdf7790e3e43ca7b 100644 +--- a/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java ++++ b/src/main/java/net/minecraft/util/worldupdate/WorldUpgrader.java +@@ -186,7 +186,11 @@ public class WorldUpgrader { + } + + WorldUpgrader.LOGGER.error("Error upgrading chunk {}", chunkcoordintpair, throwable); ++ // Paper start ++ } catch (IOException e) { ++ WorldUpgrader.LOGGER.error("Error upgrading chunk {}", chunkcoordintpair, e); + } ++ // Paper end + + if (flag1) { + ++this.converted; +diff --git a/src/main/java/net/minecraft/world/entity/Entity.java b/src/main/java/net/minecraft/world/entity/Entity.java +index f668a80c6bff67bf766207985c1af73f09e1bd1c..7f6a90fab14d6880f2784e1c62eb2f3c9da404b5 100644 +--- a/src/main/java/net/minecraft/world/entity/Entity.java ++++ b/src/main/java/net/minecraft/world/entity/Entity.java +@@ -327,6 +327,58 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { + } + // Paper end + ++ // Paper start ++ /** ++ * Overriding this field will cause memory leaks. ++ */ ++ private final boolean hardCollides; ++ ++ private static final java.util.Map, Boolean> cachedOverrides = java.util.Collections.synchronizedMap(new java.util.WeakHashMap<>()); ++ { ++ /* // Goodbye, broken on reobf... 
++ Boolean hardCollides = cachedOverrides.get(this.getClass()); ++ if (hardCollides == null) { ++ try { ++ java.lang.reflect.Method getHardCollisionBoxEntityMethod = Entity.class.getMethod("canCollideWith", Entity.class); ++ java.lang.reflect.Method hasHardCollisionBoxMethod = Entity.class.getMethod("canBeCollidedWith"); ++ if (!this.getClass().getMethod(hasHardCollisionBoxMethod.getName(), hasHardCollisionBoxMethod.getParameterTypes()).equals(hasHardCollisionBoxMethod) ++ || !this.getClass().getMethod(getHardCollisionBoxEntityMethod.getName(), getHardCollisionBoxEntityMethod.getParameterTypes()).equals(getHardCollisionBoxEntityMethod)) { ++ hardCollides = Boolean.TRUE; ++ } else { ++ hardCollides = Boolean.FALSE; ++ } ++ cachedOverrides.put(this.getClass(), hardCollides); ++ } ++ catch (ThreadDeath thr) { throw thr; } ++ catch (Throwable thr) { ++ // shouldn't happen, just explode ++ throw new RuntimeException(thr); ++ } ++ } */ ++ this.hardCollides = this instanceof Boat ++ || this instanceof net.minecraft.world.entity.monster.Shulker ++ || this instanceof net.minecraft.world.entity.vehicle.AbstractMinecart ++ || this.shouldHardCollide(); ++ } ++ ++ // plugins can override ++ protected boolean shouldHardCollide() { ++ return false; ++ } ++ ++ public final boolean hardCollides() { ++ return this.hardCollides; ++ } ++ ++ public net.minecraft.server.level.ChunkHolder.FullChunkStatus chunkStatus; ++ ++ public int sectionX = Integer.MIN_VALUE; ++ public int sectionY = Integer.MIN_VALUE; ++ public int sectionZ = Integer.MIN_VALUE; ++ ++ public boolean updatingSectionStatus = false; ++ // Paper end ++ + public Entity(EntityType type, Level world) { + this.id = Entity.ENTITY_COUNTER.incrementAndGet(); + this.passengers = ImmutableList.of(); +@@ -2256,11 +2308,11 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { + return InteractionResult.PASS; + } + +- public boolean canCollideWith(Entity other) { ++ public boolean canCollideWith(Entity other) { // Paper - diff on change, hard colliding entities override this - TODO CHECK ON UPDATE - AbstractMinecart/Boat override + return other.canBeCollidedWith() && !this.isPassengerOfSameVehicle(other); + } + +- public boolean canBeCollidedWith() { ++ public boolean canBeCollidedWith() { // Paper - diff on change, hard colliding entities override this TODO CHECK ON UPDATE - Boat/Shulker override + return false; + } + +@@ -3571,6 +3623,16 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { + }; + } + ++ // Paper start - rewrite chunk system ++ public boolean hasAnyPlayerPassengers() { ++ // copied from below ++ if (this.passengers.isEmpty()) { return false; } ++ return this.getIndirectPassengersStream().anyMatch((entity) -> { ++ return entity instanceof Player; ++ }); ++ } ++ // Paper end - rewrite chunk system ++ + public boolean hasExactlyOnePlayerPassenger() { + return this.getIndirectPassengersStream().filter((entity) -> { + return entity instanceof Player; +@@ -3902,6 +3964,12 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { + } + + public final void setPosRaw(double x, double y, double z) { ++ // Paper start - rewrite chunk system ++ if (this.updatingSectionStatus) { ++ LOGGER.error("Refusing to update position for entity " + this + " to position " + new Vec3(x, y, z) + " since it is processing a section status update", new Throwable()); ++ return; ++ } ++ // Paper end - rewrite chunk system + if (this.position.x != x || this.position.y != y || 
this.position.z != z) { + this.position = new Vec3(x, y, z); + int i = Mth.floor(x); +@@ -4009,6 +4077,13 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { + + @Override + public final void setRemoved(Entity.RemovalReason reason) { ++ // Paper start - rewrite chunk system ++ io.papermc.paper.util.TickThread.ensureTickThread(this, "Cannot remove entity off-main"); ++ if (this.updatingSectionStatus) { ++ LOGGER.warn("Entity " + this + " is currently prevented from being added/removed to world since it is processing section status updates", new Throwable()); ++ return; ++ } ++ // Paper end - rewrite chunk system + if (this.removalReason == null) { + this.removalReason = reason; + } +@@ -4017,7 +4092,7 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { + this.stopRiding(); + } + +- this.getPassengers().forEach(Entity::stopRiding); ++ if (reason != RemovalReason.UNLOADED_TO_CHUNK) this.getPassengers().forEach(Entity::stopRiding); // Paper - chunk system - don't adjust passenger state when unloading, it's just not safe (and messes with our logic in entity chunk unload) + this.levelCallback.onRemove(reason); + } + +@@ -4032,7 +4107,7 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { + + @Override + public boolean shouldBeSaved() { +- return this.removalReason != null && !this.removalReason.shouldSave() ? false : (this.isPassenger() ? false : !this.isVehicle() || !this.hasExactlyOnePlayerPassenger()); ++ return this.removalReason != null && !this.removalReason.shouldSave() ? false : (this.isPassenger() ? false : !this.isVehicle() || !this.hasAnyPlayerPassengers()); // Paper - rewrite chunk system - it should check if the entity has ANY player passengers + } + + @Override +diff --git a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java +index b18b896c624d5cadc02b1db9d011d82124d61d54..f10ba4211cbdcc4f4ce3585c7cb3f80185e13b73 100644 +--- a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java ++++ b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java +@@ -38,12 +38,28 @@ import net.minecraft.world.level.chunk.storage.SectionStorage; + public class PoiManager extends SectionStorage { + public static final int MAX_VILLAGE_DISTANCE = 6; + public static final int VILLAGE_SECTION_SIZE = 1; +- private final PoiManager.DistanceTracker distanceTracker; +- private final LongSet loadedChunks = new LongOpenHashSet(); ++ // Paper start - rewrite chunk system ++ // the vanilla tracker needs to be replaced because it does not support level removes ++ public final net.minecraft.server.level.ServerLevel world; ++ private final io.papermc.paper.util.misc.Delayed26WayDistancePropagator3D villageDistanceTracker = new io.papermc.paper.util.misc.Delayed26WayDistancePropagator3D(); ++ static final int POI_DATA_SOURCE = 7; ++ public static int convertBetweenLevels(final int level) { ++ return POI_DATA_SOURCE - level; ++ } ++ ++ protected void updateDistanceTracking(long section) { ++ if (this.isVillageCenter(section)) { ++ this.villageDistanceTracker.setSource(section, POI_DATA_SOURCE); ++ } else { ++ this.villageDistanceTracker.removeSource(section); ++ } ++ } ++ // Paper end - rewrite chunk system ++ + + public PoiManager(Path path, DataFixer dataFixer, boolean dsync, RegistryAccess registryManager, LevelHeightAccessor world) { + super(path, PoiSection::codec, PoiSection::new, dataFixer, 
DataFixTypes.POI_CHUNK, dsync, registryManager, world); +- this.distanceTracker = new PoiManager.DistanceTracker(); ++ this.world = (net.minecraft.server.level.ServerLevel)world; // Paper - rewrite chunk system + } + + public void add(BlockPos pos, Holder type) { +@@ -180,8 +196,8 @@ public class PoiManager extends SectionStorage { + } + + public int sectionsToVillage(SectionPos pos) { +- this.distanceTracker.runAllUpdates(); +- return this.distanceTracker.getLevel(pos.asLong()); ++ this.villageDistanceTracker.propagateUpdates(); // Paper - replace distance tracking util ++ return convertBetweenLevels(this.villageDistanceTracker.getLevel(io.papermc.paper.util.CoordinateUtils.getChunkSectionKey(pos))); // Paper - replace distance tracking util + } + + boolean isVillageCenter(long pos) { +@@ -195,21 +211,107 @@ public class PoiManager extends SectionStorage { + + @Override + public void tick(BooleanSupplier shouldKeepTicking) { +- super.tick(shouldKeepTicking); +- this.distanceTracker.runAllUpdates(); ++ this.villageDistanceTracker.propagateUpdates(); // Paper - rewrite chunk system + } + + @Override +- protected void setDirty(long pos) { +- super.setDirty(pos); +- this.distanceTracker.update(pos, this.distanceTracker.getLevelFromSource(pos), false); ++ public void setDirty(long pos) { ++ // Paper start - rewrite chunk system ++ int chunkX = io.papermc.paper.util.CoordinateUtils.getChunkSectionX(pos); ++ int chunkZ = io.papermc.paper.util.CoordinateUtils.getChunkSectionZ(pos); ++ io.papermc.paper.chunk.system.scheduling.ChunkHolderManager manager = this.world.chunkTaskScheduler.chunkHolderManager; ++ io.papermc.paper.chunk.system.poi.PoiChunk chunk = manager.getPoiChunkIfLoaded(chunkX, chunkZ, false); ++ if (chunk != null) { ++ chunk.setDirty(true); ++ } ++ this.updateDistanceTracking(pos); ++ // Paper end - rewrite chunk system + } + + @Override + protected void onSectionLoad(long pos) { +- this.distanceTracker.update(pos, this.distanceTracker.getLevelFromSource(pos), false); ++ this.updateDistanceTracking(pos); // Paper - move to new distance tracking util + } + ++ // Paper start - rewrite chunk system ++ @Override ++ public Optional get(long pos) { ++ int chunkX = io.papermc.paper.util.CoordinateUtils.getChunkSectionX(pos); ++ int chunkY = io.papermc.paper.util.CoordinateUtils.getChunkSectionY(pos); ++ int chunkZ = io.papermc.paper.util.CoordinateUtils.getChunkSectionZ(pos); ++ ++ io.papermc.paper.util.TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Accessing poi chunk off-main"); ++ ++ io.papermc.paper.chunk.system.scheduling.ChunkHolderManager manager = this.world.chunkTaskScheduler.chunkHolderManager; ++ io.papermc.paper.chunk.system.poi.PoiChunk ret = manager.getPoiChunkIfLoaded(chunkX, chunkZ, true); ++ ++ return ret == null ? 
Optional.empty() : ret.getSectionForVanilla(chunkY); ++ } ++ ++ @Override ++ public Optional getOrLoad(long pos) { ++ int chunkX = io.papermc.paper.util.CoordinateUtils.getChunkSectionX(pos); ++ int chunkY = io.papermc.paper.util.CoordinateUtils.getChunkSectionY(pos); ++ int chunkZ = io.papermc.paper.util.CoordinateUtils.getChunkSectionZ(pos); ++ ++ io.papermc.paper.util.TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Accessing poi chunk off-main"); ++ ++ io.papermc.paper.chunk.system.scheduling.ChunkHolderManager manager = this.world.chunkTaskScheduler.chunkHolderManager; ++ ++ if (chunkY >= io.papermc.paper.util.WorldUtil.getMinSection(this.world) && ++ chunkY <= io.papermc.paper.util.WorldUtil.getMaxSection(this.world)) { ++ io.papermc.paper.chunk.system.poi.PoiChunk ret = manager.getPoiChunkIfLoaded(chunkX, chunkZ, true); ++ if (ret != null) { ++ return ret.getSectionForVanilla(chunkY); ++ } else { ++ return manager.loadPoiChunk(chunkX, chunkZ).getSectionForVanilla(chunkY); ++ } ++ } ++ // retain vanilla behavior: do not load section if out of bounds! ++ return Optional.empty(); ++ } ++ ++ @Override ++ protected PoiSection getOrCreate(long pos) { ++ int chunkX = io.papermc.paper.util.CoordinateUtils.getChunkSectionX(pos); ++ int chunkY = io.papermc.paper.util.CoordinateUtils.getChunkSectionY(pos); ++ int chunkZ = io.papermc.paper.util.CoordinateUtils.getChunkSectionZ(pos); ++ ++ io.papermc.paper.util.TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Accessing poi chunk off-main"); ++ ++ io.papermc.paper.chunk.system.scheduling.ChunkHolderManager manager = this.world.chunkTaskScheduler.chunkHolderManager; ++ ++ io.papermc.paper.chunk.system.poi.PoiChunk ret = manager.getPoiChunkIfLoaded(chunkX, chunkZ, true); ++ if (ret != null) { ++ return ret.getOrCreateSection(chunkY); ++ } else { ++ return manager.loadPoiChunk(chunkX, chunkZ).getOrCreateSection(chunkY); ++ } ++ } ++ ++ public void onUnload(long coordinate) { // Paper - rewrite chunk system ++ int chunkX = io.papermc.paper.util.MCUtil.getCoordinateX(coordinate); ++ int chunkZ = io.papermc.paper.util.MCUtil.getCoordinateZ(coordinate); ++ io.papermc.paper.util.TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Unloading poi chunk off-main"); ++ for (int section = this.levelHeightAccessor.getMinSection(); section < this.levelHeightAccessor.getMaxSection(); ++section) { ++ long sectionPos = SectionPos.asLong(chunkX, section, chunkZ); ++ this.updateDistanceTracking(sectionPos); ++ } ++ } ++ ++ public void loadInPoiChunk(io.papermc.paper.chunk.system.poi.PoiChunk poiChunk) { ++ int chunkX = poiChunk.chunkX; ++ int chunkZ = poiChunk.chunkZ; ++ io.papermc.paper.util.TickThread.ensureTickThread(this.world, chunkX, chunkZ, "Loading poi chunk off-main"); ++ for (int sectionY = this.levelHeightAccessor.getMinSection(); sectionY < this.levelHeightAccessor.getMaxSection(); ++sectionY) { ++ PoiSection section = poiChunk.getSection(sectionY); ++ if (section != null && !section.isEmpty()) { ++ this.onSectionLoad(SectionPos.asLong(chunkX, sectionY, chunkZ)); ++ } ++ } ++ } ++ // Paper end - rewrite chunk system ++ + public void checkConsistencyWithBlocks(SectionPos sectionPos, LevelChunkSection chunkSection) { + Util.ifElse(this.getOrLoad(sectionPos.asLong()), (poiSet) -> { + poiSet.refresh((populator) -> { +@@ -248,7 +350,7 @@ public class PoiManager extends SectionStorage { + }).map((pair) -> { + return pair.getFirst().chunk(); + }).filter((chunkPos) -> { +- return this.loadedChunks.add(chunkPos.toLong()); ++ return true; // 
Paper - rewrite chunk system + }).forEach((chunkPos) -> { + world.getChunk(chunkPos.x, chunkPos.z, ChunkStatus.EMPTY); + }); +@@ -264,7 +366,7 @@ public class PoiManager extends SectionStorage { + + @Override + protected int getLevelFromSource(long id) { +- return PoiManager.this.isVillageCenter(id) ? 0 : 7; ++ return PoiManager.this.isVillageCenter(id) ? 0 : 7; // Paper - rewrite chunk system - diff on change, this specifies the source level to use for distance tracking + } + + @Override +@@ -287,6 +389,35 @@ public class PoiManager extends SectionStorage { + } + } + ++ // Paper start - Asynchronous chunk io ++ @javax.annotation.Nullable ++ @Override ++ public net.minecraft.nbt.CompoundTag read(ChunkPos chunkcoordintpair) throws java.io.IOException { ++ // Paper start - rewrite chunk system ++ if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) { ++ return io.papermc.paper.chunk.system.io.RegionFileIOThread.loadData( ++ this.world, chunkcoordintpair.x, chunkcoordintpair.z, io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA, ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.getIOBlockingPriorityForCurrentThread() ++ ); ++ } ++ // Paper end - rewrite chunk system ++ return super.read(chunkcoordintpair); ++ } ++ ++ @Override ++ public void write(ChunkPos chunkcoordintpair, net.minecraft.nbt.CompoundTag nbttagcompound) throws java.io.IOException { ++ // Paper start - rewrite chunk system ++ if (!io.papermc.paper.chunk.system.io.RegionFileIOThread.isRegionFileThread()) { ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.scheduleSave( ++ this.world, chunkcoordintpair.x, chunkcoordintpair.z, nbttagcompound, ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.RegionFileType.POI_DATA); ++ return; ++ } ++ // Paper end - rewrite chunk system ++ super.write(chunkcoordintpair, nbttagcompound); ++ } ++ // Paper end ++ + public static enum Occupancy { + HAS_SPACE(PoiRecord::hasSpace), + IS_OCCUPIED(PoiRecord::isOccupied), +diff --git a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiSection.java b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiSection.java +index 795a02941d7cecb58ec45b5e79c8d510ff21163a..3fc17817906876e83f040f908b8b1ba6cfa37b8b 100644 +--- a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiSection.java ++++ b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiSection.java +@@ -29,6 +29,7 @@ public class PoiSection { + private final Map, Set> byType = Maps.newHashMap(); + private final Runnable setDirty; + private boolean isValid; ++ public final Optional noAllocateOptional = Optional.of(this); // Paper - rewrite chunk system + + public static Codec codec(Runnable updateListener) { + return RecordCodecBuilder.create((instance) -> { +@@ -46,6 +47,12 @@ public class PoiSection { + this(updateListener, true, ImmutableList.of()); + } + ++ // Paper start - isEmpty ++ public boolean isEmpty() { ++ return this.isValid && this.records.isEmpty() && this.byType.isEmpty(); ++ } ++ // Paper end ++ + private PoiSection(Runnable updateListener, boolean valid, List pois) { + this.setDirty = updateListener; + this.isValid = valid; +diff --git a/src/main/java/net/minecraft/world/level/EntityGetter.java b/src/main/java/net/minecraft/world/level/EntityGetter.java +index 127c4ebedb94631ceac92dbdcd465e904217d715..be6e3e21ad62da01e5e2dd78e300cbc8efdbeb42 100644 +--- a/src/main/java/net/minecraft/world/level/EntityGetter.java ++++ b/src/main/java/net/minecraft/world/level/EntityGetter.java +@@ -18,6 +18,18 
@@ import net.minecraft.world.phys.shapes.Shapes; + import net.minecraft.world.phys.shapes.VoxelShape; + + public interface EntityGetter { ++ ++ // Paper start ++ List getHardCollidingEntities(Entity except, AABB box, Predicate predicate); ++ ++ void getEntities(Entity except, AABB box, Predicate predicate, List into); ++ ++ void getHardCollidingEntities(Entity except, AABB box, Predicate predicate, List into); ++ ++ void getEntitiesByClass(Class clazz, Entity except, final AABB box, List into, ++ Predicate predicate); ++ // Paper end ++ + List getEntities(@Nullable Entity except, AABB box, Predicate predicate); + + List getEntities(EntityTypeTest filter, AABB box, Predicate predicate); +diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java +index d87f02c748fe2e5b4ea251f6691e8907a152cb6d..0276c32ef8323bcf82eb3400bb003a93b8a56de0 100644 +--- a/src/main/java/net/minecraft/world/level/Level.java ++++ b/src/main/java/net/minecraft/world/level/Level.java +@@ -453,6 +453,11 @@ public abstract class Level implements LevelAccessor, AutoCloseable { + + if ((i & 2) != 0 && (!this.isClientSide || (i & 4) == 0) && (this.isClientSide || chunk == null || (chunk.getFullStatus() != null && chunk.getFullStatus().isOrAfter(FullChunkStatus.BLOCK_TICKING)))) { // allow chunk to be null here as chunk.isReady() is false when we send our notification during block placement + this.sendBlockUpdated(blockposition, iblockdata1, iblockdata, i); ++ // Paper start - per player view distance - allow block updates for non-ticking chunks in player view distance ++ // if copied from above ++ } else if ((i & 2) != 0 && (!this.isClientSide || (i & 4) == 0) && (this.isClientSide || chunk == null || ((ServerLevel)this).getChunkSource().chunkMap.playerChunkManager.broadcastMap.getObjectsInRange(io.papermc.paper.util.MCUtil.getCoordinateKey(blockposition)) != null)) { // Paper - replace old player chunk management ++ ((ServerLevel)this).getChunkSource().blockChanged(blockposition); ++ // Paper end - per player view distance + } + + if ((i & 1) != 0) { +@@ -805,7 +810,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { + return this.capturedTileEntities.get(blockposition); + } + // CraftBukkit end +- return this.isOutsideBuildHeight(blockposition) ? null : (!this.isClientSide && Thread.currentThread() != this.thread ? null : this.getChunkAt(blockposition).getBlockEntity(blockposition, LevelChunk.EntityCreationType.IMMEDIATE)); ++ return this.isOutsideBuildHeight(blockposition) ? null : (!this.isClientSide && !io.papermc.paper.util.TickThread.isTickThread() ? 
null : this.getChunkAt(blockposition).getBlockEntity(blockposition, LevelChunk.EntityCreationType.IMMEDIATE)); // Paper - rewrite chunk system + } + + public void setBlockEntity(BlockEntity blockEntity) { +@@ -896,26 +901,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { + public List getEntities(@Nullable Entity except, AABB box, Predicate predicate) { + this.getProfiler().incrementCounter("getEntities"); + List list = Lists.newArrayList(); +- +- this.getEntities().get(box, (entity1) -> { +- if (entity1 != except && predicate.test(entity1)) { +- list.add(entity1); +- } +- +- if (entity1 instanceof EnderDragon) { +- EnderDragonPart[] aentitycomplexpart = ((EnderDragon) entity1).getSubEntities(); +- int i = aentitycomplexpart.length; +- +- for (int j = 0; j < i; ++j) { +- EnderDragonPart entitycomplexpart = aentitycomplexpart[j]; +- +- if (entity1 != except && predicate.test(entitycomplexpart)) { +- list.add(entitycomplexpart); +- } +- } +- } +- +- }); ++ ((ServerLevel)this).getEntityLookup().getEntities(except, box, list, predicate); // Paper - optimise this call + return list; + } + +@@ -933,34 +919,23 @@ public abstract class Level implements LevelAccessor, AutoCloseable { + + public void getEntities(EntityTypeTest filter, AABB box, Predicate predicate, List result, int limit) { + this.getProfiler().incrementCounter("getEntities"); +- this.getEntities().get(filter, box, (entity) -> { +- if (predicate.test(entity)) { +- result.add(entity); +- if (result.size() >= limit) { +- return AbortableIterationConsumer.Continuation.ABORT; +- } +- } +- +- if (entity instanceof EnderDragon) { +- EnderDragon entityenderdragon = (EnderDragon) entity; +- EnderDragonPart[] aentitycomplexpart = entityenderdragon.getSubEntities(); +- int j = aentitycomplexpart.length; +- +- for (int k = 0; k < j; ++k) { +- EnderDragonPart entitycomplexpart = aentitycomplexpart[k]; +- T t0 = filter.tryCast(entitycomplexpart); // CraftBukkit - decompile error +- +- if (t0 != null && predicate.test(t0)) { +- result.add(t0); +- if (result.size() >= limit) { +- return AbortableIterationConsumer.Continuation.ABORT; +- } +- } +- } ++ // Paper start - optimise this call ++ //TODO use limit ++ if (filter instanceof net.minecraft.world.entity.EntityType entityTypeTest) { ++ ((ServerLevel) this).getEntityLookup().getEntities(entityTypeTest, box, result, predicate); ++ } else { ++ Predicate test = (obj) -> { ++ return filter.tryCast(obj) != null; ++ }; ++ predicate = predicate == null ? 
test : test.and((Predicate) predicate); ++ Class base; ++ if (filter == null || (base = filter.getBaseClass()) == null || base == Entity.class) { ++ ((ServerLevel) this).getEntityLookup().getEntities((Entity) null, box, (List) result, (Predicate)predicate); ++ } else { ++ ((ServerLevel) this).getEntityLookup().getEntities(base, null, box, (List) result, (Predicate)predicate); // Paper - optimise this call + } +- +- return AbortableIterationConsumer.Continuation.CONTINUE; +- }); ++ } ++ // Paper end - optimise this call + } + + @Nullable +@@ -1230,4 +1205,45 @@ public abstract class Level implements LevelAccessor, AutoCloseable { + + private ExplosionInteraction() {} + } ++ // Paper start ++ //protected final io.papermc.paper.world.EntitySliceManager entitySliceManager; // Paper - rewrite chunk system ++ ++ public org.bukkit.entity.Entity[] getChunkEntities(int chunkX, int chunkZ) { ++ io.papermc.paper.world.ChunkEntitySlices slices = ((ServerLevel)this).getEntityLookup().getChunk(chunkX, chunkZ); ++ if (slices == null) { ++ return new org.bukkit.entity.Entity[0]; ++ } ++ return slices.getChunkEntities(); ++ } ++ ++ @Override ++ public List getHardCollidingEntities(Entity except, AABB box, Predicate predicate) { ++ List ret = new java.util.ArrayList<>(); ++ ((ServerLevel)this).getEntityLookup().getHardCollidingEntities(except, box, ret, predicate); ++ return ret; ++ } ++ ++ @Override ++ public void getEntities(Entity except, AABB box, Predicate predicate, List into) { ++ ((ServerLevel)this).getEntityLookup().getEntities(except, box, into, predicate); ++ } ++ ++ @Override ++ public void getHardCollidingEntities(Entity except, AABB box, Predicate predicate, List into) { ++ ((ServerLevel)this).getEntityLookup().getHardCollidingEntities(except, box, into, predicate); ++ } ++ ++ @Override ++ public void getEntitiesByClass(Class clazz, Entity except, final AABB box, List into, ++ Predicate predicate) { ++ ((ServerLevel)this).getEntityLookup().getEntities((Class)clazz, except, box, (List)into, (Predicate)predicate); ++ } ++ ++ @Override ++ public List getEntitiesOfClass(Class entityClass, AABB box, Predicate predicate) { ++ List ret = new java.util.ArrayList<>(); ++ ((ServerLevel)this).getEntityLookup().getEntities(entityClass, null, box, ret, predicate); ++ return ret; ++ } ++ // Paper end + } +diff --git a/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java b/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java +index 8bb14230beb4b012f38997eec70934b96bae4db5..5ca3987683f7cecbce24bac434dc387bb5e9bf08 100644 +--- a/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java ++++ b/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java +@@ -114,7 +114,7 @@ public abstract class ChunkGenerator { + return CompletableFuture.supplyAsync(Util.wrapThreadWithTaskName("init_biomes", () -> { + chunk.fillBiomesFromNoise(this.biomeSource, noiseConfig.sampler()); + return chunk; +- }), Util.backgroundExecutor()); ++ }), executor); // Paper - run with supplied executor + } + + public abstract void applyCarvers(WorldGenRegion chunkRegion, long seed, RandomState noiseConfig, BiomeManager biomeAccess, StructureManager structureAccessor, ChunkAccess chunk, GenerationStep.Carving carverStep); +diff --git a/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java b/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java +index fb5a06a908d2b42bf0530b62ed648548499d9f87..ec55711e912fe6cb8f797c0b21bcef273966a47a 100644 +--- 
a/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java ++++ b/src/main/java/net/minecraft/world/level/chunk/ChunkStatus.java +@@ -30,6 +30,30 @@ import net.minecraft.world.level.levelgen.structure.templatesystem.StructureTemp + + public class ChunkStatus { + ++ // Paper start - rewrite chunk system ++ public boolean isParallelCapable; // Paper ++ public int writeRadius = -1; ++ public int loadRange = 0; ++ ++ protected static final java.util.List statuses = new java.util.ArrayList<>(); ++ ++ private ChunkStatus nextStatus; ++ ++ public final ChunkStatus getNextStatus() { ++ return this.nextStatus; ++ } ++ ++ public final boolean isEmptyLoadStatus() { ++ return this.loadingTask == PASSTHROUGH_LOAD_TASK; ++ } ++ ++ public final boolean isEmptyGenStatus() { ++ return this == ChunkStatus.EMPTY; ++ } ++ ++ public final java.util.concurrent.atomic.AtomicBoolean warnedAboutNoImmediateComplete = new java.util.concurrent.atomic.AtomicBoolean(); ++ // Paper end - rewrite chunk system ++ + public static final int MAX_STRUCTURE_DISTANCE = 8; + private static final EnumSet PRE_FEATURES = EnumSet.of(Heightmap.Types.OCEAN_FLOOR_WG, Heightmap.Types.WORLD_SURFACE_WG); + public static final EnumSet POST_FEATURES = EnumSet.of(Heightmap.Types.OCEAN_FLOOR, Heightmap.Types.WORLD_SURFACE, Heightmap.Types.MOTION_BLOCKING, Heightmap.Types.MOTION_BLOCKING_NO_LEAVES); +@@ -151,13 +175,13 @@ public class ChunkStatus { + ((ProtoChunk) chunk).setLightEngine(lightingProvider); + boolean flag = ChunkStatus.isLighted(chunk); + +- return lightingProvider.initializeLight(chunk, flag).thenApply(Either::left); ++ return CompletableFuture.completedFuture(Either.left(chunk)); // Paper - rewrite chunk system + } + + private static CompletableFuture> lightChunk(ThreadedLevelLightEngine lightingProvider, ChunkAccess chunk) { + boolean flag = ChunkStatus.isLighted(chunk); + +- return lightingProvider.lightChunk(chunk, flag).thenApply(Either::left); ++ return CompletableFuture.completedFuture(Either.left(chunk)); // Paper - rewrite chunk system + } + + private static ChunkStatus registerSimple(String id, @Nullable ChunkStatus previous, int taskMargin, EnumSet heightMapTypes, ChunkStatus.ChunkType chunkType, ChunkStatus.SimpleGenerationTask task) { +@@ -211,6 +235,13 @@ public class ChunkStatus { + this.chunkType = chunkType; + this.heightmapsAfter = heightMapTypes; + this.index = previous == null ? 
0 : previous.getIndex() + 1; ++ // Paper start ++ this.nextStatus = this; ++ if (statuses.size() > 0) { ++ statuses.get(statuses.size() - 1).nextStatus = this; ++ } ++ statuses.add(this); ++ // Paper end + } + + public int getIndex() { +diff --git a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java +index 09999a3f523ce6d652799215d3418284a69042c1..41754f6c8162df07e2b22f4ffcacd8b6158a864e 100644 +--- a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java ++++ b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java +@@ -172,6 +172,43 @@ public class LevelChunk extends ChunkAccess { + + protected void onNeighbourChange(final long bitsetBefore, final long bitsetAfter) { + ++ // Paper start - no-tick view distance ++ ServerChunkCache chunkProviderServer = ((ServerLevel)this.level).getChunkSource(); ++ net.minecraft.server.level.ChunkMap chunkMap = chunkProviderServer.chunkMap; ++ // this code handles the addition of ticking tickets - the distance map handles the removal ++ if (!areNeighboursLoaded(bitsetBefore, 2) && areNeighboursLoaded(bitsetAfter, 2)) { ++ if (chunkMap.playerChunkManager.tickMap.getObjectsInRange(this.coordinateKey) != null) { // Paper - replace old player chunk loading system ++ // now we're ready for entity ticking ++ chunkProviderServer.mainThreadProcessor.execute(() -> { ++ // double check that this condition still holds. ++ if (LevelChunk.this.areNeighboursLoaded(2) && chunkMap.playerChunkManager.tickMap.getObjectsInRange(LevelChunk.this.coordinateKey) != null) { // Paper - replace old player chunk loading system ++ chunkMap.playerChunkManager.onChunkPlayerTickReady(this.chunkPos.x, this.chunkPos.z); // Paper - replace old player chunk ++ chunkProviderServer.addTicketAtLevel(net.minecraft.server.level.TicketType.PLAYER, LevelChunk.this.chunkPos, 31, LevelChunk.this.chunkPos); // 31 -> entity ticking, TODO check on update ++ } ++ }); ++ } ++ } ++ ++ // this code handles the chunk sending ++ if (!areNeighboursLoaded(bitsetBefore, 1) && areNeighboursLoaded(bitsetAfter, 1)) { ++ // Paper start - replace old player chunk loading system ++ if (chunkMap.playerChunkManager.isChunkNearPlayers(this.chunkPos.x, this.chunkPos.z)) { ++ // the post processing is expensive, so we don't want to run it unless we're actually near ++ // a player. 
++ chunkProviderServer.mainThreadProcessor.execute(() -> { ++ if (!LevelChunk.this.areNeighboursLoaded(1)) { ++ return; ++ } ++ LevelChunk.this.postProcessGeneration(); ++ if (!LevelChunk.this.areNeighboursLoaded(1)) { ++ return; ++ } ++ chunkMap.playerChunkManager.onChunkSendReady(this.chunkPos.x, this.chunkPos.z); ++ }); ++ } ++ // Paper end - replace old player chunk loading system ++ } ++ // Paper end - no-tick view distance + } + + public final boolean isAnyNeighborsLoaded() { +@@ -661,9 +698,26 @@ public class LevelChunk extends ChunkAccess { + + } + +- // CraftBukkit start +- public void loadCallback() { +- // Paper start - neighbour cache ++ // Paper start - new load callbacks ++ private io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder; ++ public io.papermc.paper.chunk.system.scheduling.NewChunkHolder getChunkHolder() { ++ return this.chunkHolder; ++ } ++ ++ public void setChunkHolder(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) { ++ if (chunkHolder == null) { ++ throw new NullPointerException("Chunkholder cannot be null"); ++ } ++ if (this.chunkHolder != null) { ++ throw new IllegalStateException("Already have chunkholder: " + this.chunkHolder + ", cannot replace with " + chunkHolder); ++ } ++ this.chunkHolder = chunkHolder; ++ this.playerChunk = chunkHolder.vanillaChunkHolder; ++ } ++ ++ /* Note: We skip the light neighbour chunk loading done for the vanilla full chunk */ ++ /* Starlight does not need these chunks for lighting purposes because of edge checks */ ++ public void pushChunkIntoLoadedMap() { + int chunkX = this.chunkPos.x; + int chunkZ = this.chunkPos.z; + net.minecraft.server.level.ServerChunkCache chunkProvider = this.level.getChunkSource(); +@@ -678,10 +732,56 @@ public class LevelChunk extends ChunkAccess { + } + } + this.setNeighbourLoaded(0, 0, this); ++ this.level.getChunkSource().addLoadedChunk(this); ++ } ++ ++ public void onChunkLoad(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) { ++ // figure out how this should interface with: ++ // the entity chunk load event // -> moved to the FULL status ++ // the chunk load event // -> stays here ++ // any entity add to world events // -> in FULL status ++ this.loadCallback(); ++ io.papermc.paper.chunk.system.ChunkSystem.onChunkBorder(this, chunkHolder.vanillaChunkHolder); ++ } ++ ++ public void onChunkUnload(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) { ++ // figure out how this should interface with: ++ // the entity chunk load event // -> moved to chunk unload to disk (not written yet) ++ // the chunk load event // -> stays here ++ // any entity add to world events // -> goes into the unload logic, it will completely explode ++ // etc later ++ this.unloadCallback(); ++ io.papermc.paper.chunk.system.ChunkSystem.onChunkNotBorder(this, chunkHolder.vanillaChunkHolder); ++ } ++ ++ public void onChunkTicking(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) { ++ this.postProcessGeneration(); ++ this.level.startTickingChunk(this); ++ io.papermc.paper.chunk.system.ChunkSystem.onChunkTicking(this, chunkHolder.vanillaChunkHolder); ++ } ++ ++ public void onChunkNotTicking(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) { ++ io.papermc.paper.chunk.system.ChunkSystem.onChunkNotTicking(this, chunkHolder.vanillaChunkHolder); ++ } ++ ++ public void onChunkEntityTicking(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) { ++ 
io.papermc.paper.chunk.system.ChunkSystem.onChunkEntityTicking(this, chunkHolder.vanillaChunkHolder); ++ } ++ ++ public void onChunkNotEntityTicking(io.papermc.paper.chunk.system.scheduling.NewChunkHolder chunkHolder) { ++ io.papermc.paper.chunk.system.ChunkSystem.onChunkNotEntityTicking(this, chunkHolder.vanillaChunkHolder); ++ } ++ // Paper end - new load callbacks ++ ++ // CraftBukkit start ++ public void loadCallback() { ++ if (this.loadedTicketLevel) { LOGGER.error("Double calling chunk load!", new Throwable()); } // Paper ++ // Paper - rewrite chunk system - move into separate callback + this.loadedTicketLevel = true; +- // Paper end - neighbour cache ++ // Paper - rewrite chunk system - move into separate callback + org.bukkit.Server server = this.level.getCraftServer(); +- this.level.getChunkSource().addLoadedChunk(this); // Paper ++ // Paper - rewrite chunk system - move into separate callback ++ ((ServerLevel)this.level).getChunkSource().chunkMap.playerChunkManager.onChunkLoad(this.chunkPos.x, this.chunkPos.z); // Paper - rewrite player chunk management + if (server != null) { + /* + * If it's a new world, the first few chunks are generated inside +@@ -690,6 +790,7 @@ public class LevelChunk extends ChunkAccess { + */ + org.bukkit.Chunk bukkitChunk = new org.bukkit.craftbukkit.CraftChunk(this); + server.getPluginManager().callEvent(new org.bukkit.event.world.ChunkLoadEvent(bukkitChunk, this.needsDecoration)); ++ this.chunkHolder.getEntityChunk().callEntitiesLoadEvent(); // Paper - rewrite chunk system + + if (this.needsDecoration) { + try (co.aikar.timings.Timing ignored = this.level.timings.chunkLoadPopulate.startTiming()) { // Paper +@@ -718,9 +819,11 @@ public class LevelChunk extends ChunkAccess { + } + + public void unloadCallback() { ++ if (!this.loadedTicketLevel) { LOGGER.error("Double calling chunk unload!", new Throwable()); } // Paper + org.bukkit.Server server = this.level.getCraftServer(); ++ this.chunkHolder.getEntityChunk().callEntitiesUnloadEvent(); // Paper - rewrite chunk system + org.bukkit.Chunk bukkitChunk = new org.bukkit.craftbukkit.CraftChunk(this); +- org.bukkit.event.world.ChunkUnloadEvent unloadEvent = new org.bukkit.event.world.ChunkUnloadEvent(bukkitChunk, this.isUnsaved()); ++ org.bukkit.event.world.ChunkUnloadEvent unloadEvent = new org.bukkit.event.world.ChunkUnloadEvent(bukkitChunk, true); // Paper - rewrite chunk system - force save to true so that mustNotSave is correctly set below + server.getPluginManager().callEvent(unloadEvent); + // note: saving can be prevented, but not forced if no saving is actually required + this.mustNotSave = !unloadEvent.isSaveChunk(); +@@ -742,9 +845,26 @@ public class LevelChunk extends ChunkAccess { + // Paper end + } + ++ // Paper start - add dirty system to tick lists ++ @Override ++ public void setUnsaved(boolean needsSaving) { ++ if (!needsSaving) { ++ this.blockTicks.clearDirty(); ++ this.fluidTicks.clearDirty(); ++ } ++ super.setUnsaved(needsSaving); ++ } ++ // Paper end - add dirty system to tick lists ++ + @Override + public boolean isUnsaved() { +- return super.isUnsaved() && !this.mustNotSave; ++ // Paper start - add dirty system to tick lists ++ long gameTime = this.level.getLevelData().getGameTime(); ++ if (this.blockTicks.isDirty(gameTime) || this.fluidTicks.isDirty(gameTime)) { ++ return true; ++ } ++ // Paper end - add dirty system to tick lists ++ return super.isUnsaved(); // Paper - rewrite chunk system - do NOT clobber the dirty flag + } + // CraftBukkit end + +@@ -813,7 +933,9 @@ public class 
LevelChunk extends ChunkAccess { + return this.blockEntities; + } + ++ public boolean isPostProcessingDone; // Paper - replace chunk loader system + public void postProcessGeneration() { ++ try { // Paper - replace chunk loader system + ChunkPos chunkcoordintpair = this.getPos(); + + for (int i = 0; i < this.postProcessing.length; ++i) { +@@ -851,6 +973,11 @@ public class LevelChunk extends ChunkAccess { + + this.pendingBlockEntities.clear(); + this.upgradeData.upgrade(this); ++ } finally { // Paper start - replace chunk loader system ++ this.isPostProcessingDone = true; ++ this.level.getChunkSource().chunkMap.playerChunkManager.onChunkPostProcessing(this.chunkPos.x, this.chunkPos.z); ++ } ++ // Paper end - replace chunk loader system + } + + @Nullable +@@ -900,7 +1027,7 @@ public class LevelChunk extends ChunkAccess { + } + + public FullChunkStatus getFullStatus() { +- return this.fullStatus == null ? FullChunkStatus.FULL : (FullChunkStatus) this.fullStatus.get(); ++ return this.chunkHolder == null ? FullChunkStatus.INACCESSIBLE : this.chunkHolder.getChunkStatus(); // Paper - rewrite chunk system + } + + public void setFullStatus(Supplier levelTypeProvider) { +diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java +index 24892ae367e19038625e243bcdf1bb694632ede5..55da32077d1db81ba197da0be5896da694f4bfa9 100644 +--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java ++++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java +@@ -94,7 +94,31 @@ public class ChunkSerializer { + + public ChunkSerializer() {} + ++ // Paper start ++ public static final class InProgressChunkHolder { ++ ++ public final ProtoChunk protoChunk; ++ public final java.util.ArrayDeque tasks; ++ ++ public CompoundTag poiData; ++ ++ public InProgressChunkHolder(final ProtoChunk protoChunk, final java.util.ArrayDeque tasks) { ++ this.protoChunk = protoChunk; ++ this.tasks = tasks; ++ } ++ } ++ // Paper end ++ + public static ProtoChunk read(ServerLevel world, PoiManager poiStorage, ChunkPos chunkPos, CompoundTag nbt) { ++ // Paper start - add variant for async calls ++ InProgressChunkHolder holder = loadChunk(world, poiStorage, chunkPos, nbt, true); ++ holder.tasks.forEach(Runnable::run); ++ return holder.protoChunk; ++ } ++ ++ public static InProgressChunkHolder loadChunk(ServerLevel world, PoiManager poiStorage, ChunkPos chunkPos, CompoundTag nbt, boolean distinguish) { ++ java.util.ArrayDeque tasksToExecuteOnMain = new java.util.ArrayDeque<>(); ++ // Paper end + ChunkPos chunkcoordintpair1 = new ChunkPos(nbt.getInt("xPos"), nbt.getInt("zPos")); + + if (!Objects.equals(chunkPos, chunkcoordintpair1)) { +@@ -160,7 +184,9 @@ public class ChunkSerializer { + achunksection[k] = chunksection; + SectionPos sectionposition = SectionPos.of(chunkPos, b0); + ++ tasksToExecuteOnMain.add(() -> { // Paper - delay this task since we're executing off-main + poiStorage.checkConsistencyWithBlocks(sectionposition, chunksection); ++ }); // Paper - delay this task since we're executing off-main + } + + boolean flag3 = nbttagcompound1.contains("BlockLight", 7); +@@ -306,7 +332,7 @@ public class ChunkSerializer { + } + + if (chunkstatus_type == ChunkStatus.ChunkType.LEVELCHUNK) { +- return new ImposterProtoChunk((LevelChunk) object1, false); ++ return new InProgressChunkHolder(new ImposterProtoChunk((LevelChunk) object1, false), tasksToExecuteOnMain); // Paper - Async chunk loading + }
else { + ProtoChunk protochunk1 = (ProtoChunk) object1; + +@@ -334,9 +360,41 @@ public class ChunkSerializer { + protochunk1.setCarvingMask(worldgenstage_features, new CarvingMask(nbttagcompound5.getLongArray(s1), ((ChunkAccess) object1).getMinBuildHeight())); + } + +- return protochunk1; ++ return new InProgressChunkHolder(protochunk1, tasksToExecuteOnMain); // Paper - Async chunk loading ++ } ++ } ++ ++ // Paper start - async chunk save for unload ++ public record AsyncSaveData( ++ Tag blockTickList, // non-null if we had to go to the server's tick list ++ Tag fluidTickList, // non-null if we had to go to the server's tick list ++ ListTag blockEntities, ++ long worldTime ++ ) {} ++ ++ // must be called sync ++ public static AsyncSaveData getAsyncSaveData(ServerLevel world, ChunkAccess chunk) { ++ org.spigotmc.AsyncCatcher.catchOp("preparation of chunk data for async save"); ++ ++ final CompoundTag tickLists = new CompoundTag(); ++ ChunkSerializer.saveTicks(world, tickLists, chunk.getTicksForSerialization()); ++ ++ ListTag blockEntitiesSerialized = new ListTag(); ++ for (final BlockPos blockPos : chunk.getBlockEntitiesPos()) { ++ final CompoundTag blockEntityNbt = chunk.getBlockEntityNbtForSaving(blockPos); ++ if (blockEntityNbt != null) { ++ blockEntitiesSerialized.add(blockEntityNbt); ++ } + } ++ ++ return new AsyncSaveData( ++ tickLists.get(BLOCK_TICKS_TAG), ++ tickLists.get(FLUID_TICKS_TAG), ++ blockEntitiesSerialized, ++ world.getGameTime() ++ ); + } ++ // Paper end + + private static void logErrors(ChunkPos chunkPos, int y, String message) { + ChunkSerializer.LOGGER.error("Recoverable errors when loading section [" + chunkPos.x + ", " + y + ", " + chunkPos.z + "]: " + message); +@@ -353,6 +411,11 @@ public class ChunkSerializer { + // CraftBukkit end + + public static CompoundTag write(ServerLevel world, ChunkAccess chunk) { ++ // Paper start ++ return saveChunk(world, chunk, null); ++ } ++ public static CompoundTag saveChunk(ServerLevel world, ChunkAccess chunk, @org.checkerframework.checker.nullness.qual.Nullable AsyncSaveData asyncsavedata) { ++ // Paper end + // Paper start - rewrite light impl + final int minSection = io.papermc.paper.util.WorldUtil.getMinLightSection(world); + final int maxSection = io.papermc.paper.util.WorldUtil.getMaxLightSection(world); +@@ -365,7 +428,7 @@ public class ChunkSerializer { + nbttagcompound.putInt("xPos", chunkcoordintpair.x); + nbttagcompound.putInt("yPos", chunk.getMinSection()); + nbttagcompound.putInt("zPos", chunkcoordintpair.z); +- nbttagcompound.putLong("LastUpdate", world.getGameTime()); ++ nbttagcompound.putLong("LastUpdate", asyncsavedata != null ? 
asyncsavedata.worldTime : world.getGameTime()); // Paper - async chunk unloading + nbttagcompound.putLong("InhabitedTime", chunk.getInhabitedTime()); + nbttagcompound.putString("Status", BuiltInRegistries.CHUNK_STATUS.getKey(chunk.getStatus()).toString()); + BlendingData blendingdata = chunk.getBlendingData(); +@@ -465,8 +528,17 @@ public class ChunkSerializer { + nbttagcompound.putBoolean("isLightOn", false); // Paper - set to false but still store, this allows us to detect --eraseCache (as eraseCache _removes_) + } + +- ListTag nbttaglist1 = new ListTag(); +- Iterator iterator = chunk.getBlockEntitiesPos().iterator(); ++ // Paper start ++ ListTag nbttaglist1; ++ Iterator iterator; ++ if (asyncsavedata != null) { ++ nbttaglist1 = asyncsavedata.blockEntities; ++ iterator = java.util.Collections.emptyIterator(); ++ } else { ++ nbttaglist1 = new ListTag(); ++ iterator = chunk.getBlockEntitiesPos().iterator(); ++ } ++ // Paper end + + CompoundTag nbttagcompound2; + +@@ -502,7 +574,14 @@ public class ChunkSerializer { + nbttagcompound.put("CarvingMasks", nbttagcompound2); + } + ++ // Paper start ++ if (asyncsavedata != null) { ++ nbttagcompound.put(BLOCK_TICKS_TAG, asyncsavedata.blockTickList); ++ nbttagcompound.put(FLUID_TICKS_TAG, asyncsavedata.fluidTickList); ++ } else { + ChunkSerializer.saveTicks(world, nbttagcompound, chunk.getTicksForSerialization()); ++ } ++ // Paper end + nbttagcompound.put("PostProcessing", ChunkSerializer.packOffsets(chunk.getPostProcessing())); + CompoundTag nbttagcompound3 = new CompoundTag(); + Iterator iterator1 = chunk.getHeightmaps().iterator(); +diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java +index 6c0f87535ffa95cf82ab4b03bb7bf8f2132d275f..b4e7c9b317d532d4915932f8f79dfebf2b63ff16 100644 +--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java ++++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java +@@ -28,26 +28,33 @@ import net.minecraft.world.level.storage.DimensionDataStorage; + public class ChunkStorage implements AutoCloseable { + + public static final int LAST_MONOLYTH_STRUCTURE_DATA_VERSION = 1493; +- private final IOWorker worker; ++ // Paper - nuke IO worker + protected final DataFixer fixerUpper; + @Nullable + private volatile LegacyStructureDataHandler legacyStructureHandler; ++ // Paper start - async chunk loading ++ private final Object persistentDataLock = new Object(); // Paper ++ public final RegionFileStorage regionFileCache; ++ // Paper end - async chunk loading + + public ChunkStorage(Path directory, DataFixer dataFixer, boolean dsync) { + this.fixerUpper = dataFixer; +- this.worker = new IOWorker(directory, dsync, "chunk"); ++ // Paper start - async chunk io ++ // remove IO worker ++ this.regionFileCache = new RegionFileStorage(directory, dsync); // Paper - nuke IOWorker ++ // Paper end - async chunk io + } + + public boolean isOldChunkAround(ChunkPos chunkPos, int checkRadius) { +- return this.worker.isOldChunkAround(chunkPos, checkRadius); ++ return true; // Paper - (for now, old unoptimised behavior) TODO implement later? the chunk status that blender uses SHOULD already have this radius loaded, no need to go back for it... 
+ } + + // CraftBukkit start + private boolean check(ServerChunkCache cps, int x, int z) { + ChunkPos pos = new ChunkPos(x, z); + if (cps != null) { +- com.google.common.base.Preconditions.checkState(org.bukkit.Bukkit.isPrimaryThread(), "primary thread"); +- if (cps.hasChunk(x, z)) { ++ //com.google.common.base.Preconditions.checkState(org.bukkit.Bukkit.isPrimaryThread(), "primary thread"); // Paper - this function is now MT-Safe ++ if (cps.getChunkAtIfCachedImmediately(x, z) != null) { // Paper - isLoaded is a ticket level check, not a chunk loaded check! + return true; + } + } +@@ -75,6 +82,7 @@ public class ChunkStorage implements AutoCloseable { + + public CompoundTag upgradeChunkTag(ResourceKey resourcekey, Supplier supplier, CompoundTag nbttagcompound, Optional>> optional, ChunkPos pos, @Nullable LevelAccessor generatoraccess) { + // CraftBukkit end ++ nbttagcompound = nbttagcompound.copy(); // Paper - defensive copy, another thread might modify this + int i = ChunkStorage.getVersion(nbttagcompound); + + // CraftBukkit start +@@ -92,9 +100,11 @@ public class ChunkStorage implements AutoCloseable { + if (i < 1493) { + ca.spottedleaf.dataconverter.minecraft.MCDataConverter.convertTag(ca.spottedleaf.dataconverter.minecraft.datatypes.MCTypeRegistry.CHUNK, nbttagcompound, i, 1493); // Paper - replace chunk converter + if (nbttagcompound.getCompound("Level").getBoolean("hasLegacyStructureData")) { ++ synchronized (this.persistentDataLock) { // Paper - Async chunk loading + LegacyStructureDataHandler persistentstructurelegacy = this.getLegacyStructureHandler(resourcekey, supplier); + + nbttagcompound = persistentstructurelegacy.updateFromLegacy(nbttagcompound); ++ } // Paper - Async chunk loading + } + } + +@@ -127,7 +137,7 @@ public class ChunkStorage implements AutoCloseable { + LegacyStructureDataHandler persistentstructurelegacy = this.legacyStructureHandler; + + if (persistentstructurelegacy == null) { +- synchronized (this) { ++ synchronized (this.persistentDataLock) { // Paper - async chunk loading + persistentstructurelegacy = this.legacyStructureHandler; + if (persistentstructurelegacy == null) { + this.legacyStructureHandler = persistentstructurelegacy = LegacyStructureDataHandler.getLegacyStructureHandler(worldKey, (DimensionDataStorage) stateManagerGetter.get()); +@@ -153,26 +163,49 @@ public class ChunkStorage implements AutoCloseable { + } + + public CompletableFuture> read(ChunkPos chunkPos) { +- return this.worker.loadAsync(chunkPos); ++ // Paper start - async chunk io ++ try { ++ return CompletableFuture.completedFuture(Optional.ofNullable(this.readSync(chunkPos))); ++ } catch (Throwable thr) { ++ return CompletableFuture.failedFuture(thr); ++ } ++ } ++ @Nullable ++ public CompoundTag readSync(ChunkPos chunkPos) throws IOException { ++ return this.regionFileCache.read(chunkPos); + } ++ // Paper end - async chunk io + +- public void write(ChunkPos chunkPos, CompoundTag nbt) { +- this.worker.store(chunkPos, nbt); ++ // Paper start - async chunk io ++ public void write(ChunkPos chunkPos, CompoundTag nbt) throws IOException { ++ this.regionFileCache.write(chunkPos, nbt); ++ // Paper end - Async chunk loading + if (this.legacyStructureHandler != null) { ++ synchronized (this.persistentDataLock) { // Paper - Async chunk loading + this.legacyStructureHandler.removeIndex(chunkPos.toLong()); ++ } // Paper - Async chunk loading + } + + } + + public void flushWorker() { +- this.worker.synchronize(true).join(); ++ io.papermc.paper.chunk.system.io.RegionFileIOThread.flush(); // Paper - 
rewrite chunk system + } + + public void close() throws IOException { +- this.worker.close(); ++ this.regionFileCache.close(); // Paper - nuke IO worker + } + + public ChunkScanAccess chunkScanner() { +- return this.worker; ++ // Paper start - nuke IO worker ++ return ((chunkPos, streamTagVisitor) -> { ++ try { ++ this.regionFileCache.scanChunk(chunkPos, streamTagVisitor); ++ return java.util.concurrent.CompletableFuture.completedFuture(null); ++ } catch (IOException e) { ++ throw new RuntimeException(e); ++ } ++ }); ++ // Paper end + } + } +diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java +index 98b3909b536f11eda9c481ffd74066ad0cdb0ebc..0ec0be22f7292d57c40da6f1f4575bdebf8dbd09 100644 +--- a/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java ++++ b/src/main/java/net/minecraft/world/level/chunk/storage/EntityStorage.java +@@ -30,43 +30,31 @@ public class EntityStorage implements EntityPersistentStorage { + private static final String ENTITIES_TAG = "Entities"; + private static final String POSITION_TAG = "Position"; + public final ServerLevel level; +- private final IOWorker worker; ++ // Paper - rewrite chunk system + private final LongSet emptyChunks = new LongOpenHashSet(); +- public final ProcessorMailbox entityDeserializerQueue; ++ // Paper - rewrite chunk system + protected final DataFixer fixerUpper; + + public EntityStorage(ServerLevel world, Path path, DataFixer dataFixer, boolean dsync, Executor executor) { + this.level = world; + this.fixerUpper = dataFixer; +- this.entityDeserializerQueue = ProcessorMailbox.create(executor, "entity-deserializer"); +- this.worker = new IOWorker(path, dsync, "entities"); ++ // Paper - rewrite chunk system + } + + @Override + public CompletableFuture> loadEntities(ChunkPos pos) { +- return this.emptyChunks.contains(pos.toLong()) ? CompletableFuture.completedFuture(emptyChunk(pos)) : this.worker.loadAsync(pos).thenApplyAsync((nbt) -> { +- if (nbt.isEmpty()) { +- this.emptyChunks.add(pos.toLong()); +- return emptyChunk(pos); +- } else { +- try { +- ChunkPos chunkPos2 = readChunkPos(nbt.get()); +- if (!Objects.equals(pos, chunkPos2)) { +- LOGGER.error("Chunk file at {} is in the wrong location. 
(Expected {}, got {})", pos, pos, chunkPos2); +- } +- } catch (Exception var6) { +- LOGGER.warn("Failed to parse chunk {} position info", pos, var6); +- } +- +- CompoundTag compoundTag = this.upgradeChunkTag(nbt.get()); +- ListTag listTag = compoundTag.getList("Entities", 10); +- List list = EntityType.loadEntitiesRecursive(listTag, this.level).collect(ImmutableList.toImmutableList()); +- return new ChunkEntities<>(pos, list); +- } +- }, this.entityDeserializerQueue::tell); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system - copy out read logic into readEntities ++ } ++ ++ // Paper start - rewrite chunk system ++ public static List readEntities(ServerLevel level, CompoundTag compoundTag) { ++ ListTag listTag = compoundTag.getList("Entities", 10); ++ List list = EntityType.loadEntitiesRecursive(listTag, level).collect(ImmutableList.toImmutableList()); ++ return list; + } ++ // Paper end - rewrite chunk system + +- private static ChunkPos readChunkPos(CompoundTag chunkNbt) { ++ public static ChunkPos readChunkPos(CompoundTag chunkNbt) { // Paper - public + int[] is = chunkNbt.getIntArray("Position"); + return new ChunkPos(is[0], is[1]); + } +@@ -81,45 +69,75 @@ public class EntityStorage implements EntityPersistentStorage { + + @Override + public void storeEntities(ChunkEntities dataList) { ++ // Paper start - rewrite chunk system ++ if (true) { ++ throw new UnsupportedOperationException(); ++ } ++ // Paper end - rewrite chunk system + ChunkPos chunkPos = dataList.getPos(); + if (dataList.isEmpty()) { + if (this.emptyChunks.add(chunkPos.toLong())) { +- this.worker.store(chunkPos, (CompoundTag)null); ++ // Paper - rewrite chunk system + } + + } else { +- ListTag listTag = new ListTag(); +- dataList.getEntities().forEach((entity) -> { +- CompoundTag compoundTag = new CompoundTag(); +- if (entity.save(compoundTag)) { +- listTag.add(compoundTag); +- } +- +- }); +- CompoundTag compoundTag = NbtUtils.addCurrentDataVersion(new CompoundTag()); +- compoundTag.put("Entities", listTag); +- writeChunkPos(compoundTag, chunkPos); +- this.worker.store(chunkPos, compoundTag).exceptionally((ex) -> { +- LOGGER.error("Failed to store chunk {}", chunkPos, ex); +- return null; +- }); ++ // Paper - move into saveEntityChunk0 + this.emptyChunks.remove(chunkPos.toLong()); + } + } + ++ // Paper start - rewrite chunk system ++ public static void copyEntities(final CompoundTag from, final CompoundTag into) { ++ if (from == null) { ++ return; ++ } ++ final ListTag entitiesFrom = from.getList("Entities", net.minecraft.nbt.Tag.TAG_COMPOUND); ++ if (entitiesFrom == null || entitiesFrom.isEmpty()) { ++ return; ++ } ++ ++ final ListTag entitiesInto = into.getList("Entities", net.minecraft.nbt.Tag.TAG_COMPOUND); ++ into.put("Entities", entitiesInto); // this is in case into doesn't have any entities ++ entitiesInto.addAll(0, entitiesFrom.copy()); // need to copy, this is coming from the save thread ++ } ++ ++ public static CompoundTag saveEntityChunk(List entities, ChunkPos chunkPos, ServerLevel level) { ++ return saveEntityChunk0(entities, chunkPos, level, false); ++ } ++ private static CompoundTag saveEntityChunk0(List entities, ChunkPos chunkPos, ServerLevel level, boolean force) { ++ if (!force && entities.isEmpty()) { ++ return null; ++ } ++ ++ ListTag listTag = new ListTag(); ++ entities.forEach((entity) -> { // diff here: use entities parameter ++ CompoundTag compoundTag = new CompoundTag(); ++ if (entity.save(compoundTag)) { ++ listTag.add(compoundTag); ++ } ++ ++ }); ++ CompoundTag 
compoundTag = NbtUtils.addCurrentDataVersion(new CompoundTag()); ++ compoundTag.put("Entities", listTag); ++ writeChunkPos(compoundTag, chunkPos); ++ // Paper - remove worker usage ++ ++ return !force && listTag.isEmpty() ? null : compoundTag; ++ } ++ // Paper end - rewrite chunk system ++ + @Override + public void flush(boolean sync) { +- this.worker.synchronize(sync).join(); +- this.entityDeserializerQueue.runAll(); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + +- private CompoundTag upgradeChunkTag(CompoundTag chunkNbt) { ++ public static CompoundTag upgradeChunkTag(CompoundTag chunkNbt) { // Paper - public and static + int i = NbtUtils.getDataVersion(chunkNbt, -1); + return ca.spottedleaf.dataconverter.minecraft.MCDataConverter.convertTag(ca.spottedleaf.dataconverter.minecraft.datatypes.MCTypeRegistry.ENTITY_CHUNK, chunkNbt, i, net.minecraft.SharedConstants.getCurrentVersion().getDataVersion().getVersion()); // Paper - route to new converter system + } + + @Override + public void close() throws IOException { +- this.worker.close(); ++ throw new UnsupportedOperationException(); // Paper - rewrite chunk system + } + } +diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java +index d9daf07132c46548964a75588b69d7a74680e917..e68205fe7169c7c5b7c6fdada2ee97d86107ca97 100644 +--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java ++++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java +@@ -44,6 +44,7 @@ public class RegionFile implements AutoCloseable { + private final IntBuffer timestamps; + @VisibleForTesting + protected final RegionBitmap usedSectors; ++ public final java.util.concurrent.locks.ReentrantLock fileLock = new java.util.concurrent.locks.ReentrantLock(true); // Paper + + public RegionFile(Path file, Path directory, boolean dsync) throws IOException { + this(file, directory, RegionFileVersion.VERSION_DEFLATE, dsync); +@@ -228,7 +229,7 @@ public class RegionFile implements AutoCloseable { + return (byteCount + 4096 - 1) / 4096; + } + +- public boolean doesChunkExist(ChunkPos pos) { ++ public synchronized boolean doesChunkExist(ChunkPos pos) { // Paper - synchronized + int i = this.getOffset(pos); + + if (i == 0) { +@@ -393,6 +394,11 @@ public class RegionFile implements AutoCloseable { + } + + public void close() throws IOException { ++ // Paper start - Prevent regionfiles from being closed during use ++ this.fileLock.lock(); ++ synchronized (this) { ++ try { ++ // Paper end + try { + this.padToFullSector(); + } finally { +@@ -402,6 +408,10 @@ public class RegionFile implements AutoCloseable { + this.file.close(); + } + } ++ } finally { // Paper start - Prevent regionfiles from being closed during use ++ this.fileLock.unlock(); ++ } ++ } // Paper end + + } + +diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java +index b9b50c56e79297bb824a92355f437a5d4d7e6760..18ef7025f7f4dc2a4aff85ca65ff5a2d35a1ef06 100644 +--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java ++++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java +@@ -24,16 +24,37 @@ public class RegionFileStorage implements AutoCloseable { + private final Path folder; + private final boolean sync; + +- RegionFileStorage(Path directory, boolean dsync) { ++ protected 
RegionFileStorage(Path directory, boolean dsync) { // Paper - protected constructor + this.folder = directory; + this.sync = dsync; + } + +- private RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly) throws IOException { // CraftBukkit ++ // Paper start ++ public synchronized RegionFile getRegionFileIfLoaded(ChunkPos chunkcoordintpair) { ++ return this.regionCache.getAndMoveToFirst(ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ())); ++ } ++ ++ public synchronized boolean chunkExists(ChunkPos pos) throws IOException { ++ RegionFile regionfile = getRegionFile(pos, true); ++ ++ return regionfile != null ? regionfile.hasChunk(pos) : false; ++ } ++ ++ public synchronized RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly) throws IOException { // CraftBukkit ++ return this.getRegionFile(chunkcoordintpair, existingOnly, false); ++ } ++ public synchronized RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly, boolean lock) throws IOException { ++ // Paper end + long i = ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ()); + RegionFile regionfile = (RegionFile) this.regionCache.getAndMoveToFirst(i); + + if (regionfile != null) { ++ // Paper start ++ if (lock) { ++ // must be in this synchronized block ++ regionfile.fileLock.lock(); ++ } ++ // Paper end + return regionfile; + } else { + if (this.regionCache.size() >= 256) { +@@ -48,6 +69,12 @@ public class RegionFileStorage implements AutoCloseable { + RegionFile regionfile1 = new RegionFile(path1, this.folder, this.sync); + + this.regionCache.putAndMoveToFirst(i, regionfile1); ++ // Paper start ++ if (lock) { ++ // must be in this synchronized block ++ regionfile1.fileLock.lock(); ++ } ++ // Paper end + return regionfile1; + } + } +@@ -55,11 +82,12 @@ public class RegionFileStorage implements AutoCloseable { + @Nullable + public CompoundTag read(ChunkPos pos) throws IOException { + // CraftBukkit start - SPIGOT-5680: There's no good reason to preemptively create files on read, save that for writing +- RegionFile regionfile = this.getRegionFile(pos, true); ++ RegionFile regionfile = this.getRegionFile(pos, true, true); // Paper + if (regionfile == null) { + return null; + } + // CraftBukkit end ++ try { // Paper + DataInputStream datainputstream = regionfile.getChunkDataInputStream(pos); + + CompoundTag nbttagcompound; +@@ -96,6 +124,9 @@ public class RegionFileStorage implements AutoCloseable { + } + + return nbttagcompound; ++ } finally { // Paper start ++ regionfile.fileLock.unlock(); ++ } // Paper end + } + + public void scanChunk(ChunkPos chunkPos, StreamTagVisitor scanner) throws IOException { +@@ -130,7 +161,12 @@ public class RegionFileStorage implements AutoCloseable { + } + + protected void write(ChunkPos pos, @Nullable CompoundTag nbt) throws IOException { +- RegionFile regionfile = this.getRegionFile(pos, false); // CraftBukkit ++ RegionFile regionfile = this.getRegionFile(pos, nbt == null, true); // CraftBukkit // Paper // Paper start - rewrite chunk system ++ if (nbt == null && regionfile == null) { ++ return; ++ } ++ // Paper end - rewrite chunk system ++ try { // Paper + + if (nbt == null) { + regionfile.clear(pos); +@@ -156,9 +192,12 @@ public class RegionFileStorage implements AutoCloseable { + } + } + ++ } finally { // Paper start ++ regionfile.fileLock.unlock(); ++ } // Paper end + } + +- public void close() throws IOException { ++ public synchronized void close() throws IOException { // Paper -> 
synchronized + ExceptionCollector exceptionsuppressor = new ExceptionCollector<>(); + ObjectIterator objectiterator = this.regionCache.values().iterator(); + +@@ -175,7 +214,7 @@ public class RegionFileStorage implements AutoCloseable { + exceptionsuppressor.throwIfPresent(); + } + +- public void flush() throws IOException { ++ public synchronized void flush() throws IOException { // Paper - synchronize + ObjectIterator objectiterator = this.regionCache.values().iterator(); + + while (objectiterator.hasNext()) { +diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java +index b0c8a0e64c7a5d41c1b4cc1e39c4399c142b56af..0887cba39bfc4279abec21c6c316abab28beb0a3 100644 +--- a/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java ++++ b/src/main/java/net/minecraft/world/level/chunk/storage/SectionStorage.java +@@ -34,27 +34,28 @@ import net.minecraft.world.level.ChunkPos; + import net.minecraft.world.level.LevelHeightAccessor; + import org.slf4j.Logger; + +-public class SectionStorage implements AutoCloseable { ++public class SectionStorage extends RegionFileStorage implements AutoCloseable { // Paper - nuke IOWorker + private static final Logger LOGGER = LogUtils.getLogger(); + private static final String SECTIONS_TAG = "Sections"; +- private final IOWorker worker; ++ // Paper - remove mojang I/O thread + private final Long2ObjectMap> storage = new Long2ObjectOpenHashMap<>(); + private final LongLinkedOpenHashSet dirty = new LongLinkedOpenHashSet(); + private final Function> codec; + private final Function factory; + private final DataFixer fixerUpper; + private final DataFixTypes type; +- private final RegistryAccess registryAccess; ++ public final RegistryAccess registryAccess; // Paper - rewrite chunk system + protected final LevelHeightAccessor levelHeightAccessor; + + public SectionStorage(Path path, Function> codecFactory, Function factory, DataFixer dataFixer, DataFixTypes dataFixTypes, boolean dsync, RegistryAccess dynamicRegistryManager, LevelHeightAccessor world) { ++ super(path, dsync); // Paper - remove mojang I/O thread + this.codec = codecFactory; + this.factory = factory; + this.fixerUpper = dataFixer; + this.type = dataFixTypes; + this.registryAccess = dynamicRegistryManager; + this.levelHeightAccessor = world; +- this.worker = new IOWorker(path, dsync, path.getFileName().toString()); ++ // Paper - remove mojang I/O thread + } + + protected void tick(BooleanSupplier shouldKeepTicking) { +@@ -116,23 +117,21 @@ public class SectionStorage implements AutoCloseable { + } + + private void readColumn(ChunkPos pos) { +- Optional optional = this.tryRead(pos).join(); +- RegistryOps registryOps = RegistryOps.create(NbtOps.INSTANCE, this.registryAccess); +- this.readColumn(pos, registryOps, optional.orElse((CompoundTag)null)); ++ throw new IllegalStateException("Only chunk system can load in state, offending class:" + this.getClass().getName()); // Paper - rewrite chunk system + } + + private CompletableFuture> tryRead(ChunkPos pos) { +- return this.worker.loadAsync(pos).exceptionally((throwable) -> { +- if (throwable instanceof IOException iOException) { +- LOGGER.error("Error reading chunk {} data from disk", pos, iOException); +- return Optional.empty(); +- } else { +- throw new CompletionException(throwable); +- } +- }); ++ // Paper start - rewrite chunk system ++ try { ++ return CompletableFuture.completedFuture(Optional.ofNullable(this.read(pos))); ++ } catch 
(Throwable thr) { ++ return CompletableFuture.failedFuture(thr); ++ } ++ // Paper end - rewrite chunk system + } + + private void readColumn(ChunkPos pos, DynamicOps ops, @Nullable T data) { ++ if (true) throw new IllegalStateException("Only chunk system can load in state, offending class:" + this.getClass().getName()); // Paper - rewrite chunk system + if (data == null) { + for(int i = this.levelHeightAccessor.getMinSection(); i < this.levelHeightAccessor.getMaxSection(); ++i) { + this.storage.put(getKey(pos, i), Optional.empty()); +@@ -177,7 +176,7 @@ public class SectionStorage implements AutoCloseable { + Dynamic dynamic = this.writeColumn(pos, registryOps); + Tag tag = dynamic.getValue(); + if (tag instanceof CompoundTag) { +- this.worker.store(pos, (CompoundTag)tag); ++ try { this.write(pos, (CompoundTag)tag); } catch (IOException ioexception) { SectionStorage.LOGGER.error("Error writing data to disk", ioexception); } // Paper - nuke IOWorker + } else { + LOGGER.error("Expected compound tag, got {}", (Object)tag); + } +@@ -222,7 +221,7 @@ public class SectionStorage implements AutoCloseable { + } + + private static int getVersion(Dynamic dynamic) { +- return dynamic.get("DataVersion").asInt(1945); ++ return dynamic.get("DataVersion").asInt(1945); // Paper - diff on change, constant used in ChunkLoadTask + } + + public void flush(ChunkPos pos) { +@@ -240,6 +239,9 @@ public class SectionStorage implements AutoCloseable { + + @Override + public void close() throws IOException { +- this.worker.close(); ++ //this.worker.close(); // Paper - nuke I/O worker - don't call the worker ++ super.close(); // Paper - nuke I/O worker - call super.close method which is responsible for closing used files. + } ++ ++ // Paper - rewrite chunk system + } +diff --git a/src/main/java/net/minecraft/world/level/entity/EntityTickList.java b/src/main/java/net/minecraft/world/level/entity/EntityTickList.java +index 2830d32bba3dc85847e3a5d9b4d98f822e34b606..4cdfc433df67afcd455422e9baf56f167dd712ae 100644 +--- a/src/main/java/net/minecraft/world/level/entity/EntityTickList.java ++++ b/src/main/java/net/minecraft/world/level/entity/EntityTickList.java +@@ -8,54 +8,42 @@ import javax.annotation.Nullable; + import net.minecraft.world.entity.Entity; + + public class EntityTickList { +- private Int2ObjectMap active = new Int2ObjectLinkedOpenHashMap<>(); +- private Int2ObjectMap passive = new Int2ObjectLinkedOpenHashMap<>(); +- @Nullable +- private Int2ObjectMap iterated; ++ private final io.papermc.paper.util.maplist.IteratorSafeOrderedReferenceSet entities = new io.papermc.paper.util.maplist.IteratorSafeOrderedReferenceSet<>(true); // Paper - rewrite this, always keep this updated - why would we EVER tick an entity that's not ticking? 
+ + private void ensureActiveIsNotIterated() { +- if (this.iterated == this.active) { +- this.passive.clear(); +- +- for(Int2ObjectMap.Entry entry : Int2ObjectMaps.fastIterable(this.active)) { +- this.passive.put(entry.getIntKey(), entry.getValue()); +- } +- +- Int2ObjectMap int2ObjectMap = this.active; +- this.active = this.passive; +- this.passive = int2ObjectMap; +- } ++ // Paper - replace with better logic, do not delay removals + + } + + public void add(Entity entity) { ++ io.papermc.paper.util.TickThread.ensureTickThread("Asynchronous entity ticklist addition"); // Paper + this.ensureActiveIsNotIterated(); +- this.active.put(entity.getId(), entity); ++ this.entities.add(entity); // Paper - replace with better logic, do not delay removals/additions + } + + public void remove(Entity entity) { ++ io.papermc.paper.util.TickThread.ensureTickThread("Asynchronous entity ticklist removal"); // Paper + this.ensureActiveIsNotIterated(); +- this.active.remove(entity.getId()); ++ this.entities.remove(entity); // Paper - replace with better logic, do not delay removals/additions + } + + public boolean contains(Entity entity) { +- return this.active.containsKey(entity.getId()); ++ return this.entities.contains(entity); // Paper - replace with better logic, do not delay removals/additions + } + + public void forEach(Consumer action) { +- if (this.iterated != null) { +- throw new UnsupportedOperationException("Only one concurrent iteration supported"); +- } else { +- this.iterated = this.active; +- +- try { +- for(Entity entity : this.active.values()) { +- action.accept(entity); +- } +- } finally { +- this.iterated = null; ++ io.papermc.paper.util.TickThread.ensureTickThread("Asynchronous entity ticklist iteration"); // Paper ++ // Paper start - replace with better logic, do not delay removals/additions ++ // To ensure nothing weird happens with dimension travelling, do not iterate over new entries... 
++ // (by dfl iterator() is configured to not iterate over new entries) ++ io.papermc.paper.util.maplist.IteratorSafeOrderedReferenceSet.Iterator iterator = this.entities.iterator(); ++ try { ++ while (iterator.hasNext()) { ++ action.accept(iterator.next()); + } +- ++ } finally { ++ iterator.finishedIterating(); + } ++ // Paper end - replace with better logic, do not delay removals/additions + } + } +diff --git a/src/main/java/net/minecraft/world/level/levelgen/NoiseBasedChunkGenerator.java b/src/main/java/net/minecraft/world/level/levelgen/NoiseBasedChunkGenerator.java +index 54308f1decc3982f30bf8b7a8a9d8865bfdbb9fd..902156477bdfc9917105f1229f760c26e5af302a 100644 +--- a/src/main/java/net/minecraft/world/level/levelgen/NoiseBasedChunkGenerator.java ++++ b/src/main/java/net/minecraft/world/level/levelgen/NoiseBasedChunkGenerator.java +@@ -87,7 +87,7 @@ public final class NoiseBasedChunkGenerator extends ChunkGenerator { + return CompletableFuture.supplyAsync(Util.wrapThreadWithTaskName("init_biomes", () -> { + this.doCreateBiomes(blender, noiseConfig, structureAccessor, chunk); + return chunk; +- }), Util.backgroundExecutor()); ++ }), executor); // Paper - run with supplied executor + } + + private void doCreateBiomes(Blender blender, RandomState noiseConfig, StructureManager structureAccessor, ChunkAccess chunk) { +@@ -286,7 +286,7 @@ public final class NoiseBasedChunkGenerator extends ChunkGenerator { + + return CompletableFuture.supplyAsync(Util.wrapThreadWithTaskName("wgen_fill_noise", () -> { + return this.doFill(blender, structureAccessor, noiseConfig, chunk, j, k); +- }), Util.backgroundExecutor()).whenCompleteAsync((ichunkaccess1, throwable) -> { ++ }), executor).whenCompleteAsync((ichunkaccess1, throwable) -> { // Paper - run with supplied executor + Iterator iterator = set.iterator(); + + while (iterator.hasNext()) { +diff --git a/src/main/java/net/minecraft/world/ticks/LevelChunkTicks.java b/src/main/java/net/minecraft/world/ticks/LevelChunkTicks.java +index 9f6c2e5b5d9e8d714a47c770e255d06c0ef7c190..ac807277a6b26d140ea9873d17c7aa4fb5fe37b2 100644 +--- a/src/main/java/net/minecraft/world/ticks/LevelChunkTicks.java ++++ b/src/main/java/net/minecraft/world/ticks/LevelChunkTicks.java +@@ -25,6 +25,19 @@ public class LevelChunkTicks implements SerializableTickContainer, TickCon + @Nullable + private BiConsumer, ScheduledTick> onTickAdded; + ++ // Paper start - add dirty flag ++ private boolean dirty; ++ private long lastSaved = Long.MIN_VALUE; ++ ++ public boolean isDirty(final long tick) { ++ return this.dirty || (!this.tickQueue.isEmpty() && tick != this.lastSaved); ++ } ++ ++ public void clearDirty() { ++ this.dirty = false; ++ } ++ // Paper end - add dirty flag ++ + public LevelChunkTicks() { + } + +@@ -50,6 +63,7 @@ public class LevelChunkTicks implements SerializableTickContainer, TickCon + public ScheduledTick poll() { + ScheduledTick scheduledTick = this.tickQueue.poll(); + if (scheduledTick != null) { ++ this.dirty = true; // Paper - add dirty flag + this.ticksPerPosition.remove(scheduledTick); + } + +@@ -59,6 +73,7 @@ public class LevelChunkTicks implements SerializableTickContainer, TickCon + @Override + public void schedule(ScheduledTick orderedTick) { + if (this.ticksPerPosition.add(orderedTick)) { ++ this.dirty = true; // Paper - add dirty flag + this.scheduleUnchecked(orderedTick); + } + +@@ -83,7 +98,7 @@ public class LevelChunkTicks implements SerializableTickContainer, TickCon + while(iterator.hasNext()) { + ScheduledTick scheduledTick = iterator.next(); + if 
(predicate.test(scheduledTick)) { +- iterator.remove(); ++ iterator.remove(); this.dirty = true; // Paper - add dirty flag + this.ticksPerPosition.remove(scheduledTick); + } + } +@@ -101,6 +116,7 @@ public class LevelChunkTicks implements SerializableTickContainer, TickCon + + @Override + public ListTag save(long l, Function function) { ++ this.lastSaved = l; // Paper - add dirty system to level ticks + ListTag listTag = new ListTag(); + if (this.pendingTicks != null) { + for(SavedTick savedTick : this.pendingTicks) { +@@ -117,6 +133,11 @@ public class LevelChunkTicks implements SerializableTickContainer, TickCon + + public void unpack(long time) { + if (this.pendingTicks != null) { ++ // Paper start - add dirty system to level chunk ticks ++ if (this.tickQueue.isEmpty()) { ++ this.lastSaved = time; ++ } ++ // Paper end - add dirty system to level chunk ticks + int i = -this.pendingTicks.size(); + + for(SavedTick savedTick : this.pendingTicks) { +diff --git a/src/main/java/org/bukkit/craftbukkit/CraftChunk.java b/src/main/java/org/bukkit/craftbukkit/CraftChunk.java +index bf4b2f89d3a7133155c6272379c742318b2c1514..33677ec811ceab939c419bf7d31b99585e9a1ef1 100644 +--- a/src/main/java/org/bukkit/craftbukkit/CraftChunk.java ++++ b/src/main/java/org/bukkit/craftbukkit/CraftChunk.java +@@ -111,7 +111,7 @@ public class CraftChunk implements Chunk { + + @Override + public boolean isEntitiesLoaded() { +- return this.getCraftWorld().getHandle().entityManager.areEntitiesLoaded(ChunkPos.asLong(x, z)); ++ return this.getCraftWorld().getHandle().areEntitiesLoaded(io.papermc.paper.util.CoordinateUtils.getChunkKey(this.x, this.z)); // Paper - rewrite chunk system + } + + @Override +@@ -120,51 +120,7 @@ public class CraftChunk implements Chunk { + this.getWorld().getChunkAt(x, z); // Transient load for this tick + } + +- PersistentEntitySectionManager entityManager = this.getCraftWorld().getHandle().entityManager; +- long pair = ChunkPos.asLong(x, z); +- +- if (entityManager.areEntitiesLoaded(pair)) { +- return entityManager.getEntities(new ChunkPos(this.x, this.z)).stream() +- .map(net.minecraft.world.entity.Entity::getBukkitEntity) +- .filter(Objects::nonNull).toArray(Entity[]::new); +- } +- +- entityManager.ensureChunkQueuedForLoad(pair); // Start entity loading +- +- // SPIGOT-6772: Use entity mailbox and re-schedule entities if they get unloaded +- ProcessorMailbox mailbox = ((EntityStorage) entityManager.permanentStorage).entityDeserializerQueue; +- BooleanSupplier supplier = () -> { +- // only execute inbox if our entities are not present +- if (entityManager.areEntitiesLoaded(pair)) { +- return true; +- } +- +- if (!entityManager.isPending(pair)) { +- // Our entities got unloaded, this should normally not happen. 
+- entityManager.ensureChunkQueuedForLoad(pair); // Re-start entity loading +- } +- +- // tick loading inbox, which loads the created entities to the world +- // (if present) +- entityManager.tick(); +- // check if our entities are loaded +- return entityManager.areEntitiesLoaded(pair); +- }; +- +- // now we wait until the entities are loaded, +- // the converting from NBT to entity object is done on the main Thread which is why we wait +- while (!supplier.getAsBoolean()) { +- if (mailbox.size() != 0) { +- mailbox.run(); +- } else { +- Thread.yield(); +- LockSupport.parkNanos("waiting for entity loading", 100000L); +- } +- } +- +- return entityManager.getEntities(new ChunkPos(this.x, this.z)).stream() +- .map(net.minecraft.world.entity.Entity::getBukkitEntity) +- .filter(Objects::nonNull).toArray(Entity[]::new); ++ return getCraftWorld().getHandle().getChunkEntities(this.x, this.z); // Paper - rewrite chunk system + } + + @Override +diff --git a/src/main/java/org/bukkit/craftbukkit/CraftServer.java b/src/main/java/org/bukkit/craftbukkit/CraftServer.java +index 7912cf0e4cab7d6c99106d43b2cdc4f4e80aebac..0a4b2529034b85609b2f8d3f3f633e8f6adbe34f 100644 +--- a/src/main/java/org/bukkit/craftbukkit/CraftServer.java ++++ b/src/main/java/org/bukkit/craftbukkit/CraftServer.java +@@ -1176,7 +1176,7 @@ public final class CraftServer implements Server { + this.console.addLevel(internal); + + this.getServer().prepareLevels(internal.getChunkSource().chunkMap.progressListener, internal); +- internal.entityManager.tick(); // SPIGOT-6526: Load pending entities so they are available to the API ++ //internal.entityManager.tick(); // SPIGOT-6526: Load pending entities so they are available to the API // Paper - rewrite chunk system + + this.pluginManager.callEvent(new WorldLoadEvent(internal.getWorld())); + return internal.getWorld(); +@@ -1220,7 +1220,7 @@ public final class CraftServer implements Server { + } + + handle.getChunkSource().close(save); +- handle.entityManager.close(save); // SPIGOT-6722: close entityManager ++ // handle.entityManager.close(save); // SPIGOT-6722: close entityManager // Paper - rewrite chunk system + handle.convertable.close(); + } catch (Exception ex) { + this.getLogger().log(Level.SEVERE, null, ex); +@@ -2037,7 +2037,7 @@ public final class CraftServer implements Server { + + @Override + public boolean isPrimaryThread() { +- return Thread.currentThread().equals(console.serverThread) || this.console.hasStopped() || !org.spigotmc.AsyncCatcher.enabled; // All bets are off if we have shut down (e.g. 
due to watchdog) ++ return io.papermc.paper.util.TickThread.isTickThread(); // Paper - rewrite chunk system + } + + // Paper start +diff --git a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java +index 1440b6b8b71ece71b076601752b06bdcff45542f..49623627555cb2b18ea8f7e17d0f6c1db54c0be4 100644 +--- a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java ++++ b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java +@@ -323,10 +323,14 @@ public class CraftWorld extends CraftRegionAccessor implements World { + ChunkHolder playerChunk = this.world.getChunkSource().chunkMap.getVisibleChunkIfPresent(ChunkPos.asLong(x, z)); + if (playerChunk == null) return false; + +- playerChunk.getTickingChunkFuture().thenAccept(either -> { +- either.left().ifPresent(chunk -> { ++ // Paper start - rewrite player chunk loader ++ net.minecraft.world.level.chunk.LevelChunk chunk = playerChunk.getSendingChunk(); ++ if (chunk == null) { ++ return false; ++ } ++ // Paper end - rewrite player chunk loader + List playersInRange = playerChunk.playerProvider.getPlayers(playerChunk.getPos(), false); +- if (playersInRange.isEmpty()) return; ++ if (playersInRange.isEmpty()) return true; // Paper - rewrite player chunk loader + + ClientboundLevelChunkWithLightPacket refreshPacket = new ClientboundLevelChunkWithLightPacket(chunk, this.world.getLightEngine(), null, null); + for (ServerPlayer player : playersInRange) { +@@ -334,8 +338,7 @@ public class CraftWorld extends CraftRegionAccessor implements World { + + player.connection.send(refreshPacket); + } +- }); +- }); ++ // Paper - rewrite player chunk loader + + return true; + } +@@ -412,20 +415,7 @@ public class CraftWorld extends CraftRegionAccessor implements World { + @Override + public Collection getPluginChunkTickets(int x, int z) { + DistanceManager chunkDistanceManager = this.world.getChunkSource().chunkMap.distanceManager; +- SortedArraySet> tickets = chunkDistanceManager.tickets.get(ChunkPos.asLong(x, z)); +- +- if (tickets == null) { +- return Collections.emptyList(); +- } +- +- ImmutableList.Builder ret = ImmutableList.builder(); +- for (Ticket ticket : tickets) { +- if (ticket.getType() == TicketType.PLUGIN_TICKET) { +- ret.add((Plugin) ticket.key); +- } +- } +- +- return ret.build(); ++ return chunkDistanceManager.getChunkHolderManager().getPluginChunkTickets(x, z); // Paper - rewrite chunk system + } + + @Override +@@ -433,7 +423,7 @@ public class CraftWorld extends CraftRegionAccessor implements World { + Map> ret = new HashMap<>(); + DistanceManager chunkDistanceManager = this.world.getChunkSource().chunkMap.distanceManager; + +- for (Long2ObjectMap.Entry>> chunkTickets : chunkDistanceManager.tickets.long2ObjectEntrySet()) { ++ for (Long2ObjectMap.Entry>> chunkTickets : chunkDistanceManager.getChunkHolderManager().getTicketsCopy().long2ObjectEntrySet()) { // Paper - rewrite chunk system + long chunkKey = chunkTickets.getLongKey(); + SortedArraySet> tickets = chunkTickets.getValue(); + +@@ -1957,14 +1947,53 @@ public class CraftWorld extends CraftRegionAccessor implements World { + // Spigot start + @Override + public int getViewDistance() { +- return world.spigotConfig.viewDistance; ++ return getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance(); // Paper - replace old player chunk management + } + + @Override + public int getSimulationDistance() { +- return world.spigotConfig.simulationDistance; ++ return 
getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance(); // Paper - replace old player chunk management + } + // Spigot end ++ // Paper start - view distance api ++ @Override ++ public void setViewDistance(int viewDistance) { ++ if (viewDistance < 2 || viewDistance > 32) { ++ throw new IllegalArgumentException("View distance " + viewDistance + " is out of range of [2, 32]"); ++ } ++ net.minecraft.server.level.ChunkMap chunkMap = getHandle().getChunkSource().chunkMap; ++ chunkMap.setViewDistance(viewDistance); ++ } ++ ++ @Override ++ public void setSimulationDistance(int simulationDistance) { ++ if (simulationDistance < 2 || simulationDistance > 32) { ++ throw new IllegalArgumentException("Simulation distance " + simulationDistance + " is out of range of [2, 32]"); ++ } ++ net.minecraft.server.level.ChunkMap chunkMap = getHandle().getChunkSource().chunkMap; ++ chunkMap.setTickViewDistance(simulationDistance); ++ } ++ ++ @Override ++ public int getNoTickViewDistance() { ++ return this.getViewDistance(); ++ } ++ ++ @Override ++ public void setNoTickViewDistance(int viewDistance) { ++ this.setViewDistance(viewDistance); ++ } ++ ++ @Override ++ public int getSendViewDistance() { ++ return getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance(); ++ } ++ ++ @Override ++ public void setSendViewDistance(int viewDistance) { ++ getHandle().getChunkSource().chunkMap.playerChunkManager.setSendDistance(viewDistance); ++ } ++ // Paper end - view distance api + + // Spigot start + private final org.bukkit.World.Spigot spigot = new org.bukkit.World.Spigot() +diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +index 5afdada0d56c2f9af431ea6485faa277229befa9..2f35f954eb2dcdc2de54b54c47c139908438e0f0 100644 +--- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java ++++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +@@ -184,6 +184,81 @@ public class CraftPlayer extends CraftHumanEntity implements Player { + this.firstPlayed = System.currentTimeMillis(); + } + ++ // Paper start - implement view distances ++ @Override ++ public int getViewDistance() { ++ net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; ++ io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); ++ if (data == null) { ++ return chunkMap.playerChunkManager.getTargetNoTickViewDistance(); ++ } ++ return data.getTargetNoTickViewDistance(); ++ } ++ ++ @Override ++ public void setViewDistance(int viewDistance) { ++ net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; ++ io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); ++ if (data == null) { ++ throw new IllegalStateException("Player is not attached to world"); ++ } ++ ++ data.setTargetNoTickViewDistance(viewDistance); ++ } ++ ++ @Override ++ public int getSimulationDistance() { ++ net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; ++ io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); ++ if (data == null) { ++ return chunkMap.playerChunkManager.getTargetTickViewDistance(); ++ } ++ return data.getTargetTickViewDistance(); ++ } ++ ++ @Override ++ public void setSimulationDistance(int 
simulationDistance) { ++ net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; ++ io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); ++ if (data == null) { ++ throw new IllegalStateException("Player is not attached to world"); ++ } ++ ++ data.setTargetTickViewDistance(simulationDistance); ++ } ++ ++ @Override ++ public int getNoTickViewDistance() { ++ return this.getViewDistance(); ++ } ++ ++ @Override ++ public void setNoTickViewDistance(int viewDistance) { ++ this.setViewDistance(viewDistance); ++ } ++ ++ @Override ++ public int getSendViewDistance() { ++ net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; ++ io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); ++ if (data == null) { ++ return chunkMap.playerChunkManager.getTargetSendDistance(); ++ } ++ return data.getTargetSendViewDistance(); ++ } ++ ++ @Override ++ public void setSendViewDistance(int viewDistance) { ++ net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; ++ io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); ++ if (data == null) { ++ throw new IllegalStateException("Player is not attached to world"); ++ } ++ ++ data.setTargetSendViewDistance(viewDistance); ++ } ++ // Paper end - implement view distances ++ + public GameProfile getProfile() { + return this.getHandle().getGameProfile(); + } +diff --git a/src/main/java/org/bukkit/craftbukkit/generator/CustomChunkGenerator.java b/src/main/java/org/bukkit/craftbukkit/generator/CustomChunkGenerator.java +index 2f0dea0f9540d07f8b38029652fd66c1871dd97c..a7fe78e655a0f4a2c4464e334ea04cf95b9afcd0 100644 +--- a/src/main/java/org/bukkit/craftbukkit/generator/CustomChunkGenerator.java ++++ b/src/main/java/org/bukkit/craftbukkit/generator/CustomChunkGenerator.java +@@ -265,7 +265,7 @@ public class CustomChunkGenerator extends InternalChunkGenerator { + return ichunkaccess1; + }; + +- return future == null ? CompletableFuture.supplyAsync(() -> function.apply(chunk), net.minecraft.Util.backgroundExecutor()) : future.thenApply(function); ++ return future == null ? 
CompletableFuture.supplyAsync(() -> function.apply(chunk), executor) : future.thenApply(function); // Paper - run with supplied executor + } + + @Override +diff --git a/src/main/java/org/bukkit/craftbukkit/util/DummyGeneratorAccess.java b/src/main/java/org/bukkit/craftbukkit/util/DummyGeneratorAccess.java +index 629178347039893fb9de710810fe8112499bb91c..d5861dfa771c4eb217e082e3c832c3a6c603710d 100644 +--- a/src/main/java/org/bukkit/craftbukkit/util/DummyGeneratorAccess.java ++++ b/src/main/java/org/bukkit/craftbukkit/util/DummyGeneratorAccess.java +@@ -258,4 +258,20 @@ public class DummyGeneratorAccess implements WorldGenLevel { + public boolean destroyBlock(BlockPos pos, boolean drop, Entity breakingEntity, int maxUpdateDepth) { + return false; // SPIGOT-6515 + } ++ ++ // Paper start ++ @Override ++ public List getHardCollidingEntities(Entity except, AABB box, Predicate predicate) { ++ return java.util.Collections.emptyList(); ++ } ++ ++ @Override ++ public void getEntities(Entity except, AABB box, Predicate predicate, List into) {} ++ ++ @Override ++ public void getHardCollidingEntities(Entity except, AABB box, Predicate predicate, List into) {} ++ ++ @Override ++ public void getEntitiesByClass(Class clazz, Entity except, AABB box, List into, Predicate predicate) {} ++ // Paper end + } +diff --git a/src/main/java/org/spigotmc/ActivationRange.java b/src/main/java/org/spigotmc/ActivationRange.java +index 3e5d541d3d7fc3956b2fc68da9f22a0ab0367ce9..b47b740186c200c420dcb4d1537a93c743a887c1 100644 +--- a/src/main/java/org/spigotmc/ActivationRange.java ++++ b/src/main/java/org/spigotmc/ActivationRange.java +@@ -132,7 +132,13 @@ public class ActivationRange + ActivationType.ANIMAL.boundingBox = player.getBoundingBox().inflate( animalActivationRange, 256, animalActivationRange ); + ActivationType.MONSTER.boundingBox = player.getBoundingBox().inflate( monsterActivationRange, 256, monsterActivationRange ); + +- world.getEntities().get(maxBB, ActivationRange::activateEntity); ++ // Paper start ++ java.util.List entities = world.getEntities((Entity)null, maxBB, null); ++ for (int i = 0; i < entities.size(); i++) { ++ Entity entity = entities.get(i); ++ ActivationRange.activateEntity(entity); ++ } ++ // Paper end + } + MinecraftTimings.entityActivationCheckTimer.stopTiming(); + } +diff --git a/src/main/java/org/spigotmc/AsyncCatcher.java b/src/main/java/org/spigotmc/AsyncCatcher.java +index 78669fa035b7537ff7e533cf32aaf2995625424f..05e94702e42b8f5c35d2a112c486d57948a3acba 100644 +--- a/src/main/java/org/spigotmc/AsyncCatcher.java ++++ b/src/main/java/org/spigotmc/AsyncCatcher.java +@@ -9,7 +9,7 @@ public class AsyncCatcher + + public static void catchOp(String reason) + { +- if ( (AsyncCatcher.enabled || io.papermc.paper.util.TickThread.STRICT_THREAD_CHECKS) && Thread.currentThread() != MinecraftServer.getServer().serverThread ) // Paper ++ if ( !io.papermc.paper.util.TickThread.isTickThread() ) // Paper // Paper - rewrite chunk system + { + throw new IllegalStateException( "Asynchronous " + reason + "!" 
); + } +diff --git a/src/main/java/org/spigotmc/WatchdogThread.java b/src/main/java/org/spigotmc/WatchdogThread.java +index 335120afc88a8fc1543c2e6df516fd728e3ab032..f1194eb6fdfba60959e00080d0562f2820d13b27 100644 +--- a/src/main/java/org/spigotmc/WatchdogThread.java ++++ b/src/main/java/org/spigotmc/WatchdogThread.java +@@ -8,7 +8,7 @@ import java.util.logging.Logger; + import net.minecraft.server.MinecraftServer; + import org.bukkit.Bukkit; + +-public class WatchdogThread extends Thread ++public final class WatchdogThread extends io.papermc.paper.util.TickThread // Paper - rewrite chunk system + { + + private static WatchdogThread instance; +@@ -83,6 +83,7 @@ public class WatchdogThread extends Thread + // + log.log( Level.SEVERE, "------------------------------" ); + log.log( Level.SEVERE, "Server thread dump (Look for plugins here before reporting to Spigot!):" ); ++ io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.dumpAllChunkLoadInfo(isLongTimeout); // Paper // Paper - rewrite chunk system + WatchdogThread.dumpThread( ManagementFactory.getThreadMXBean().getThreadInfo( MinecraftServer.getServer().serverThread.getId(), Integer.MAX_VALUE ), log ); + log.log( Level.SEVERE, "------------------------------" ); + // diff --git a/patches/server/0019-Configurable-cactus-bamboo-and-reed-growth-heights.patch b/patches/server/0020-Configurable-cactus-bamboo-and-reed-growth-heights.patch similarity index 100% rename from patches/server/0019-Configurable-cactus-bamboo-and-reed-growth-heights.patch rename to patches/server/0020-Configurable-cactus-bamboo-and-reed-growth-heights.patch diff --git a/patches/server/0021-New-player-chunk-loader-system.patch b/patches/server/0021-New-player-chunk-loader-system.patch new file mode 100644 index 0000000000..7665e63540 --- /dev/null +++ b/patches/server/0021-New-player-chunk-loader-system.patch @@ -0,0 +1,2367 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Spottedleaf +Date: Wed, 1 Feb 2023 21:06:31 -0800 +Subject: [PATCH] New player chunk loader system + + +diff --git a/src/main/java/co/aikar/timings/TimingsExport.java b/src/main/java/co/aikar/timings/TimingsExport.java +index 43380d5e3a40b64bebdf3c0e7c48eca8998c8ac0..1080e1f67afe5574baca0df50cdb1d029a7a586a 100644 +--- a/src/main/java/co/aikar/timings/TimingsExport.java ++++ b/src/main/java/co/aikar/timings/TimingsExport.java +@@ -164,9 +164,9 @@ public class TimingsExport extends Thread { + return pair(rule, world.getWorld().getGameRuleValue(rule)); + })), + // Paper start - replace chunk loader system +- pair("ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance()), +- pair("no-ticking-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance()), +- pair("sending-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance()) ++ pair("ticking-distance", world.getWorld().getSimulationDistance()), ++ pair("no-ticking-distance", world.getWorld().getViewDistance()), ++ pair("sending-distance", world.getWorld().getSendViewDistance()) + // Paper end - replace chunk loader system + )); + })); +diff --git a/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java b/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java +index e77972c4c264100ffdd824bfa2dac58dbbc6d678..562630db2cf5f923bf5b611b828a365e6d60fefb 100644 +--- a/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java ++++ b/src/main/java/io/papermc/paper/chunk/PlayerChunkLoader.java +@@ -41,12 +41,7 
@@ public final class PlayerChunkLoader { + } + + public static int getTickViewDistance(final ServerPlayer player) { +- final ServerLevel level = (ServerLevel)player.level; +- final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player); +- if (data == null) { +- return level.chunkSource.chunkMap.playerChunkManager.getTargetTickViewDistance(); +- } +- return data.getTargetTickViewDistance(); ++ throw new UnsupportedOperationException(); + } + + public static int getLoadViewDistance(final Player player) { +@@ -54,12 +49,7 @@ public final class PlayerChunkLoader { + } + + public static int getLoadViewDistance(final ServerPlayer player) { +- final ServerLevel level = (ServerLevel)player.level; +- final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player); +- if (data == null) { +- return level.chunkSource.chunkMap.playerChunkManager.getLoadDistance(); +- } +- return data.getLoadDistance(); ++ throw new UnsupportedOperationException(); + } + + public static int getSendViewDistance(final Player player) { +@@ -67,12 +57,7 @@ public final class PlayerChunkLoader { + } + + public static int getSendViewDistance(final ServerPlayer player) { +- final ServerLevel level = (ServerLevel)player.level; +- final PlayerLoaderData data = level.chunkSource.chunkMap.playerChunkManager.getData(player); +- if (data == null) { +- return level.chunkSource.chunkMap.playerChunkManager.getTargetSendDistance(); +- } +- return data.getTargetSendViewDistance(); ++ throw new UnsupportedOperationException(); + } + + protected final ChunkMap chunkMap; +@@ -872,7 +857,7 @@ public final class PlayerChunkLoader { + + public void sendChunk(final int chunkX, final int chunkZ, final Runnable onChunkSend) { + if (this.sentChunks.add(CoordinateUtils.getChunkKey(chunkX, chunkZ))) { +- this.player.getLevel().getChunkSource().chunkMap.updateChunkTracking(this.player, ++ ((ServerLevel)this.player.level()).getChunkSource().chunkMap.updateChunkTracking(this.player, + new ChunkPos(chunkX, chunkZ), new MutableObject<>(), false, true); // unloaded, loaded + this.player.connection.connection.execute(onChunkSend); + } else { +@@ -882,7 +867,7 @@ public final class PlayerChunkLoader { + + public void unloadChunk(final int chunkX, final int chunkZ) { + if (this.sentChunks.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) { +- this.player.getLevel().getChunkSource().chunkMap.updateChunkTracking(this.player, ++ ((ServerLevel)this.player.level()).getChunkSource().chunkMap.updateChunkTracking(this.player, + new ChunkPos(chunkX, chunkZ), null, true, false); // unloaded, loaded + } + } +diff --git a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java +index 0dc94dec1317b3f86d38074c6cbe41ab828cab1d..97edd4e8d3524c839a1765b6515deacae112ff4b 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java ++++ b/src/main/java/io/papermc/paper/chunk/system/ChunkSystem.java +@@ -119,15 +119,15 @@ public final class ChunkSystem { + } + + public static int getSendViewDistance(final ServerPlayer player) { +- return io.papermc.paper.chunk.PlayerChunkLoader.getSendViewDistance(player); ++ return io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.getAPISendViewDistance(player); + } + + public static int getLoadViewDistance(final ServerPlayer player) { +- return io.papermc.paper.chunk.PlayerChunkLoader.getLoadViewDistance(player); ++ return 
io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.getLoadViewDistance(player); + } + + public static int getTickViewDistance(final ServerPlayer player) { +- return io.papermc.paper.chunk.PlayerChunkLoader.getTickViewDistance(player); ++ return io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.getAPITickViewDistance(player); + } + + private ChunkSystem() { +diff --git a/src/main/java/io/papermc/paper/chunk/system/RegionizedPlayerChunkLoader.java b/src/main/java/io/papermc/paper/chunk/system/RegionizedPlayerChunkLoader.java +new file mode 100644 +index 0000000000000000000000000000000000000000..cf7610b3396d03bf79a899d5d9cfc6debb5b90be +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/RegionizedPlayerChunkLoader.java +@@ -0,0 +1,1332 @@ ++package io.papermc.paper.chunk.system; ++ ++import ca.spottedleaf.concurrentutil.collection.SRSWLinkedQueue; ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; ++import io.papermc.paper.chunk.system.io.RegionFileIOThread; ++import io.papermc.paper.chunk.system.scheduling.ChunkHolderManager; ++import io.papermc.paper.configuration.GlobalConfiguration; ++import io.papermc.paper.util.CoordinateUtils; ++import io.papermc.paper.util.IntegerUtil; ++import io.papermc.paper.util.TickThread; ++import it.unimi.dsi.fastutil.longs.Long2ByteOpenHashMap; ++import it.unimi.dsi.fastutil.longs.LongArrayFIFOQueue; ++import it.unimi.dsi.fastutil.longs.LongArrayList; ++import it.unimi.dsi.fastutil.longs.LongComparator; ++import it.unimi.dsi.fastutil.longs.LongHeapPriorityQueue; ++import it.unimi.dsi.fastutil.longs.LongOpenHashSet; ++import net.minecraft.network.protocol.Packet; ++import net.minecraft.network.protocol.game.ClientboundSetChunkCacheCenterPacket; ++import net.minecraft.network.protocol.game.ClientboundSetChunkCacheRadiusPacket; ++import net.minecraft.network.protocol.game.ClientboundSetSimulationDistancePacket; ++import net.minecraft.server.level.ChunkMap; ++import net.minecraft.server.level.ServerLevel; ++import net.minecraft.server.level.ServerPlayer; ++import net.minecraft.server.level.TicketType; ++import net.minecraft.world.level.ChunkPos; ++import net.minecraft.world.level.GameRules; ++import net.minecraft.world.level.chunk.ChunkAccess; ++import net.minecraft.world.level.chunk.ChunkStatus; ++import net.minecraft.world.level.chunk.LevelChunk; ++import net.minecraft.world.level.levelgen.BelowZeroRetrogen; ++import org.apache.commons.lang3.mutable.MutableObject; ++import org.bukkit.craftbukkit.entity.CraftPlayer; ++import org.bukkit.entity.Player; ++import java.lang.invoke.VarHandle; ++import java.util.ArrayDeque; ++import java.util.concurrent.TimeUnit; ++import java.util.concurrent.atomic.AtomicLong; ++ ++public class RegionizedPlayerChunkLoader { ++ ++ public static final TicketType REGION_PLAYER_TICKET = TicketType.create("region_player_ticket", Long::compareTo); ++ ++ public static final int MIN_VIEW_DISTANCE = 2; ++ public static final int MAX_VIEW_DISTANCE = 32; ++ ++ public static final int TICK_TICKET_LEVEL = 31; ++ public static final int GENERATED_TICKET_LEVEL = 33 + ChunkStatus.getDistance(ChunkStatus.FULL); ++ public static final int LOADED_TICKET_LEVEL = 33 + ChunkStatus.getDistance(ChunkStatus.EMPTY); ++ ++ public static final record ViewDistances( ++ int tickViewDistance, ++ int loadViewDistance, ++ int sendViewDistance ++ ) { ++ public ViewDistances setTickViewDistance(final int distance) { ++ return new ViewDistances(distance, 
this.loadViewDistance, this.sendViewDistance); ++ } ++ ++ public ViewDistances setLoadViewDistance(final int distance) { ++ return new ViewDistances(this.tickViewDistance, distance, this.sendViewDistance); ++ } ++ ++ ++ public ViewDistances setSendViewDistance(final int distance) { ++ return new ViewDistances(this.tickViewDistance, this.loadViewDistance, distance); ++ } ++ } ++ ++ public static int getAPITickViewDistance(final Player player) { ++ return getAPITickViewDistance(((CraftPlayer)player).getHandle()); ++ } ++ ++ public static int getAPITickViewDistance(final ServerPlayer player) { ++ final ServerLevel level = (ServerLevel)player.level(); ++ final PlayerChunkLoaderData data = player.chunkLoader; ++ if (data == null) { ++ return level.playerChunkLoader.getAPITickDistance(); ++ } ++ return data.lastTickDistance; ++ } ++ ++ public static int getAPIViewDistance(final Player player) { ++ return getAPIViewDistance(((CraftPlayer)player).getHandle()); ++ } ++ ++ public static int getAPIViewDistance(final ServerPlayer player) { ++ final ServerLevel level = (ServerLevel)player.level(); ++ final PlayerChunkLoaderData data = player.chunkLoader; ++ if (data == null) { ++ return level.playerChunkLoader.getAPIViewDistance(); ++ } ++ // view distance = load distance + 1 ++ return data.lastLoadDistance - 1; ++ } ++ ++ public static int getLoadViewDistance(final ServerPlayer player) { ++ final ServerLevel level = (ServerLevel)player.level(); ++ final PlayerChunkLoaderData data = player.chunkLoader; ++ if (data == null) { ++ return level.playerChunkLoader.getAPIViewDistance(); ++ } ++ // view distance = load distance + 1 ++ return data.lastLoadDistance - 1; ++ } ++ ++ public static int getAPISendViewDistance(final Player player) { ++ return getAPISendViewDistance(((CraftPlayer)player).getHandle()); ++ } ++ ++ public static int getAPISendViewDistance(final ServerPlayer player) { ++ final ServerLevel level = (ServerLevel)player.level(); ++ final PlayerChunkLoaderData data = player.chunkLoader; ++ if (data == null) { ++ return level.playerChunkLoader.getAPISendViewDistance(); ++ } ++ return data.lastSendDistance; ++ } ++ ++ private final ServerLevel world; ++ ++ public RegionizedPlayerChunkLoader(final ServerLevel world) { ++ this.world = world; ++ } ++ ++ public void addPlayer(final ServerPlayer player) { ++ TickThread.ensureTickThread(player, "Cannot add player to player chunk loader async"); ++ if (!player.isRealPlayer) { ++ return; ++ } ++ ++ if (player.chunkLoader != null) { ++ throw new IllegalStateException("Player is already added to player chunk loader"); ++ } ++ ++ final PlayerChunkLoaderData loader = new PlayerChunkLoaderData(this.world, player); ++ ++ player.chunkLoader = loader; ++ loader.add(); ++ } ++ ++ public void updatePlayer(final ServerPlayer player) { ++ final PlayerChunkLoaderData loader = player.chunkLoader; ++ if (loader != null) { ++ loader.update(); ++ } ++ } ++ ++ public void removePlayer(final ServerPlayer player) { ++ TickThread.ensureTickThread(player, "Cannot remove player from player chunk loader async"); ++ if (!player.isRealPlayer) { ++ return; ++ } ++ ++ final PlayerChunkLoaderData loader = player.chunkLoader; ++ ++ if (loader == null) { ++ throw new IllegalStateException("Player is already removed from player chunk loader"); ++ } ++ ++ loader.remove(); ++ player.chunkLoader = null; ++ } ++ ++ public void setSendDistance(final int distance) { ++ this.world.setSendViewDistance(distance); ++ } ++ ++ public void setLoadDistance(final int distance) { ++ 
this.world.setLoadViewDistance(distance); ++ } ++ ++ public void setTickDistance(final int distance) { ++ this.world.setTickViewDistance(distance); ++ } ++ ++ // Note: follow the player chunk loader so everything stays consistent... ++ public int getAPITickDistance() { ++ final ViewDistances distances = this.world.getViewDistances(); ++ final int tickViewDistance = PlayerChunkLoaderData.getTickDistance(-1, distances.tickViewDistance); ++ return tickViewDistance; ++ } ++ ++ public int getAPIViewDistance() { ++ final ViewDistances distances = this.world.getViewDistances(); ++ final int tickViewDistance = PlayerChunkLoaderData.getTickDistance(-1, distances.tickViewDistance); ++ final int loadDistance = PlayerChunkLoaderData.getLoadViewDistance(tickViewDistance, -1, distances.loadViewDistance); ++ ++ // loadDistance = api view distance + 1 ++ return loadDistance - 1; ++ } ++ ++ public int getAPISendViewDistance() { ++ final ViewDistances distances = this.world.getViewDistances(); ++ final int tickViewDistance = PlayerChunkLoaderData.getTickDistance(-1, distances.tickViewDistance); ++ final int loadDistance = PlayerChunkLoaderData.getLoadViewDistance(tickViewDistance, -1, distances.loadViewDistance); ++ final int sendViewDistance = PlayerChunkLoaderData.getSendViewDistance( ++ loadDistance, -1, -1, distances.sendViewDistance ++ ); ++ ++ return sendViewDistance; ++ } ++ ++ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ, final boolean borderOnly) { ++ return borderOnly ? this.isChunkSentBorderOnly(player, chunkX, chunkZ) : this.isChunkSent(player, chunkX, chunkZ); ++ } ++ ++ public boolean isChunkSent(final ServerPlayer player, final int chunkX, final int chunkZ) { ++ final PlayerChunkLoaderData loader = player.chunkLoader; ++ if (loader == null) { ++ return false; ++ } ++ ++ return loader.sentChunks.contains(CoordinateUtils.getChunkKey(chunkX, chunkZ)); ++ } ++ ++ public boolean isChunkSentBorderOnly(final ServerPlayer player, final int chunkX, final int chunkZ) { ++ final PlayerChunkLoaderData loader = player.chunkLoader; ++ if (loader == null) { ++ return false; ++ } ++ ++ for (int dz = -1; dz <= 1; ++dz) { ++ for (int dx = -1; dx <= 1; ++dx) { ++ if (!loader.sentChunks.contains(CoordinateUtils.getChunkKey(dx + chunkX, dz + chunkZ))) { ++ return true; ++ } ++ } ++ } ++ ++ return false; ++ } ++ ++ public void tick() { ++ TickThread.ensureTickThread("Cannot tick player chunk loader async"); ++ long currTime = System.nanoTime(); ++ for (final ServerPlayer player : new java.util.ArrayList<>(this.world.players())) { ++ final PlayerChunkLoaderData loader = player.chunkLoader; ++ if (loader == null || loader.world != this.world) { ++ // not our problem anymore ++ continue; ++ } ++ loader.update(); // can't invoke plugin logic ++ loader.updateQueues(currTime); ++ } ++ } ++ ++ private static long[] generateBFSOrder(final int radius) { ++ final LongArrayList chunks = new LongArrayList(); ++ final LongArrayFIFOQueue queue = new LongArrayFIFOQueue(); ++ final LongOpenHashSet seen = new LongOpenHashSet(); ++ ++ seen.add(CoordinateUtils.getChunkKey(0, 0)); ++ queue.enqueue(CoordinateUtils.getChunkKey(0, 0)); ++ while (!queue.isEmpty()) { ++ final long chunk = queue.dequeueLong(); ++ final int chunkX = CoordinateUtils.getChunkX(chunk); ++ final int chunkZ = CoordinateUtils.getChunkZ(chunk); ++ ++ // important that the addition to the list is here, rather than when enqueueing neighbours ++ // ensures the order is actually kept ++ chunks.add(chunk); ++ ++ // -x ++ 
final long n1 = CoordinateUtils.getChunkKey(chunkX - 1, chunkZ); ++ // -z ++ final long n2 = CoordinateUtils.getChunkKey(chunkX, chunkZ - 1); ++ // +x ++ final long n3 = CoordinateUtils.getChunkKey(chunkX + 1, chunkZ); ++ // +z ++ final long n4 = CoordinateUtils.getChunkKey(chunkX, chunkZ + 1); ++ ++ final long[] list = new long[] {n1, n2, n3, n4}; ++ ++ for (final long neighbour : list) { ++ final int neighbourX = CoordinateUtils.getChunkX(neighbour); ++ final int neighbourZ = CoordinateUtils.getChunkZ(neighbour); ++ if (Math.max(Math.abs(neighbourX), Math.abs(neighbourZ)) > radius) { ++ // don't enqueue out of range ++ continue; ++ } ++ if (!seen.add(neighbour)) { ++ continue; ++ } ++ queue.enqueue(neighbour); ++ } ++ } ++ ++ return chunks.toLongArray(); ++ } ++ ++ public static final class PlayerChunkLoaderData { ++ ++ private static final AtomicLong ID_GENERATOR = new AtomicLong(); ++ private final long id = ID_GENERATOR.incrementAndGet(); ++ private final Long idBoxed = Long.valueOf(this.id); ++ ++ // expected that this list returns for a given radius, the set of chunks ordered ++ // by manhattan distance ++ private static final long[][] SEARCH_RADIUS_ITERATION_LIST = new long[65][]; ++ static { ++ for (int i = 0; i < SEARCH_RADIUS_ITERATION_LIST.length; ++i) { ++ // a BFS around -x, -z, +x, +z will give increasing manhattan distance ++ SEARCH_RADIUS_ITERATION_LIST[i] = generateBFSOrder(i); ++ } ++ } ++ ++ private static final long MAX_RATE = 10_000L; ++ ++ private final ServerPlayer player; ++ private final ServerLevel world; ++ ++ private int lastChunkX = Integer.MIN_VALUE; ++ private int lastChunkZ = Integer.MIN_VALUE; ++ ++ private int lastSendDistance = Integer.MIN_VALUE; ++ private int lastLoadDistance = Integer.MIN_VALUE; ++ private int lastTickDistance = Integer.MIN_VALUE; ++ ++ private int lastSentChunkCenterX = Integer.MIN_VALUE; ++ private int lastSentChunkCenterZ = Integer.MIN_VALUE; ++ ++ private int lastSentChunkRadius = Integer.MIN_VALUE; ++ private int lastSentSimulationDistance = Integer.MIN_VALUE; ++ ++ private boolean canGenerateChunks = true; ++ ++ private final ArrayDeque> delayedTicketOps = new ArrayDeque<>(); ++ private final LongOpenHashSet sentChunks = new LongOpenHashSet(); ++ ++ private static final byte CHUNK_TICKET_STAGE_NONE = 0; ++ private static final byte CHUNK_TICKET_STAGE_LOADING = 1; ++ private static final byte CHUNK_TICKET_STAGE_LOADED = 2; ++ private static final byte CHUNK_TICKET_STAGE_GENERATING = 3; ++ private static final byte CHUNK_TICKET_STAGE_GENERATED = 4; ++ private static final byte CHUNK_TICKET_STAGE_TICK = 5; ++ private static final int[] TICKET_STAGE_TO_LEVEL = new int[] { ++ ChunkHolderManager.MAX_TICKET_LEVEL + 1, ++ LOADED_TICKET_LEVEL, ++ LOADED_TICKET_LEVEL, ++ GENERATED_TICKET_LEVEL, ++ GENERATED_TICKET_LEVEL, ++ TICK_TICKET_LEVEL ++ }; ++ private final Long2ByteOpenHashMap chunkTicketStage = new Long2ByteOpenHashMap(); ++ { ++ this.chunkTicketStage.defaultReturnValue(CHUNK_TICKET_STAGE_NONE); ++ } ++ ++ // rate limiting ++ private final AllocatingRateLimiter chunkSendLimiter = new AllocatingRateLimiter(); ++ private final AllocatingRateLimiter chunkLoadTicketLimiter = new AllocatingRateLimiter(); ++ private final AllocatingRateLimiter chunkGenerateTicketLimiter = new AllocatingRateLimiter(); ++ ++ // queues ++ private final LongComparator CLOSEST_MANHATTAN_DIST = (final long c1, final long c2) -> { ++ final int c1x = CoordinateUtils.getChunkX(c1); ++ final int c1z = CoordinateUtils.getChunkZ(c1); ++ ++ final int c2x = 
CoordinateUtils.getChunkX(c2); ++ final int c2z = CoordinateUtils.getChunkZ(c2); ++ ++ final int centerX = PlayerChunkLoaderData.this.lastChunkX; ++ final int centerZ = PlayerChunkLoaderData.this.lastChunkZ; ++ ++ return Integer.compare( ++ Math.abs(c1x - centerX) + Math.abs(c1z - centerZ), ++ Math.abs(c2x - centerX) + Math.abs(c2z - centerZ) ++ ); ++ }; ++ private final LongHeapPriorityQueue sendQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST); ++ private final LongHeapPriorityQueue tickingQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST); ++ private final LongHeapPriorityQueue generatingQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST); ++ private final LongHeapPriorityQueue genQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST); ++ private final LongHeapPriorityQueue loadingQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST); ++ private final LongHeapPriorityQueue loadQueue = new LongHeapPriorityQueue(CLOSEST_MANHATTAN_DIST); ++ ++ private volatile boolean removed; ++ ++ public PlayerChunkLoaderData(final ServerLevel world, final ServerPlayer player) { ++ this.world = world; ++ this.player = player; ++ } ++ ++ private void flushDelayedTicketOps() { ++ if (this.delayedTicketOps.isEmpty()) { ++ return; ++ } ++ this.world.chunkTaskScheduler.chunkHolderManager.pushDelayedTicketUpdates(this.delayedTicketOps); ++ this.delayedTicketOps.clear(); ++ this.world.chunkTaskScheduler.chunkHolderManager.tryDrainTicketUpdates(); ++ } ++ ++ private void pushDelayedTicketOp(final ChunkHolderManager.TicketOperation op) { ++ this.delayedTicketOps.addLast(op); ++ } ++ ++ private void sendChunk(final int chunkX, final int chunkZ) { ++ if (this.sentChunks.add(CoordinateUtils.getChunkKey(chunkX, chunkZ))) { ++ this.world.getChunkSource().chunkMap.updateChunkTracking(this.player, ++ new ChunkPos(chunkX, chunkZ), new MutableObject<>(), false, true); // unloaded, loaded ++ return; ++ } ++ throw new IllegalStateException(); ++ } ++ ++ private void sendUnloadChunk(final int chunkX, final int chunkZ) { ++ if (!this.sentChunks.remove(CoordinateUtils.getChunkKey(chunkX, chunkZ))) { ++ return; ++ } ++ this.sendUnloadChunkRaw(chunkX, chunkZ); ++ } ++ ++ private void sendUnloadChunkRaw(final int chunkX, final int chunkZ) { ++ this.player.serverLevel().getChunkSource().chunkMap.updateChunkTracking(this.player, ++ new ChunkPos(chunkX, chunkZ), null, true, false); // unloaded, loaded ++ } ++ ++ private final SingleUserAreaMap broadcastMap = new SingleUserAreaMap<>(this) { ++ @Override ++ protected void addCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) { ++ // do nothing, we only care about remove ++ } ++ ++ @Override ++ protected void removeCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) { ++ parameter.sendUnloadChunk(chunkX, chunkZ); ++ } ++ }; ++ private final SingleUserAreaMap loadTicketCleanup = new SingleUserAreaMap<>(this) { ++ @Override ++ protected void addCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) { ++ // do nothing, we only care about remove ++ } ++ ++ @Override ++ protected void removeCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) { ++ final long chunk = CoordinateUtils.getChunkKey(chunkX, chunkZ); ++ final byte ticketStage = parameter.chunkTicketStage.remove(chunk); ++ final int level = TICKET_STAGE_TO_LEVEL[ticketStage]; ++ if (level > ChunkHolderManager.MAX_TICKET_LEVEL) { ++ return; ++ } ++ ++ 
parameter.pushDelayedTicketOp(ChunkHolderManager.TicketOperation.addAndRemove( ++ chunk, ++ TicketType.UNKNOWN, level, new ChunkPos(chunkX, chunkZ), ++ REGION_PLAYER_TICKET, level, parameter.idBoxed ++ )); ++ } ++ }; ++ private final SingleUserAreaMap tickMap = new SingleUserAreaMap<>(this) { ++ @Override ++ protected void addCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) { ++ // do nothing, we will detect ticking chunks when we try to load them ++ } ++ ++ @Override ++ protected void removeCallback(final PlayerChunkLoaderData parameter, final int chunkX, final int chunkZ) { ++ final long chunk = CoordinateUtils.getChunkKey(chunkX, chunkZ); ++ // note: by the time this is called, the tick cleanup should have ran - so, if the chunk is at ++ // the tick stage it was deemed in range for loading. Thus, we need to move it to generated ++ if (!parameter.chunkTicketStage.replace(chunk, CHUNK_TICKET_STAGE_TICK, CHUNK_TICKET_STAGE_GENERATED)) { ++ return; ++ } ++ ++ // Since we are possibly downgrading the ticket level, we add an unknown ticket so that ++ // the level is kept until tick(). ++ parameter.pushDelayedTicketOp(ChunkHolderManager.TicketOperation.addAndRemove( ++ chunk, ++ TicketType.UNKNOWN, TICK_TICKET_LEVEL, new ChunkPos(chunkX, chunkZ), ++ REGION_PLAYER_TICKET, TICK_TICKET_LEVEL, parameter.idBoxed ++ )); ++ // keep chunk at new generated level ++ parameter.pushDelayedTicketOp(ChunkHolderManager.TicketOperation.addOp( ++ chunk, ++ REGION_PLAYER_TICKET, GENERATED_TICKET_LEVEL, parameter.idBoxed ++ )); ++ } ++ }; ++ ++ private static boolean wantChunkLoaded(final int centerX, final int centerZ, final int chunkX, final int chunkZ, ++ final int sendRadius) { ++ // expect sendRadius to be = 1 + target viewable radius ++ return ChunkMap.isChunkInRange(chunkX, chunkZ, centerX, centerZ, sendRadius); ++ } ++ ++ private static int getClientViewDistance(final ServerPlayer player) { ++ final Integer vd = player.clientViewDistance; ++ return vd == null ? -1 : Math.max(0, vd.intValue()); ++ } ++ ++ private static int getTickDistance(final int playerTickViewDistance, final int worldTickViewDistance) { ++ return playerTickViewDistance < 0 ? worldTickViewDistance : playerTickViewDistance; ++ } ++ ++ private static int getLoadViewDistance(final int tickViewDistance, final int playerLoadViewDistance, ++ final int worldLoadViewDistance) { ++ return Math.max(tickViewDistance, playerLoadViewDistance < 0 ? worldLoadViewDistance : playerLoadViewDistance); ++ } ++ ++ private static int getSendViewDistance(final int loadViewDistance, final int clientViewDistance, ++ final int playerSendViewDistance, final int worldSendViewDistance) { ++ return Math.min( ++ loadViewDistance, ++ playerSendViewDistance < 0 ? (!GlobalConfiguration.get().chunkLoadingAdvanced.autoConfigSendDistance || clientViewDistance < 0 ? (worldSendViewDistance < 0 ? 
loadViewDistance : worldSendViewDistance) : clientViewDistance) : playerSendViewDistance ++ ); ++ } ++ ++ private Packet updateClientChunkRadius(final int radius) { ++ this.lastSentChunkRadius = radius; ++ return new ClientboundSetChunkCacheRadiusPacket(radius); ++ } ++ ++ private Packet updateClientSimulationDistance(final int distance) { ++ this.lastSentSimulationDistance = distance; ++ return new ClientboundSetSimulationDistancePacket(distance); ++ } ++ ++ private Packet updateClientChunkCenter(final int chunkX, final int chunkZ) { ++ this.lastSentChunkCenterX = chunkX; ++ this.lastSentChunkCenterZ = chunkZ; ++ return new ClientboundSetChunkCacheCenterPacket(chunkX, chunkZ); ++ } ++ ++ private boolean canPlayerGenerateChunks() { ++ return !this.player.isSpectator() || this.world.getGameRules().getBoolean(GameRules.RULE_SPECTATORSGENERATECHUNKS); ++ } ++ ++ private double getMaxChunkLoadRate() { ++ final double configRate = GlobalConfiguration.get().chunkLoadingBasic.playerMaxChunkLoadRate; ++ ++ return configRate < 0.0 || configRate > (double)MAX_RATE ? (double)MAX_RATE : Math.max(1.0, configRate); ++ } ++ ++ private double getMaxChunkGenRate() { ++ final double configRate = GlobalConfiguration.get().chunkLoadingBasic.playerMaxChunkGenerateRate; ++ ++ return configRate < 0.0 || configRate > (double)MAX_RATE ? (double)MAX_RATE : Math.max(1.0, configRate); ++ } ++ ++ private double getMaxChunkSendRate() { ++ final double configRate = GlobalConfiguration.get().chunkLoadingBasic.playerMaxChunkSendRate; ++ ++ return configRate < 0.0 || configRate > (double)MAX_RATE ? (double)MAX_RATE : Math.max(1.0, configRate); ++ } ++ ++ private long getMaxChunkLoads() { ++ final long radiusChunks = (2L * this.lastLoadDistance + 1L) * (2L * this.lastLoadDistance + 1L); ++ long configLimit = GlobalConfiguration.get().chunkLoadingAdvanced.playerMaxConcurrentChunkLoads; ++ if (configLimit == 0L) { ++ // by default, only allow 1/5th of the chunks in the view distance to be concurrently active ++ configLimit = Math.max(5L, radiusChunks / 5L); ++ } else if (configLimit < 0L) { ++ configLimit = Integer.MAX_VALUE; ++ } // else: use the value configured ++ configLimit = configLimit - this.loadingQueue.size(); ++ ++ return configLimit; ++ } ++ ++ private long getMaxChunkGenerates() { ++ final long radiusChunks = (2L * this.lastLoadDistance + 1L) * (2L * this.lastLoadDistance + 1L); ++ long configLimit = GlobalConfiguration.get().chunkLoadingAdvanced.playerMaxConcurrentChunkGenerates; ++ if (configLimit == 0L) { ++ // by default, only allow 1/5th of the chunks in the view distance to be concurrently active ++ configLimit = Math.max(5L, radiusChunks / 5L); ++ } else if (configLimit < 0L) { ++ configLimit = Integer.MAX_VALUE; ++ } // else: use the value configured ++ configLimit = configLimit - this.generatingQueue.size(); ++ ++ return configLimit; ++ } ++ ++ private boolean wantChunkSent(final int chunkX, final int chunkZ) { ++ final int dx = this.lastChunkX - chunkX; ++ final int dz = this.lastChunkZ - chunkZ; ++ return Math.max(Math.abs(dx), Math.abs(dz)) <= this.lastSendDistance && wantChunkLoaded( ++ this.lastChunkX, this.lastChunkZ, chunkX, chunkZ, this.lastSendDistance ++ ); ++ } ++ ++ private boolean wantChunkTicked(final int chunkX, final int chunkZ) { ++ final int dx = this.lastChunkX - chunkX; ++ final int dz = this.lastChunkZ - chunkZ; ++ return Math.max(Math.abs(dx), Math.abs(dz)) <= this.lastTickDistance; ++ } ++ ++ void updateQueues(final long time) { ++ TickThread.ensureTickThread(this.player, "Cannot 
tick player chunk loader async"); ++ if (this.removed) { ++ throw new IllegalStateException("Ticking removed player chunk loader"); ++ } ++ // update rate limits ++ final double loadRate = this.getMaxChunkLoadRate(); ++ final double genRate = this.getMaxChunkGenRate(); ++ final double sendRate = this.getMaxChunkSendRate(); ++ ++ this.chunkLoadTicketLimiter.tickAllocation(time, loadRate, loadRate); ++ this.chunkGenerateTicketLimiter.tickAllocation(time, genRate, genRate); ++ this.chunkSendLimiter.tickAllocation(time, sendRate, sendRate); ++ ++ // try to progress chunk loads ++ while (!this.loadingQueue.isEmpty()) { ++ final long pendingLoadChunk = this.loadingQueue.firstLong(); ++ final int pendingChunkX = CoordinateUtils.getChunkX(pendingLoadChunk); ++ final int pendingChunkZ = CoordinateUtils.getChunkZ(pendingLoadChunk); ++ final ChunkAccess pending = this.world.chunkSource.getChunkAtImmediately(pendingChunkX, pendingChunkZ); ++ if (pending == null) { ++ // nothing to do here ++ break; ++ } ++ // chunk has loaded, so we can take it out of the queue ++ this.loadingQueue.dequeueLong(); ++ ++ // try to move to generate queue ++ final byte prev = this.chunkTicketStage.put(pendingLoadChunk, CHUNK_TICKET_STAGE_LOADED); ++ if (prev != CHUNK_TICKET_STAGE_LOADING) { ++ throw new IllegalStateException("Previous state should be " + CHUNK_TICKET_STAGE_LOADING + ", not " + prev); ++ } ++ ++ if (this.canGenerateChunks || this.isLoadedChunkGeneratable(pending)) { ++ this.genQueue.enqueue(pendingLoadChunk); ++ } // else: don't want to generate, so just leave it loaded ++ } ++ ++ // try to push more chunk loads ++ final long maxLoads = Math.max(0L, Math.min(MAX_RATE, Math.min(this.loadQueue.size(), this.getMaxChunkLoads()))); ++ final int maxLoadsThisTick = (int)this.chunkLoadTicketLimiter.takeAllocation(time, loadRate, maxLoads); ++ if (maxLoadsThisTick > 0) { ++ final LongArrayList chunks = new LongArrayList(maxLoadsThisTick); ++ for (int i = 0; i < maxLoadsThisTick; ++i) { ++ final long chunk = this.loadQueue.dequeueLong(); ++ final byte prev = this.chunkTicketStage.put(chunk, CHUNK_TICKET_STAGE_LOADING); ++ if (prev != CHUNK_TICKET_STAGE_NONE) { ++ throw new IllegalStateException("Previous state should be " + CHUNK_TICKET_STAGE_NONE + ", not " + prev); ++ } ++ this.pushDelayedTicketOp( ++ ChunkHolderManager.TicketOperation.addOp( ++ chunk, ++ REGION_PLAYER_TICKET, LOADED_TICKET_LEVEL, this.idBoxed ++ ) ++ ); ++ chunks.add(chunk); ++ this.loadingQueue.enqueue(chunk); ++ } ++ ++ // here we need to flush tickets, as scheduleChunkLoad requires tickets to be propagated with addTicket = false ++ this.flushDelayedTicketOps(); ++ // we only need to call scheduleChunkLoad because the loaded ticket level is not enough to start the chunk ++ // load - only generate ticket levels start anything, but they start generation... 
++ // propagate levels ++ // Note: this CAN call plugin logic, so it is VITAL that our bookkeeping logic is completely done by the time this is invoked ++ this.world.chunkTaskScheduler.chunkHolderManager.processTicketUpdates(); ++ ++ if (this.removed) { ++ // process ticket updates may invoke plugin logic, which may remove this player ++ return; ++ } ++ ++ for (int i = 0; i < maxLoadsThisTick; ++i) { ++ final long queuedLoadChunk = chunks.getLong(i); ++ final int queuedChunkX = CoordinateUtils.getChunkX(queuedLoadChunk); ++ final int queuedChunkZ = CoordinateUtils.getChunkZ(queuedLoadChunk); ++ this.world.chunkTaskScheduler.scheduleChunkLoad( ++ queuedChunkX, queuedChunkZ, ChunkStatus.EMPTY, false, PrioritisedExecutor.Priority.NORMAL, null ++ ); ++ if (this.removed) { ++ return; ++ } ++ } ++ } ++ ++ // try to progress chunk generations ++ while (!this.generatingQueue.isEmpty()) { ++ final long pendingGenChunk = this.generatingQueue.firstLong(); ++ final int pendingChunkX = CoordinateUtils.getChunkX(pendingGenChunk); ++ final int pendingChunkZ = CoordinateUtils.getChunkZ(pendingGenChunk); ++ final LevelChunk pending = this.world.chunkSource.getChunkAtIfLoadedMainThreadNoCache(pendingChunkX, pendingChunkZ); ++ if (pending == null) { ++ // nothing to do here ++ break; ++ } ++ ++ // chunk has generated, so we can take it out of queue ++ this.generatingQueue.dequeueLong(); ++ ++ final byte prev = this.chunkTicketStage.put(pendingGenChunk, CHUNK_TICKET_STAGE_GENERATED); ++ if (prev != CHUNK_TICKET_STAGE_GENERATING) { ++ throw new IllegalStateException("Previous state should be " + CHUNK_TICKET_STAGE_GENERATING + ", not " + prev); ++ } ++ ++ // try to move to send queue ++ if (this.wantChunkSent(pendingChunkX, pendingChunkZ)) { ++ this.sendQueue.enqueue(pendingGenChunk); ++ } ++ // try to move to tick queue ++ if (this.wantChunkTicked(pendingChunkX, pendingChunkZ)) { ++ this.tickingQueue.enqueue(pendingGenChunk); ++ } ++ } ++ ++ // try to push more chunk generations ++ final long maxGens = Math.max(0L, Math.min(MAX_RATE, Math.min(this.genQueue.size(), this.getMaxChunkGenerates()))); ++ final int maxGensThisTick = (int)this.chunkGenerateTicketLimiter.takeAllocation(time, genRate, maxGens); ++ for (int i = 0; i < maxGensThisTick; ++i) { ++ final long chunk = this.genQueue.dequeueLong(); ++ final byte prev = this.chunkTicketStage.put(chunk, CHUNK_TICKET_STAGE_GENERATING); ++ if (prev != CHUNK_TICKET_STAGE_LOADED) { ++ throw new IllegalStateException("Previous state should be " + CHUNK_TICKET_STAGE_LOADED + ", not " + prev); ++ } ++ this.pushDelayedTicketOp( ++ ChunkHolderManager.TicketOperation.addAndRemove( ++ chunk, ++ REGION_PLAYER_TICKET, GENERATED_TICKET_LEVEL, this.idBoxed, ++ REGION_PLAYER_TICKET, LOADED_TICKET_LEVEL, this.idBoxed ++ ) ++ ); ++ this.generatingQueue.enqueue(chunk); ++ } ++ ++ // try to pull ticking chunks ++ tick_check_outer: ++ while (!this.tickingQueue.isEmpty()) { ++ final long pendingTicking = this.tickingQueue.firstLong(); ++ final int pendingChunkX = CoordinateUtils.getChunkX(pendingTicking); ++ final int pendingChunkZ = CoordinateUtils.getChunkZ(pendingTicking); ++ ++ final int tickingReq = 2; ++ for (int dz = -tickingReq; dz <= tickingReq; ++dz) { ++ for (int dx = -tickingReq; dx <= tickingReq; ++dx) { ++ if ((dx | dz) == 0) { ++ continue; ++ } ++ final long neighbour = CoordinateUtils.getChunkKey(dx + pendingChunkX, dz + pendingChunkZ); ++ final byte stage = this.chunkTicketStage.get(neighbour); ++ if (stage != CHUNK_TICKET_STAGE_GENERATED && stage != 
CHUNK_TICKET_STAGE_TICK) { ++ break tick_check_outer; ++ } ++ } ++ } ++ // only gets here if all neighbours were marked as generated or ticking themselves ++ this.tickingQueue.dequeueLong(); ++ this.pushDelayedTicketOp( ++ ChunkHolderManager.TicketOperation.addAndRemove( ++ pendingTicking, ++ REGION_PLAYER_TICKET, TICK_TICKET_LEVEL, this.idBoxed, ++ REGION_PLAYER_TICKET, GENERATED_TICKET_LEVEL, this.idBoxed ++ ) ++ ); ++ // there is no queue to add after ticking ++ final byte prev = this.chunkTicketStage.put(pendingTicking, CHUNK_TICKET_STAGE_TICK); ++ if (prev != CHUNK_TICKET_STAGE_GENERATED) { ++ throw new IllegalStateException("Previous state should be " + CHUNK_TICKET_STAGE_GENERATED + ", not " + prev); ++ } ++ } ++ ++ // try to pull sending chunks ++ final long maxSends = Math.max(0L, Math.min(MAX_RATE, Integer.MAX_VALUE)); // no logic to track concurrent sends ++ final int maxSendsThisTick = Math.min((int)this.chunkSendLimiter.takeAllocation(time, sendRate, maxSends), this.sendQueue.size()); ++ // we do not return sends that we took from the allocation back because we want to limit the max send rate, not target it ++ for (int i = 0; i < maxSendsThisTick; ++i) { ++ final long pendingSend = this.sendQueue.firstLong(); ++ final int pendingSendX = CoordinateUtils.getChunkX(pendingSend); ++ final int pendingSendZ = CoordinateUtils.getChunkZ(pendingSend); ++ final LevelChunk chunk = this.world.chunkSource.getChunkAtIfLoadedMainThreadNoCache(pendingSendX, pendingSendZ); ++ if (!chunk.areNeighboursLoaded(1) || !TickThread.isTickThreadFor(this.world, pendingSendX, pendingSendZ)) { ++ // nothing to do ++ // the target chunk may not be owned by this region, but this should be resolved in the future ++ break; ++ } ++ this.sendQueue.dequeueLong(); ++ ++ this.sendChunk(pendingSendX, pendingSendZ); ++ if (this.removed) { ++ // sendChunk may invoke plugin logic ++ return; ++ } ++ } ++ ++ this.flushDelayedTicketOps(); ++ // we assume propagate ticket levels happens after this call ++ } ++ ++ void add() { ++ TickThread.ensureTickThread(this.player, "Cannot add player asynchronously"); ++ if (this.removed) { ++ throw new IllegalStateException("Adding removed player chunk loader"); ++ } ++ final ViewDistances playerDistances = this.player.getViewDistances(); ++ final ViewDistances worldDistances = this.world.getViewDistances(); ++ final int chunkX = this.player.chunkPosition().x; ++ final int chunkZ = this.player.chunkPosition().z; ++ ++ final int tickViewDistance = getTickDistance(playerDistances.tickViewDistance, worldDistances.tickViewDistance); ++ // load view cannot be less-than tick view + 1 ++ final int loadViewDistance = getLoadViewDistance(tickViewDistance, playerDistances.loadViewDistance, worldDistances.loadViewDistance); ++ // send view cannot be greater-than load view ++ final int clientViewDistance = getClientViewDistance(this.player); ++ final int sendViewDistance = getSendViewDistance(loadViewDistance, clientViewDistance, playerDistances.sendViewDistance, worldDistances.sendViewDistance); ++ ++ // send view distances ++ this.player.connection.send(this.updateClientChunkRadius(sendViewDistance)); ++ this.player.connection.send(this.updateClientSimulationDistance(tickViewDistance)); ++ ++ // add to distance maps ++ this.broadcastMap.add(chunkX, chunkZ, sendViewDistance); ++ this.loadTicketCleanup.add(chunkX, chunkZ, loadViewDistance + 1); ++ this.tickMap.add(chunkX, chunkZ, tickViewDistance); ++ ++ // update chunk center ++ this.player.connection.send(this.updateClientChunkCenter(chunkX, 
chunkZ)); ++ ++ // now we can update ++ this.update(); ++ } ++ ++ private boolean isLoadedChunkGeneratable(final int chunkX, final int chunkZ) { ++ return this.isLoadedChunkGeneratable(this.world.chunkSource.getChunkAtImmediately(chunkX, chunkZ)); ++ } ++ ++ private boolean isLoadedChunkGeneratable(final ChunkAccess chunkAccess) { ++ final BelowZeroRetrogen belowZeroRetrogen; ++ return chunkAccess != null && ( ++ chunkAccess.getStatus() == ChunkStatus.FULL || ++ ((belowZeroRetrogen = chunkAccess.getBelowZeroRetrogen()) != null && belowZeroRetrogen.targetStatus().isOrAfter(ChunkStatus.FULL)) ++ ); ++ } ++ ++ void update() { ++ TickThread.ensureTickThread(this.player, "Cannot update player asynchronously"); ++ if (this.removed) { ++ throw new IllegalStateException("Updating removed player chunk loader"); ++ } ++ final ViewDistances playerDistances = this.player.getViewDistances(); ++ final ViewDistances worldDistances = this.world.getViewDistances(); ++ ++ final int tickViewDistance = getTickDistance(playerDistances.tickViewDistance, worldDistances.tickViewDistance); ++ // load view cannot be less-than tick view + 1 ++ final int loadViewDistance = getLoadViewDistance(tickViewDistance, playerDistances.loadViewDistance, worldDistances.loadViewDistance); ++ // send view cannot be greater-than load view ++ final int clientViewDistance = getClientViewDistance(this.player); ++ final int sendViewDistance = getSendViewDistance(loadViewDistance, clientViewDistance, playerDistances.sendViewDistance, worldDistances.sendViewDistance); ++ ++ final ChunkPos playerPos = this.player.chunkPosition(); ++ final boolean canGenerateChunks = this.canPlayerGenerateChunks(); ++ final int currentChunkX = playerPos.x; ++ final int currentChunkZ = playerPos.z; ++ ++ final int prevChunkX = this.lastChunkX; ++ final int prevChunkZ = this.lastChunkZ; ++ ++ if ( ++ // has view distance stayed the same? ++ sendViewDistance == this.lastSendDistance ++ && loadViewDistance == this.lastLoadDistance ++ && tickViewDistance == this.lastTickDistance ++ ++ // has our chunk stayed the same? ++ && prevChunkX == currentChunkX ++ && prevChunkZ == currentChunkZ ++ ++ // can we still generate chunks? 
++ && this.canGenerateChunks == canGenerateChunks ++ ) { ++ // nothing we care about changed, so we're not re-calculating ++ return; ++ } ++ ++ // update distance maps ++ this.broadcastMap.update(currentChunkX, currentChunkZ, sendViewDistance); ++ this.loadTicketCleanup.update(currentChunkX, currentChunkZ, loadViewDistance + 1); ++ this.tickMap.update(currentChunkX, currentChunkZ, tickViewDistance); ++ if (sendViewDistance > loadViewDistance || tickViewDistance > loadViewDistance) { ++ throw new IllegalStateException(); ++ } ++ ++ // update VDs for client ++ // this should be after the distance map updates, as they will send unload packets ++ if (this.lastSentChunkRadius != sendViewDistance) { ++ this.player.connection.send(this.updateClientChunkRadius(sendViewDistance)); ++ } ++ if (this.lastSentSimulationDistance != tickViewDistance) { ++ this.player.connection.send(this.updateClientSimulationDistance(tickViewDistance)); ++ } ++ ++ this.sendQueue.clear(); ++ this.tickingQueue.clear(); ++ this.generatingQueue.clear(); ++ this.genQueue.clear(); ++ this.loadingQueue.clear(); ++ this.loadQueue.clear(); ++ ++ this.lastChunkX = currentChunkX; ++ this.lastChunkZ = currentChunkZ; ++ this.lastSendDistance = sendViewDistance; ++ this.lastLoadDistance = loadViewDistance; ++ this.lastTickDistance = tickViewDistance; ++ this.canGenerateChunks = canGenerateChunks; ++ ++ // +1 since we need to load chunks +1 around the load view distance... ++ final long[] toIterate = SEARCH_RADIUS_ITERATION_LIST[loadViewDistance + 1]; ++ // the iteration order is by increasing manhattan distance - so, we do NOT need to ++ // sort anything in the queue! ++ for (final long deltaChunk : toIterate) { ++ final int dx = CoordinateUtils.getChunkX(deltaChunk); ++ final int dz = CoordinateUtils.getChunkZ(deltaChunk); ++ final int chunkX = dx + currentChunkX; ++ final int chunkZ = dz + currentChunkZ; ++ final long chunk = CoordinateUtils.getChunkKey(chunkX, chunkZ); ++ final int squareDistance = Math.max(Math.abs(dx), Math.abs(dz)); ++ final int manhattanDistance = Math.abs(dx) + Math.abs(dz); ++ ++ // since chunk sending is not by radius alone, we need an extra check here to account for ++ // everything <= sendDistance ++ // Note: Vanilla may want to send chunks outside the send view distance, so we do need ++ // the dist <= view check ++ final boolean sendChunk = squareDistance <= sendViewDistance ++ && wantChunkLoaded(currentChunkX, currentChunkZ, chunkX, chunkZ, sendViewDistance); ++ final boolean sentChunk = sendChunk ? 
this.sentChunks.contains(chunk) : this.sentChunks.remove(chunk); ++ ++ if (!sendChunk && sentChunk) { ++ // have sent the chunk, but don't want it anymore ++ // unload it now ++ this.sendUnloadChunkRaw(chunkX, chunkZ); ++ } ++ ++ final byte stage = this.chunkTicketStage.get(chunk); ++ switch (stage) { ++ case CHUNK_TICKET_STAGE_NONE: { ++ // we want the chunk to be at least loaded ++ this.loadQueue.enqueue(chunk); ++ break; ++ } ++ case CHUNK_TICKET_STAGE_LOADING: { ++ this.loadingQueue.enqueue(chunk); ++ break; ++ } ++ case CHUNK_TICKET_STAGE_LOADED: { ++ if (canGenerateChunks || this.isLoadedChunkGeneratable(chunkX, chunkZ)) { ++ this.genQueue.enqueue(chunk); ++ } ++ break; ++ } ++ case CHUNK_TICKET_STAGE_GENERATING: { ++ this.generatingQueue.enqueue(chunk); ++ break; ++ } ++ case CHUNK_TICKET_STAGE_GENERATED: { ++ if (sendChunk && !sentChunk) { ++ this.sendQueue.enqueue(chunk); ++ } ++ if (squareDistance <= tickViewDistance) { ++ this.tickingQueue.enqueue(chunk); ++ } ++ break; ++ } ++ case CHUNK_TICKET_STAGE_TICK: { ++ if (sendChunk && !sentChunk) { ++ this.sendQueue.enqueue(chunk); ++ } ++ break; ++ } ++ default: { ++ throw new IllegalStateException("Unknown stage: " + stage); ++ } ++ } ++ } ++ ++ // update the chunk center ++ // this must be done last so that the client does not ignore any of our unload chunk packets above ++ if (this.lastSentChunkCenterX != currentChunkX || this.lastSentChunkCenterZ != currentChunkZ) { ++ this.player.connection.send(this.updateClientChunkCenter(currentChunkX, currentChunkZ)); ++ } ++ ++ this.flushDelayedTicketOps(); ++ } ++ ++ void remove() { ++ TickThread.ensureTickThread(this.player, "Cannot remove player asynchronously"); ++ if (this.removed) { ++ throw new IllegalStateException("Removing removed player chunk loader"); ++ } ++ this.removed = true; ++ // sends the chunk unload packets ++ this.broadcastMap.remove(); ++ // cleans up loading/generating tickets ++ this.loadTicketCleanup.remove(); ++ // cleans up ticking tickets ++ this.tickMap.remove(); ++ ++ // purge queues ++ this.sendQueue.clear(); ++ this.tickingQueue.clear(); ++ this.generatingQueue.clear(); ++ this.genQueue.clear(); ++ this.loadingQueue.clear(); ++ this.loadQueue.clear(); ++ ++ // flush ticket changes ++ this.flushDelayedTicketOps(); ++ ++ // now all tickets should be removed, which is all of our external state ++ } ++ } ++ ++ // TODO rebase into util patch ++ private static final class AllocatingRateLimiter { ++ ++ // max difference granularity in ns ++ private static final long MAX_GRANULARITY = TimeUnit.SECONDS.toNanos(1L); ++ ++ private double allocation; ++ private long lastAllocationUpdate; ++ private double takeCarry; ++ private long lastTakeUpdate; ++ ++ // rate in units/s, and time in ns ++ public void tickAllocation(final long time, final double rate, final double maxAllocation) { ++ final long diff = Math.min(MAX_GRANULARITY, time - this.lastAllocationUpdate); ++ this.lastAllocationUpdate = time; ++ ++ this.allocation = Math.min(maxAllocation - this.takeCarry, this.allocation + rate * (diff*1.0E-9D)); ++ } ++ ++ // rate in units/s, and time in ns ++ public long takeAllocation(final long time, final double rate, final long maxTake) { ++ if (maxTake < 1L) { ++ return 0L; ++ } ++ ++ double ret = this.takeCarry; ++ final long diff = Math.min(MAX_GRANULARITY, time - this.lastTakeUpdate); ++ this.lastTakeUpdate = time; ++ ++ // note: abs(takeCarry) <= 1.0 ++ final double take = Math.min(Math.min((double)maxTake - this.takeCarry, this.allocation), rate * (diff*1.0E-9)); ++ ++ 
ret += take; ++ this.allocation -= take; ++ ++ final long retInteger = (long)Math.floor(ret); ++ this.takeCarry = ret - (double)retInteger; ++ ++ return retInteger; ++ } ++ } ++ ++ public static abstract class SingleUserAreaMap { ++ ++ private static final int NOT_SET = Integer.MIN_VALUE; ++ ++ private final T parameter; ++ private int lastChunkX = NOT_SET; ++ private int lastChunkZ = NOT_SET; ++ private int distance = NOT_SET; ++ ++ public SingleUserAreaMap(final T parameter) { ++ this.parameter = parameter; ++ } ++ ++ /* math sign function except 0 returns 1 */ ++ protected static int sign(int val) { ++ return 1 | (val >> (Integer.SIZE - 1)); ++ } ++ ++ protected abstract void addCallback(final T parameter, final int chunkX, final int chunkZ); ++ ++ protected abstract void removeCallback(final T parameter, final int chunkX, final int chunkZ); ++ ++ private void addToNew(final T parameter, final int chunkX, final int chunkZ, final int distance) { ++ final int maxX = chunkX + distance; ++ final int maxZ = chunkZ + distance; ++ ++ for (int cx = chunkX - distance; cx <= maxX; ++cx) { ++ for (int cz = chunkZ - distance; cz <= maxZ; ++cz) { ++ this.addCallback(parameter, cx, cz); ++ } ++ } ++ } ++ ++ private void removeFromOld(final T parameter, final int chunkX, final int chunkZ, final int distance) { ++ final int maxX = chunkX + distance; ++ final int maxZ = chunkZ + distance; ++ ++ for (int cx = chunkX - distance; cx <= maxX; ++cx) { ++ for (int cz = chunkZ - distance; cz <= maxZ; ++cz) { ++ this.removeCallback(parameter, cx, cz); ++ } ++ } ++ } ++ ++ public final boolean add(final int chunkX, final int chunkZ, final int distance) { ++ if (distance < 0) { ++ throw new IllegalArgumentException(Integer.toString(distance)); ++ } ++ if (this.lastChunkX != NOT_SET) { ++ return false; ++ } ++ this.lastChunkX = chunkX; ++ this.lastChunkZ = chunkZ; ++ this.distance = distance; ++ ++ this.addToNew(this.parameter, chunkX, chunkZ, distance); ++ ++ return true; ++ } ++ ++ public final boolean update(final int toX, final int toZ, final int newViewDistance) { ++ if (newViewDistance < 0) { ++ throw new IllegalArgumentException(Integer.toString(newViewDistance)); ++ } ++ final int fromX = this.lastChunkX; ++ final int fromZ = this.lastChunkZ; ++ final int oldViewDistance = this.distance; ++ if (fromX == NOT_SET) { ++ return false; ++ } ++ ++ this.lastChunkX = toX; ++ this.lastChunkZ = toZ; ++ ++ final T parameter = this.parameter; ++ ++ ++ final int dx = toX - fromX; ++ final int dz = toZ - fromZ; ++ ++ final int totalX = IntegerUtil.branchlessAbs(fromX - toX); ++ final int totalZ = IntegerUtil.branchlessAbs(fromZ - toZ); ++ ++ if (Math.max(totalX, totalZ) > (2 * Math.max(newViewDistance, oldViewDistance))) { ++ // teleported? ++ this.removeFromOld(parameter, fromX, fromZ, oldViewDistance); ++ this.addToNew(parameter, toX, toZ, newViewDistance); ++ return true; ++ } ++ ++ if (oldViewDistance != newViewDistance) { ++ // remove loop ++ ++ final int oldMinX = fromX - oldViewDistance; ++ final int oldMinZ = fromZ - oldViewDistance; ++ final int oldMaxX = fromX + oldViewDistance; ++ final int oldMaxZ = fromZ + oldViewDistance; ++ for (int currX = oldMinX; currX <= oldMaxX; ++currX) { ++ for (int currZ = oldMinZ; currZ <= oldMaxZ; ++currZ) { ++ ++ // only remove if we're outside the new view distance... 
++ if (Math.max(IntegerUtil.branchlessAbs(currX - toX), IntegerUtil.branchlessAbs(currZ - toZ)) > newViewDistance) { ++ this.removeCallback(parameter, currX, currZ); ++ } ++ } ++ } ++ ++ // add loop ++ ++ final int newMinX = toX - newViewDistance; ++ final int newMinZ = toZ - newViewDistance; ++ final int newMaxX = toX + newViewDistance; ++ final int newMaxZ = toZ + newViewDistance; ++ for (int currX = newMinX; currX <= newMaxX; ++currX) { ++ for (int currZ = newMinZ; currZ <= newMaxZ; ++currZ) { ++ ++ // only add if we're outside the old view distance... ++ if (Math.max(IntegerUtil.branchlessAbs(currX - fromX), IntegerUtil.branchlessAbs(currZ - fromZ)) > oldViewDistance) { ++ this.addCallback(parameter, currX, currZ); ++ } ++ } ++ } ++ ++ return true; ++ } ++ ++ // x axis is width ++ // z axis is height ++ // right refers to the x axis of where we moved ++ // top refers to the z axis of where we moved ++ ++ // same view distance ++ ++ // used for relative positioning ++ final int up = sign(dz); // 1 if dz >= 0, -1 otherwise ++ final int right = sign(dx); // 1 if dx >= 0, -1 otherwise ++ ++ // The area excluded by overlapping the two view distance squares creates four rectangles: ++ // Two on the left, and two on the right. The ones on the left we consider the "removed" section ++ // and on the right the "added" section. ++ // https://i.imgur.com/MrnOBgI.png is a reference image. Note that the outside border is not actually ++ // exclusive to the regions they surround. ++ ++ // 4 points of the rectangle ++ int maxX; // exclusive ++ int minX; // inclusive ++ int maxZ; // exclusive ++ int minZ; // inclusive ++ ++ if (dx != 0) { ++ // handle right addition ++ ++ maxX = toX + (oldViewDistance * right) + right; // exclusive ++ minX = fromX + (oldViewDistance * right) + right; // inclusive ++ maxZ = fromZ + (oldViewDistance * up) + up; // exclusive ++ minZ = toZ - (oldViewDistance * up); // inclusive ++ ++ for (int currX = minX; currX != maxX; currX += right) { ++ for (int currZ = minZ; currZ != maxZ; currZ += up) { ++ this.addCallback(parameter, currX, currZ); ++ } ++ } ++ } ++ ++ if (dz != 0) { ++ // handle up addition ++ ++ maxX = toX + (oldViewDistance * right) + right; // exclusive ++ minX = toX - (oldViewDistance * right); // inclusive ++ maxZ = toZ + (oldViewDistance * up) + up; // exclusive ++ minZ = fromZ + (oldViewDistance * up) + up; // inclusive ++ ++ for (int currX = minX; currX != maxX; currX += right) { ++ for (int currZ = minZ; currZ != maxZ; currZ += up) { ++ this.addCallback(parameter, currX, currZ); ++ } ++ } ++ } ++ ++ if (dx != 0) { ++ // handle left removal ++ ++ maxX = toX - (oldViewDistance * right); // exclusive ++ minX = fromX - (oldViewDistance * right); // inclusive ++ maxZ = fromZ + (oldViewDistance * up) + up; // exclusive ++ minZ = toZ - (oldViewDistance * up); // inclusive ++ ++ for (int currX = minX; currX != maxX; currX += right) { ++ for (int currZ = minZ; currZ != maxZ; currZ += up) { ++ this.removeCallback(parameter, currX, currZ); ++ } ++ } ++ } ++ ++ if (dz != 0) { ++ // handle down removal ++ ++ maxX = fromX + (oldViewDistance * right) + right; // exclusive ++ minX = fromX - (oldViewDistance * right); // inclusive ++ maxZ = toZ - (oldViewDistance * up); // exclusive ++ minZ = fromZ - (oldViewDistance * up); // inclusive ++ ++ for (int currX = minX; currX != maxX; currX += right) { ++ for (int currZ = minZ; currZ != maxZ; currZ += up) { ++ this.removeCallback(parameter, currX, currZ); ++ } ++ } ++ } ++ ++ return true; ++ } ++ ++ public final boolean 
remove() { ++ final int chunkX = this.lastChunkX; ++ final int chunkZ = this.lastChunkZ; ++ final int distance = this.distance; ++ if (chunkX == NOT_SET) { ++ return false; ++ } ++ ++ this.lastChunkX = this.lastChunkZ = this.distance = NOT_SET; ++ ++ this.removeFromOld(this.parameter, chunkX, chunkZ, distance); ++ ++ return true; ++ } ++ } ++ ++ static final class CountedSRSWLinkedQueue { ++ ++ private final SRSWLinkedQueue queue = new SRSWLinkedQueue<>(); ++ private volatile long countAdded; ++ private volatile long countRemoved; ++ ++ private static final VarHandle COUNT_ADDED_HANDLE = ConcurrentUtil.getVarHandle(CountedSRSWLinkedQueue.class, "countAdded", long.class); ++ private static final VarHandle COUNT_REMOVED_HANDLE = ConcurrentUtil.getVarHandle(CountedSRSWLinkedQueue.class, "countRemoved", long.class); ++ ++ private long getCountAddedPlain() { ++ return (long)COUNT_ADDED_HANDLE.get(this); ++ } ++ ++ private long getCountAddedAcquire() { ++ return (long)COUNT_ADDED_HANDLE.getAcquire(this); ++ } ++ ++ private void setCountAddedRelease(final long to) { ++ COUNT_ADDED_HANDLE.setRelease(this, to); ++ } ++ ++ private long getCountRemovedPlain() { ++ return (long)COUNT_REMOVED_HANDLE.get(this); ++ } ++ ++ private long getCountRemovedAcquire() { ++ return (long)COUNT_REMOVED_HANDLE.getAcquire(this); ++ } ++ ++ private void setCountRemovedRelease(final long to) { ++ COUNT_REMOVED_HANDLE.setRelease(this, to); ++ } ++ ++ public void add(final E element) { ++ this.setCountAddedRelease(this.getCountAddedPlain() + 1L); ++ this.queue.addLast(element); ++ } ++ ++ public E poll() { ++ final E ret = this.queue.poll(); ++ if (ret != null) { ++ this.setCountRemovedRelease(this.getCountRemovedPlain() + 1L); ++ } ++ ++ return ret; ++ } ++ ++ public long size() { ++ final long removed = this.getCountRemovedAcquire(); ++ final long added = this.getCountAddedAcquire(); ++ ++ return added - removed; ++ } ++ } ++} +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java +index 748cc48c6c42c694d1c9b685e96fbe6d8337d3f3..ce26fdfa1afc74ba93d19157042f6c55778011e1 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java +@@ -1,5 +1,6 @@ + package io.papermc.paper.chunk.system.scheduling; + ++import ca.spottedleaf.concurrentutil.collection.MultiThreadedQueue; + import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; + import ca.spottedleaf.concurrentutil.map.SWMRLong2ObjectHashTable; + import co.aikar.timings.Timing; +@@ -500,6 +501,21 @@ public final class ChunkHolderManager { + } + } + ++ // atomic with respect to all add/remove/addandremove ticket calls for the given chunk ++ public boolean addIfRemovedTicket(final long chunk, final TicketType addType, final int addLevel, final T addIdentifier, ++ final TicketType removeType, final int removeLevel, final V removeIdentifier) { ++ this.ticketLock.lock(); ++ try { ++ if (this.removeTicketAtLevel(removeType, chunk, removeLevel, removeIdentifier)) { ++ this.addTicketAtLevel(addType, chunk, addLevel, addIdentifier); ++ return true; ++ } ++ return false; ++ } finally { ++ this.ticketLock.unlock(); ++ } ++ } ++ + public void removeAllTicketsFor(final TicketType ticketType, final int ticketLevel, final T ticketIdentifier) { + if (ticketLevel > MAX_TICKET_LEVEL) { + return; +@@ -907,6 +923,142 @@ public final class 
ChunkHolderManager { + } + } + ++ public enum TicketOperationType { ++ ADD, REMOVE, ADD_IF_REMOVED, ADD_AND_REMOVE ++ } ++ ++ public static record TicketOperation ( ++ TicketOperationType op, long chunkCoord, ++ TicketType ticketType, int ticketLevel, T identifier, ++ TicketType ticketType2, int ticketLevel2, V identifier2 ++ ) { ++ ++ private TicketOperation(TicketOperationType op, long chunkCoord, ++ TicketType ticketType, int ticketLevel, T identifier) { ++ this(op, chunkCoord, ticketType, ticketLevel, identifier, null, 0, null); ++ } ++ ++ public static TicketOperation addOp(final ChunkPos chunk, final TicketType type, final int ticketLevel, final T identifier) { ++ return addOp(CoordinateUtils.getChunkKey(chunk), type, ticketLevel, identifier); ++ } ++ ++ public static TicketOperation addOp(final int chunkX, final int chunkZ, final TicketType type, final int ticketLevel, final T identifier) { ++ return addOp(CoordinateUtils.getChunkKey(chunkX, chunkZ), type, ticketLevel, identifier); ++ } ++ ++ public static TicketOperation addOp(final long chunk, final TicketType type, final int ticketLevel, final T identifier) { ++ return new TicketOperation<>(TicketOperationType.ADD, chunk, type, ticketLevel, identifier); ++ } ++ ++ public static TicketOperation removeOp(final ChunkPos chunk, final TicketType type, final int ticketLevel, final T identifier) { ++ return removeOp(CoordinateUtils.getChunkKey(chunk), type, ticketLevel, identifier); ++ } ++ ++ public static TicketOperation removeOp(final int chunkX, final int chunkZ, final TicketType type, final int ticketLevel, final T identifier) { ++ return removeOp(CoordinateUtils.getChunkKey(chunkX, chunkZ), type, ticketLevel, identifier); ++ } ++ ++ public static TicketOperation removeOp(final long chunk, final TicketType type, final int ticketLevel, final T identifier) { ++ return new TicketOperation<>(TicketOperationType.REMOVE, chunk, type, ticketLevel, identifier); ++ } ++ ++ public static TicketOperation addIfRemovedOp(final long chunk, ++ final TicketType addType, final int addLevel, final T addIdentifier, ++ final TicketType removeType, final int removeLevel, final V removeIdentifier) { ++ return new TicketOperation<>( ++ TicketOperationType.ADD_IF_REMOVED, chunk, addType, addLevel, addIdentifier, ++ removeType, removeLevel, removeIdentifier ++ ); ++ } ++ ++ public static TicketOperation addAndRemove(final long chunk, ++ final TicketType addType, final int addLevel, final T addIdentifier, ++ final TicketType removeType, final int removeLevel, final V removeIdentifier) { ++ return new TicketOperation<>( ++ TicketOperationType.ADD_AND_REMOVE, chunk, addType, addLevel, addIdentifier, ++ removeType, removeLevel, removeIdentifier ++ ); ++ } ++ } ++ ++ private final MultiThreadedQueue> delayedTicketUpdates = new MultiThreadedQueue<>(); ++ ++ // note: MUST hold ticket lock, otherwise operation ordering is lost ++ private boolean drainTicketUpdates() { ++ boolean ret = false; ++ ++ TicketOperation operation; ++ while ((operation = this.delayedTicketUpdates.poll()) != null) { ++ switch (operation.op) { ++ case ADD: { ++ ret |= this.addTicketAtLevel(operation.ticketType, operation.chunkCoord, operation.ticketLevel, operation.identifier); ++ break; ++ } ++ case REMOVE: { ++ ret |= this.removeTicketAtLevel(operation.ticketType, operation.chunkCoord, operation.ticketLevel, operation.identifier); ++ break; ++ } ++ case ADD_IF_REMOVED: { ++ ret |= this.addIfRemovedTicket( ++ operation.chunkCoord, ++ operation.ticketType, operation.ticketLevel, 
operation.identifier, ++ operation.ticketType2, operation.ticketLevel2, operation.identifier2 ++ ); ++ break; ++ } ++ case ADD_AND_REMOVE: { ++ ret = true; ++ this.addAndRemoveTickets( ++ operation.chunkCoord, ++ operation.ticketType, operation.ticketLevel, operation.identifier, ++ operation.ticketType2, operation.ticketLevel2, operation.identifier2 ++ ); ++ break; ++ } ++ } ++ } ++ ++ return ret; ++ } ++ ++ public Boolean tryDrainTicketUpdates() { ++ final boolean acquired = this.ticketLock.tryLock(); ++ try { ++ if (!acquired) { ++ return null; ++ } ++ ++ return Boolean.valueOf(this.drainTicketUpdates()); ++ } finally { ++ if (acquired) { ++ this.ticketLock.unlock(); ++ } ++ } ++ } ++ ++ public void pushDelayedTicketUpdate(final TicketOperation operation) { ++ this.delayedTicketUpdates.add(operation); ++ } ++ ++ public void pushDelayedTicketUpdates(final Collection> operations) { ++ this.delayedTicketUpdates.addAll(operations); ++ } ++ ++ public Boolean tryProcessTicketUpdates() { ++ final boolean acquired = this.ticketLock.tryLock(); ++ try { ++ if (!acquired) { ++ return null; ++ } ++ ++ return Boolean.valueOf(this.processTicketUpdates(false, true, null)); ++ } finally { ++ if (acquired) { ++ this.ticketLock.unlock(); ++ } ++ } ++ } ++ + private final ThreadLocal BLOCK_TICKET_UPDATES = ThreadLocal.withInitial(() -> { + return Boolean.FALSE; + }); +@@ -953,6 +1105,8 @@ public final class ChunkHolderManager { + + this.ticketLock.lock(); + try { ++ this.drainTicketUpdates(); ++ + final boolean levelsUpdated = this.ticketLevelPropagator.propagateUpdates(); + if (levelsUpdated) { + // Unlike CB, ticket level updates cannot happen recursively. Thank god. +diff --git a/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java +index 52b02cb1f02d1c65b840f38cfc8baee500aa2259..09234062090c210227350cafeed141f8cb73108a 100644 +--- a/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java ++++ b/src/main/java/io/papermc/paper/configuration/GlobalConfiguration.java +@@ -274,4 +274,43 @@ public class GlobalConfiguration extends ConfigurationPart { + public boolean useDimensionTypeForCustomSpawners = false; + public boolean strictAdvancementDimensionCheck = false; + } ++ ++ public ChunkLoadingBasic chunkLoadingBasic; ++ ++ public class ChunkLoadingBasic extends ConfigurationPart { ++ @Comment("The maximum rate in chunks per second that the server will send to any individual player. Set to -1 to disable this limit.") ++ public double playerMaxChunkSendRate = 75.0; ++ ++ @Comment( ++ "The maximum rate at which chunks will load for any individual player. " + ++ "Note that this setting also affects chunk generations, since a chunk load is always first issued to test if a " + ++ "chunk is already generated. Set to -1 to disable this limit." ++ ) ++ public double playerMaxChunkLoadRate = 100.0; ++ ++ @Comment("The maximum rate at which chunks will generate for any individual player. Set to -1 to disable this limit.") ++ public double playerMaxChunkGenerateRate = -1.0; ++ } ++ ++ public ChunkLoadingAdvanced chunkLoadingAdvanced; ++ ++ public class ChunkLoadingAdvanced extends ConfigurationPart { ++ @Comment( ++ "Set to true if the server will match the chunk send radius that clients have configured " + ++ "in their view distance settings if the client is less than the server's send distance." 
++ ) ++ public boolean autoConfigSendDistance = true; ++ ++ @Comment( ++ "Specifies the maximum amount of concurrent chunk loads that an individual player can have." + ++ "Set to 0 to let the server configure it automatically per player, or set it to -1 to disable the limit." ++ ) ++ public int playerMaxConcurrentChunkLoads = 0; ++ ++ @Comment( ++ "Specifies the maximum amount of concurrent chunk generations that an individual player can have." + ++ "Set to 0 to let the server configure it automatically per player, or set it to -1 to disable the limit." ++ ) ++ public int playerMaxConcurrentChunkGenerates = 0; ++ } + } +diff --git a/src/main/java/io/papermc/paper/util/IntervalledCounter.java b/src/main/java/io/papermc/paper/util/IntervalledCounter.java +index cea9c098ade00ee87b8efc8164ab72f5279758f0..197224e31175252d8438a8df585bbb65f2288d7f 100644 +--- a/src/main/java/io/papermc/paper/util/IntervalledCounter.java ++++ b/src/main/java/io/papermc/paper/util/IntervalledCounter.java +@@ -2,6 +2,8 @@ package io.papermc.paper.util; + + public final class IntervalledCounter { + ++ private static final int INITIAL_SIZE = 8; ++ + protected long[] times; + protected long[] counts; + protected final long interval; +@@ -11,8 +13,8 @@ public final class IntervalledCounter { + protected int tail; // exclusive + + public IntervalledCounter(final long interval) { +- this.times = new long[8]; +- this.counts = new long[8]; ++ this.times = new long[INITIAL_SIZE]; ++ this.counts = new long[INITIAL_SIZE]; + this.interval = interval; + } + +@@ -67,13 +69,13 @@ public final class IntervalledCounter { + this.tail = nextTail; + } + +- public void updateAndAdd(final int count) { ++ public void updateAndAdd(final long count) { + final long currTime = System.nanoTime(); + this.updateCurrentTime(currTime); + this.addTime(currTime, count); + } + +- public void updateAndAdd(final int count, final long currTime) { ++ public void updateAndAdd(final long count, final long currTime) { + this.updateCurrentTime(currTime); + this.addTime(currTime, count); + } +@@ -93,9 +95,13 @@ public final class IntervalledCounter { + this.tail = size; + + if (tail >= head) { ++ // sequentially ordered from [head, tail) + System.arraycopy(oldElements, head, newElements, 0, size); + System.arraycopy(oldCounts, head, newCounts, 0, size); + } else { ++ // ordered from [head, length) ++ // then followed by [0, tail) ++ + System.arraycopy(oldElements, head, newElements, 0, oldElements.length - head); + System.arraycopy(oldElements, 0, newElements, oldElements.length - head, tail); + +@@ -106,10 +112,18 @@ public final class IntervalledCounter { + + // returns in units per second + public double getRate() { +- return this.size() / (this.interval * 1.0e-9); ++ return (double)this.sum / ((double)this.interval * 1.0E-9); ++ } ++ ++ public long getInterval() { ++ return this.interval; + } + +- public long size() { ++ public long getSum() { + return this.sum; + } ++ ++ public int totalDataPoints() { ++ return this.tail >= this.head ? 
(this.tail - this.head) : (this.tail + (this.counts.length - this.head)); ++ } + } +diff --git a/src/main/java/io/papermc/paper/util/MCUtil.java b/src/main/java/io/papermc/paper/util/MCUtil.java +index d58d44faa40be2421f4cb54740a3abdbad72875c..2e830847155e7c43ef411d28e81592c21446143b 100644 +--- a/src/main/java/io/papermc/paper/util/MCUtil.java ++++ b/src/main/java/io/papermc/paper/util/MCUtil.java +@@ -583,8 +583,8 @@ public final class MCUtil { + + worldData.addProperty("is-loaded", loadedWorlds.contains(bukkitWorld)); + worldData.addProperty("name", world.getWorld().getName()); +- worldData.addProperty("view-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance()); // Paper - replace chunk loader system +- worldData.addProperty("tick-view-distance", world.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance()); // Paper - replace chunk loader system ++ worldData.addProperty("view-distance", world.getWorld().getViewDistance()); // Paper - replace chunk loader system ++ worldData.addProperty("tick-view-distance", world.getWorld().getSimulationDistance()); // Paper - replace chunk loader system + worldData.addProperty("keep-spawn-loaded", world.keepSpawnInMemory); + worldData.addProperty("keep-spawn-loaded-range", world.paperConfig().spawn.keepSpawnLoadedRange * 16); + +diff --git a/src/main/java/net/minecraft/server/level/ChunkHolder.java b/src/main/java/net/minecraft/server/level/ChunkHolder.java +index 6b6d31e76f48a0de33ac528f990ce841dbd666f1..4b87f5d899e5ac033d78ccdbca21c9c50c46dcef 100644 +--- a/src/main/java/net/minecraft/server/level/ChunkHolder.java ++++ b/src/main/java/net/minecraft/server/level/ChunkHolder.java +@@ -96,6 +96,25 @@ public class ChunkHolder { + + public final io.papermc.paper.chunk.system.scheduling.NewChunkHolder newChunkHolder; // Paper - rewrite chunk system + ++ // Paper start - replace player chunk loader ++ private final com.destroystokyo.paper.util.maplist.ReferenceList playersSentChunkTo = new com.destroystokyo.paper.util.maplist.ReferenceList<>(); ++ ++ public void addPlayer(ServerPlayer player) { ++ if (!this.playersSentChunkTo.add(player)) { ++ throw new IllegalStateException("Already sent chunk " + this.pos + " in world '" + this.chunkMap.level.getWorld().getName() + "' to player " + player); ++ } ++ } ++ ++ public void removePlayer(ServerPlayer player) { ++ if (!this.playersSentChunkTo.remove(player)) { ++ throw new IllegalStateException("Have not sent chunk " + this.pos + " in world '" + this.chunkMap.level.getWorld().getName() + "' to player " + player); ++ } ++ } ++ ++ public boolean hasChunkBeenSent() { ++ return this.playersSentChunkTo.size() != 0; ++ } ++ // Paper end - replace player chunk loader + public ChunkHolder(ChunkPos pos, LevelHeightAccessor world, LevelLightEngine lightingProvider, ChunkHolder.PlayerProvider playersWatchingChunkProvider, io.papermc.paper.chunk.system.scheduling.NewChunkHolder newChunkHolder) { // Paper - rewrite chunk system + this.newChunkHolder = newChunkHolder; // Paper - rewrite chunk system + this.chunkToSaveHistory = null; +@@ -193,6 +212,11 @@ public class ChunkHolder { + // Paper - rewrite chunk system + + public void blockChanged(BlockPos pos) { ++ // Paper start - replace player chunk loader ++ if (this.playersSentChunkTo.size() == 0) { ++ return; ++ } ++ // Paper end - replace player chunk loader + LevelChunk chunk = this.getSendingChunk(); // Paper - no-tick view distance + + if (chunk != null) { +@@ -219,7 +243,7 @@ public class ChunkHolder { + 
LevelChunk chunk = this.getSendingChunk(); + // Paper end - no-tick view distance + +- if (chunk != null) { ++ if (this.playersSentChunkTo.size() != 0 && chunk != null) { // Paper - replace player chunk loader + int j = this.lightEngine.getMinLightSection(); + int k = this.lightEngine.getMaxLightSection(); + +@@ -316,25 +340,12 @@ public class ChunkHolder { + + // Paper start - rewrite chunk system + public List getPlayers(boolean onlyOnWatchDistanceEdge){ +- // Paper start - per player view distance + List ret = new java.util.ArrayList<>(); +- // there can be potential desync with player's last mapped section and the view distance map, so use the +- // view distance map here. +- com.destroystokyo.paper.util.misc.PlayerAreaMap viewDistanceMap = this.chunkMap.playerChunkManager.broadcastMap; // Paper - replace old player chunk manager +- com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet players = viewDistanceMap.getObjectsInRange(this.pos); +- if (players == null) { +- return ret; +- } +- +- Object[] backingSet = players.getBackingSet(); +- for (int i = 0, len = backingSet.length; i < len; ++i) { +- if (!(backingSet[i] instanceof ServerPlayer player)) { ++ for (int i = 0, len = this.playersSentChunkTo.size(); i < len; ++i) { ++ ServerPlayer player = this.playersSentChunkTo.getUnchecked(i); ++ if (onlyOnWatchDistanceEdge && !this.chunkMap.level.playerChunkLoader.isChunkSent(player, this.pos.x, this.pos.z, onlyOnWatchDistanceEdge)) { + continue; + } +- if (!this.chunkMap.playerChunkManager.isChunkSent(player, this.pos.x, this.pos.z, onlyOnWatchDistanceEdge)) { +- continue; +- } +- + ret.add(player); + } + +diff --git a/src/main/java/net/minecraft/server/level/ChunkMap.java b/src/main/java/net/minecraft/server/level/ChunkMap.java +index 284393fd4ae0a7562b6bc9b60cf2c141a2de3a58..a502d293cedb2f507e6cf1792429b36685ed1910 100644 +--- a/src/main/java/net/minecraft/server/level/ChunkMap.java ++++ b/src/main/java/net/minecraft/server/level/ChunkMap.java +@@ -156,17 +156,16 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + + // Paper start - distance maps + private final com.destroystokyo.paper.util.misc.PooledLinkedHashSets pooledLinkedPlayerHashSets = new com.destroystokyo.paper.util.misc.PooledLinkedHashSets<>(); +- public final io.papermc.paper.chunk.PlayerChunkLoader playerChunkManager = new io.papermc.paper.chunk.PlayerChunkLoader(this, this.pooledLinkedPlayerHashSets); // Paper - replace chunk loader + + void addPlayerToDistanceMaps(ServerPlayer player) { +- this.playerChunkManager.addPlayer(player); // Paper - replace chunk loader ++ this.level.playerChunkLoader.addPlayer(player); // Paper - replace chunk loader + int chunkX = MCUtil.getChunkCoordinate(player.getX()); + int chunkZ = MCUtil.getChunkCoordinate(player.getZ()); + // Note: players need to be explicitly added to distance maps before they can be updated + } + + void removePlayerFromDistanceMaps(ServerPlayer player) { +- this.playerChunkManager.removePlayer(player); // Paper - replace chunk loader ++ this.level.playerChunkLoader.removePlayer(player); // Paper - replace chunk loader + + } + +@@ -174,7 +173,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + int chunkX = MCUtil.getChunkCoordinate(player.getX()); + int chunkZ = MCUtil.getChunkCoordinate(player.getZ()); + // Note: players need to be explicitly added to distance maps before they can be updated +- this.playerChunkManager.updatePlayer(player); // Paper - replace chunk 
loader ++ this.level.playerChunkLoader.updatePlayer(player); // Paper - replace chunk loader + } + // Paper end + // Paper start +@@ -573,7 +572,11 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + + // Paper start - replace player loader system + public void setTickViewDistance(int distance) { +- this.playerChunkManager.setTickDistance(distance); ++ this.level.playerChunkLoader.setTickDistance(distance); ++ } ++ ++ public void setSendViewDistance(int distance) { ++ this.level.playerChunkLoader.setSendDistance(distance); + } + // Paper end - replace player loader system + public void setViewDistance(int watchDistance) { +@@ -583,20 +586,24 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + int k = this.viewDistance; + + this.viewDistance = j; +- this.playerChunkManager.setLoadDistance(this.viewDistance); // Paper - replace player loader system ++ this.level.playerChunkLoader.setLoadDistance(this.viewDistance); // Paper - replace player loader system + } + + } + + public void updateChunkTracking(ServerPlayer player, ChunkPos pos, MutableObject packet, boolean oldWithinViewDistance, boolean newWithinViewDistance) { // Paper - public ++ io.papermc.paper.util.TickThread.ensureTickThread(this.level, pos, "May not update chunk tracking for chunk async"); // Paper - replace chunk loader system ++ io.papermc.paper.util.TickThread.ensureTickThread(player, "May not update chunk tracking for player async"); // Paper - replace chunk loader system + if (player.level() == this.level) { ++ ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos.toLong()); // Paper - replace chunk loader system - move up + if (newWithinViewDistance && !oldWithinViewDistance) { +- ChunkHolder playerchunk = this.getVisibleChunkIfPresent(pos.toLong()); ++ // Paper - replace chunk loader system - move up + + if (playerchunk != null) { + LevelChunk chunk = playerchunk.getSendingChunk(); // Paper - replace chunk loader system + + if (chunk != null) { ++ playerchunk.addPlayer(player); // Paper - replace chunk loader system + this.playerLoadedChunk(player, packet, chunk); + } + +@@ -605,10 +612,17 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + } + + if (!newWithinViewDistance && oldWithinViewDistance) { ++ // Paper start - replace chunk loader system ++ if (playerchunk != null) { ++ playerchunk.removePlayer(player); ++ } else { ++ LOGGER.warn("ChunkHolder at " + pos + " in world '" + this.level.getWorld().getName() + "' does not exist to untrack chunk for " + player, new Throwable()); ++ } ++ // Paper end - replace chunk loader system + player.untrackChunk(pos); + } + +- } ++ } else { LOGGER.warn("Mismatch in world for chunk " + pos + " in world '" + this.level.getWorld().getName() + "' for player " + player, new Throwable()); } // Paper - replace chunk loader system + } + + public int size() { +@@ -842,34 +856,18 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + // Paper - replaced by PlayerChunkLoader + + this.updateMaps(player); // Paper - distance maps +- this.playerChunkManager.updatePlayer(player); // Paper - respond to movement immediately + + } + + @Override + public List getPlayers(ChunkPos chunkPos, boolean onlyOnWatchDistanceEdge) { + // Paper start - per player view distance +- // there can be potential desync with player's last mapped section and the view distance map, so use the +- // view distance map here. 
+- com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet players = this.playerChunkManager.broadcastMap.getObjectsInRange(chunkPos); +- if (players == null) { +- return java.util.Collections.emptyList(); +- } +- +- List ret = new java.util.ArrayList<>(players.size()); +- +- Object[] backingSet = players.getBackingSet(); +- for (int i = 0, len = backingSet.length; i < len; ++i) { +- if (!(backingSet[i] instanceof ServerPlayer player)) { +- continue; +- } +- if (!this.playerChunkManager.isChunkSent(player, chunkPos.x, chunkPos.z, onlyOnWatchDistanceEdge)) { +- continue; +- } +- ret.add(player); ++ ChunkHolder holder = this.getVisibleChunkIfPresent(chunkPos.toLong()); ++ if (holder == null) { ++ return new java.util.ArrayList<>(); ++ } else { ++ return holder.getPlayers(onlyOnWatchDistanceEdge); + } +- +- return ret; + // Paper end - per player view distance + } + +@@ -1179,7 +1177,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider + org.spigotmc.AsyncCatcher.catchOp("player tracker update"); // Spigot + if (player != this.entity) { + Vec3 vec3d = player.position().subtract(this.entity.position()); +- double d0 = (double) Math.min(this.getEffectiveRange(), io.papermc.paper.chunk.PlayerChunkLoader.getSendViewDistance(player) * 16); // Paper - per player view distance ++ double d0 = (double) Math.min(this.getEffectiveRange(), io.papermc.paper.chunk.system.ChunkSystem.getSendViewDistance(player) * 16); // Paper - per player view distance + double d1 = vec3d.x * vec3d.x + vec3d.z * vec3d.z; + double d2 = d0 * d0; + boolean flag = d1 <= d2 && this.entity.broadcastToPlayer(player); +diff --git a/src/main/java/net/minecraft/server/level/DistanceManager.java b/src/main/java/net/minecraft/server/level/DistanceManager.java +index 32a07573ee23f01c98e684aefb45ff72802c9db6..20d600d29c2f2e47c798721d1f151e625b12acc3 100644 +--- a/src/main/java/net/minecraft/server/level/DistanceManager.java ++++ b/src/main/java/net/minecraft/server/level/DistanceManager.java +@@ -178,17 +178,17 @@ public abstract class DistanceManager { + } + + protected void updatePlayerTickets(int viewDistance) { +- this.chunkMap.playerChunkManager.setTargetNoTickViewDistance(viewDistance); // Paper - route to player chunk manager ++ this.chunkMap.setViewDistance(viewDistance);// Paper - route to player chunk manager + } + + // Paper start + public int getSimulationDistance() { +- return this.chunkMap.playerChunkManager.getTargetTickViewDistance(); // Paper - route to player chunk manager ++ return this.chunkMap.level.playerChunkLoader.getAPITickDistance(); + } + // Paper end + + public void updateSimulationDistance(int simulationDistance) { +- this.chunkMap.playerChunkManager.setTargetTickViewDistance(simulationDistance); // Paper - route to player chunk manager ++ this.chunkMap.level.playerChunkLoader.setTickDistance(simulationDistance); // Paper - route to player chunk manager + } + + public int getNaturalSpawnChunkCount() { +diff --git a/src/main/java/net/minecraft/server/level/ServerChunkCache.java b/src/main/java/net/minecraft/server/level/ServerChunkCache.java +index bb7b7f904639ac964f9c2d1bd5a660d8b397f647..caff28e2446177d622c999b84d8889fbf61d0b3d 100644 +--- a/src/main/java/net/minecraft/server/level/ServerChunkCache.java ++++ b/src/main/java/net/minecraft/server/level/ServerChunkCache.java +@@ -661,7 +661,7 @@ public class ServerChunkCache extends ChunkSource { + this.level.getProfiler().popPush("chunks"); + if (tickChunks) { + 
this.level.timings.chunks.startTiming(); // Paper - timings +- this.chunkMap.playerChunkManager.tick(); // Paper - this is mostly is to account for view distance changes ++ this.chunkMap.level.playerChunkLoader.tick(); // Paper - replace player chunk loader - this is mostly required to account for view distance changes + this.tickChunks(); + this.level.timings.chunks.stopTiming(); // Paper - timings + } +@@ -929,7 +929,7 @@ public class ServerChunkCache extends ChunkSource { + @Override + // CraftBukkit start - process pending Chunk loadCallback() and unloadCallback() after each run task + public boolean pollTask() { +- ServerChunkCache.this.chunkMap.playerChunkManager.tickMidTick(); ++ // Paper - replace player chunk loader + if (ServerChunkCache.this.runDistanceManagerUpdates()) { + return true; + } +diff --git a/src/main/java/net/minecraft/server/level/ServerLevel.java b/src/main/java/net/minecraft/server/level/ServerLevel.java +index d7172df2489f2eb325120d950dcff32cc483db56..62a95a0fac59683948f34b202e6e3859b6652d6d 100644 +--- a/src/main/java/net/minecraft/server/level/ServerLevel.java ++++ b/src/main/java/net/minecraft/server/level/ServerLevel.java +@@ -423,6 +423,48 @@ public class ServerLevel extends Level implements WorldGenLevel { + } + // Paper end - rewrite chunk system + ++ public final io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader playerChunkLoader = new io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader(this); ++ private final java.util.concurrent.atomic.AtomicReference viewDistances = new java.util.concurrent.atomic.AtomicReference<>(new io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.ViewDistances(-1, -1, -1)); ++ ++ public io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.ViewDistances getViewDistances() { ++ return this.viewDistances.get(); ++ } ++ ++ private void updateViewDistance(final java.util.function.Function update) { ++ for (io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.ViewDistances curr = this.viewDistances.get();;) { ++ if (this.viewDistances.compareAndSet(curr, update.apply(curr))) { ++ return; ++ } ++ } ++ } ++ ++ public void setTickViewDistance(final int distance) { ++ if ((distance < io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE)) { ++ throw new IllegalArgumentException("Tick view distance must be a number between " + io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE) + ", got: " + distance); ++ } ++ this.updateViewDistance((input) -> { ++ return input.setTickViewDistance(distance); ++ }); ++ } ++ ++ public void setLoadViewDistance(final int distance) { ++ if (distance != -1 && (distance < io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1)) { ++ throw new IllegalArgumentException("Load view distance must be a number between " + io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance); ++ } ++ this.updateViewDistance((input) -> { ++ return input.setLoadViewDistance(distance); ++ }); ++ } ++ ++ public void setSendViewDistance(final int distance) { ++ if (distance != -1 && (distance < 
io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1)) { ++ throw new IllegalArgumentException("Send view distance must be a number between " + io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance); ++ } ++ this.updateViewDistance((input) -> { ++ return input.setSendViewDistance(distance); ++ }); ++ } ++ + // Add env and gen to constructor, IWorldDataServer -> WorldDataServer + public ServerLevel(MinecraftServer minecraftserver, Executor executor, LevelStorageSource.LevelStorageAccess convertable_conversionsession, PrimaryLevelData iworlddataserver, ResourceKey resourcekey, LevelStem worlddimension, ChunkProgressListener worldloadlistener, boolean flag, long i, List list, boolean flag1, @Nullable RandomSequences randomsequences, org.bukkit.World.Environment env, org.bukkit.generator.ChunkGenerator gen, org.bukkit.generator.BiomeProvider biomeProvider) { + // IRegistryCustom.Dimension iregistrycustom_dimension = minecraftserver.registryAccess(); // CraftBukkit - decompile error +diff --git a/src/main/java/net/minecraft/server/level/ServerPlayer.java b/src/main/java/net/minecraft/server/level/ServerPlayer.java +index 5872ead2fe3a64f02f8bc36603fbb856728fd255..32ef9f1ae0c35e927133572ebb6fbf50b0729a63 100644 +--- a/src/main/java/net/minecraft/server/level/ServerPlayer.java ++++ b/src/main/java/net/minecraft/server/level/ServerPlayer.java +@@ -260,6 +260,48 @@ public class ServerPlayer extends Player { + public boolean isRealPlayer; // Paper + public final com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet cachedSingleHashSet; // Paper + ++ private final java.util.concurrent.atomic.AtomicReference viewDistances = new java.util.concurrent.atomic.AtomicReference<>(new io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.ViewDistances(-1, -1, -1)); ++ public io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.PlayerChunkLoaderData chunkLoader; ++ ++ public io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.ViewDistances getViewDistances() { ++ return this.viewDistances.get(); ++ } ++ ++ private void updateViewDistance(final java.util.function.Function update) { ++ for (io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.ViewDistances curr = this.viewDistances.get();;) { ++ if (this.viewDistances.compareAndSet(curr, update.apply(curr))) { ++ return; ++ } ++ } ++ } ++ ++ public void setTickViewDistance(final int distance) { ++ if ((distance < io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE)) { ++ throw new IllegalArgumentException("Tick view distance must be a number between " + io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE) + ", got: " + distance); ++ } ++ this.updateViewDistance((input) -> { ++ return input.setTickViewDistance(distance); ++ }); ++ } ++ ++ public void setLoadViewDistance(final int distance) { ++ if (distance != -1 && (distance < io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1)) { ++ throw new 
IllegalArgumentException("Load view distance must be a number between " + io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance); ++ } ++ this.updateViewDistance((input) -> { ++ return input.setLoadViewDistance(distance); ++ }); ++ } ++ ++ public void setSendViewDistance(final int distance) { ++ if (distance != -1 && (distance < io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE || distance > io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1)) { ++ throw new IllegalArgumentException("Send view distance must be a number between " + io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MIN_VIEW_DISTANCE + " and " + (io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.MAX_VIEW_DISTANCE + 1) + " or -1, got: " + distance); ++ } ++ this.updateViewDistance((input) -> { ++ return input.setSendViewDistance(distance); ++ }); ++ } ++ + public ServerPlayer(MinecraftServer server, ServerLevel world, GameProfile profile) { + super(world, world.getSharedSpawnPos(), world.getSharedSpawnAngle(), profile); + this.chatVisibility = ChatVisiblity.FULL; +diff --git a/src/main/java/net/minecraft/server/players/PlayerList.java b/src/main/java/net/minecraft/server/players/PlayerList.java +index d4f44635b3cb7ac890ea89b6f4454fa9d4375c08..0338a6b245ee482d470f5a80da712679ab9890fa 100644 +--- a/src/main/java/net/minecraft/server/players/PlayerList.java ++++ b/src/main/java/net/minecraft/server/players/PlayerList.java +@@ -262,7 +262,7 @@ public abstract class PlayerList { + boolean flag1 = gamerules.getBoolean(GameRules.RULE_REDUCEDDEBUGINFO); + + // Spigot - view distance +- playerconnection.send(new ClientboundLoginPacket(player.getId(), worlddata.isHardcore(), player.gameMode.getGameModeForPlayer(), player.gameMode.getPreviousGameModeForPlayer(), this.server.levelKeys(), this.synchronizedRegistries, worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), this.getMaxPlayers(), worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance(), worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance(), flag1, !flag, worldserver1.isDebug(), worldserver1.isFlat(), player.getLastDeathLocation(), player.getPortalCooldown())); // Paper - replace old player chunk management ++ playerconnection.send(new ClientboundLoginPacket(player.getId(), worlddata.isHardcore(), player.gameMode.getGameModeForPlayer(), player.gameMode.getPreviousGameModeForPlayer(), this.server.levelKeys(), this.synchronizedRegistries, worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), this.getMaxPlayers(), worldserver1.getWorld().getSendViewDistance(), worldserver1.getWorld().getSimulationDistance(), flag1, !flag, worldserver1.isDebug(), worldserver1.isFlat(), player.getLastDeathLocation(), player.getPortalCooldown())); // Paper - replace old player chunk management + player.getBukkitEntity().sendSupportedChannels(); // CraftBukkit + playerconnection.send(new ClientboundUpdateEnabledFeaturesPacket(FeatureFlags.REGISTRY.toNames(worldserver1.enabledFeatures()))); + playerconnection.send(new ClientboundCustomPayloadPacket(ClientboundCustomPayloadPacket.BRAND, (new FriendlyByteBuf(Unpooled.buffer())).writeUtf(this.getServer().getServerModName()))); +@@ -800,8 +800,8 @@ public abstract class 
PlayerList { + // CraftBukkit start + LevelData worlddata = worldserver1.getLevelData(); + entityplayer1.connection.send(new ClientboundRespawnPacket(worldserver1.dimensionTypeId(), worldserver1.dimension(), BiomeManager.obfuscateSeed(worldserver1.getSeed()), entityplayer1.gameMode.getGameModeForPlayer(), entityplayer1.gameMode.getPreviousGameModeForPlayer(), worldserver1.isDebug(), worldserver1.isFlat(), (byte) i, entityplayer1.getLastDeathLocation(), entityplayer1.getPortalCooldown())); +- entityplayer1.connection.send(new ClientboundSetChunkCacheRadiusPacket(worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance())); // Spigot // Paper - replace old player chunk management +- entityplayer1.connection.send(new ClientboundSetSimulationDistancePacket(worldserver1.getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance())); // Spigot // Paper - replace old player chunk management ++ entityplayer1.connection.send(new ClientboundSetChunkCacheRadiusPacket(worldserver1.getWorld().getSendViewDistance())); // Spigot // Paper - replace old player chunk management ++ entityplayer1.connection.send(new ClientboundSetSimulationDistancePacket(worldserver1.getWorld().getSimulationDistance())); // Spigot // Paper - replace old player chunk management + entityplayer1.spawnIn(worldserver1); + entityplayer1.unsetRemoved(); + entityplayer1.connection.teleport(CraftLocation.toBukkit(entityplayer1.position(), worldserver1.getWorld(), entityplayer1.getYRot(), entityplayer1.getXRot())); +diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java +index 0276c32ef8323bcf82eb3400bb003a93b8a56de0..5988c0847af4e8f0094328e91f736f25d567db60 100644 +--- a/src/main/java/net/minecraft/world/level/Level.java ++++ b/src/main/java/net/minecraft/world/level/Level.java +@@ -455,7 +455,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { + this.sendBlockUpdated(blockposition, iblockdata1, iblockdata, i); + // Paper start - per player view distance - allow block updates for non-ticking chunks in player view distance + // if copied from above +- } else if ((i & 2) != 0 && (!this.isClientSide || (i & 4) == 0) && (this.isClientSide || chunk == null || ((ServerLevel)this).getChunkSource().chunkMap.playerChunkManager.broadcastMap.getObjectsInRange(io.papermc.paper.util.MCUtil.getCoordinateKey(blockposition)) != null)) { // Paper - replace old player chunk management ++ } else if ((i & 2) != 0 && (!this.isClientSide || (i & 4) == 0)) { // Paper - replace old player chunk management + ((ServerLevel)this).getChunkSource().blockChanged(blockposition); + // Paper end - per player view distance + } +diff --git a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java +index 41754f6c8162df07e2b22f4ffcacd8b6158a864e..3da04db71d6f33b2f466c11e031e0a11c298379b 100644 +--- a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java ++++ b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java +@@ -172,43 +172,6 @@ public class LevelChunk extends ChunkAccess { + + protected void onNeighbourChange(final long bitsetBefore, final long bitsetAfter) { + +- // Paper start - no-tick view distance +- ServerChunkCache chunkProviderServer = ((ServerLevel)this.level).getChunkSource(); +- net.minecraft.server.level.ChunkMap chunkMap = chunkProviderServer.chunkMap; +- // this code handles the addition of ticking tickets - the distance map handles the removal +- 
if (!areNeighboursLoaded(bitsetBefore, 2) && areNeighboursLoaded(bitsetAfter, 2)) { +- if (chunkMap.playerChunkManager.tickMap.getObjectsInRange(this.coordinateKey) != null) { // Paper - replace old player chunk loading system +- // now we're ready for entity ticking +- chunkProviderServer.mainThreadProcessor.execute(() -> { +- // double check that this condition still holds. +- if (LevelChunk.this.areNeighboursLoaded(2) && chunkMap.playerChunkManager.tickMap.getObjectsInRange(LevelChunk.this.coordinateKey) != null) { // Paper - replace old player chunk loading system +- chunkMap.playerChunkManager.onChunkPlayerTickReady(this.chunkPos.x, this.chunkPos.z); // Paper - replace old player chunk +- chunkProviderServer.addTicketAtLevel(net.minecraft.server.level.TicketType.PLAYER, LevelChunk.this.chunkPos, 31, LevelChunk.this.chunkPos); // 31 -> entity ticking, TODO check on update +- } +- }); +- } +- } +- +- // this code handles the chunk sending +- if (!areNeighboursLoaded(bitsetBefore, 1) && areNeighboursLoaded(bitsetAfter, 1)) { +- // Paper start - replace old player chunk loading system +- if (chunkMap.playerChunkManager.isChunkNearPlayers(this.chunkPos.x, this.chunkPos.z)) { +- // the post processing is expensive, so we don't want to run it unless we're actually near +- // a player. +- chunkProviderServer.mainThreadProcessor.execute(() -> { +- if (!LevelChunk.this.areNeighboursLoaded(1)) { +- return; +- } +- LevelChunk.this.postProcessGeneration(); +- if (!LevelChunk.this.areNeighboursLoaded(1)) { +- return; +- } +- chunkMap.playerChunkManager.onChunkSendReady(this.chunkPos.x, this.chunkPos.z); +- }); +- } +- // Paper end - replace old player chunk loading system +- } +- // Paper end - no-tick view distance + } + + public final boolean isAnyNeighborsLoaded() { +@@ -781,7 +744,6 @@ public class LevelChunk extends ChunkAccess { + // Paper - rewrite chunk system - move into separate callback + org.bukkit.Server server = this.level.getCraftServer(); + // Paper - rewrite chunk system - move into separate callback +- ((ServerLevel)this.level).getChunkSource().chunkMap.playerChunkManager.onChunkLoad(this.chunkPos.x, this.chunkPos.z); // Paper - rewrite player chunk management + if (server != null) { + /* + * If it's a new world, the first few chunks are generated inside +@@ -956,6 +918,7 @@ public class LevelChunk extends ChunkAccess { + BlockState iblockdata1 = Block.updateFromNeighbourShapes(iblockdata, this.level, blockposition); + + this.level.setBlock(blockposition, iblockdata1, 20); ++ if (iblockdata1 != iblockdata) this.level.chunkSource.blockChanged(blockposition); // Paper - replace player chunk loader - notify since we send before processing full updates + } + } + +@@ -975,7 +938,6 @@ public class LevelChunk extends ChunkAccess { + this.upgradeData.upgrade(this); + } finally { // Paper start - replace chunk loader system + this.isPostProcessingDone = true; +- this.level.getChunkSource().chunkMap.playerChunkManager.onChunkPostProcessing(this.chunkPos.x, this.chunkPos.z); + } + // Paper end - replace chunk loader system + } +diff --git a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java +index 49623627555cb2b18ea8f7e17d0f6c1db54c0be4..7c4d43096031a3c93d5f835922b19d5643005128 100644 +--- a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java ++++ b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java +@@ -1947,12 +1947,12 @@ public class CraftWorld extends CraftRegionAccessor implements World { + // Spigot start + @Override + public 
int getViewDistance() { +- return getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetNoTickViewDistance(); // Paper - replace old player chunk management ++ return this.getHandle().playerChunkLoader.getAPIViewDistance(); // Paper - replace player chunk loader + } + + @Override + public int getSimulationDistance() { +- return getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetTickViewDistance(); // Paper - replace old player chunk management ++ return this.getHandle().playerChunkLoader.getAPITickDistance(); // Paper - replace player chunk loader + } + // Spigot end + // Paper start - view distance api +@@ -1986,12 +1986,12 @@ public class CraftWorld extends CraftRegionAccessor implements World { + + @Override + public int getSendViewDistance() { +- return getHandle().getChunkSource().chunkMap.playerChunkManager.getTargetSendDistance(); ++ return this.getHandle().playerChunkLoader.getAPISendViewDistance(); // Paper - replace player chunk loader + } + + @Override + public void setSendViewDistance(int viewDistance) { +- getHandle().getChunkSource().chunkMap.playerChunkManager.setSendDistance(viewDistance); ++ this.getHandle().chunkSource.chunkMap.setSendViewDistance(viewDistance); // Paper - replace player chunk loader + } + // Paper end - view distance api + +diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +index 2f35f954eb2dcdc2de54b54c47c139908438e0f0..976eadd8200b2f4811d57b3c7fbd68cff1333924 100644 +--- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java ++++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +@@ -187,44 +187,22 @@ public class CraftPlayer extends CraftHumanEntity implements Player { + // Paper start - implement view distances + @Override + public int getViewDistance() { +- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; +- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); +- if (data == null) { +- return chunkMap.playerChunkManager.getTargetNoTickViewDistance(); +- } +- return data.getTargetNoTickViewDistance(); ++ return io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.getAPIViewDistance(this); + } + + @Override + public void setViewDistance(int viewDistance) { +- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; +- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); +- if (data == null) { +- throw new IllegalStateException("Player is not attached to world"); +- } +- +- data.setTargetNoTickViewDistance(viewDistance); ++ this.getHandle().setLoadViewDistance(viewDistance < 0 ? 
viewDistance : viewDistance + 1); + } + + @Override + public int getSimulationDistance() { +- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; +- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); +- if (data == null) { +- return chunkMap.playerChunkManager.getTargetTickViewDistance(); +- } +- return data.getTargetTickViewDistance(); ++ return io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.getAPITickViewDistance(this); + } + + @Override + public void setSimulationDistance(int simulationDistance) { +- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; +- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); +- if (data == null) { +- throw new IllegalStateException("Player is not attached to world"); +- } +- +- data.setTargetTickViewDistance(simulationDistance); ++ this.getHandle().setTickViewDistance(simulationDistance); + } + + @Override +@@ -239,23 +217,12 @@ public class CraftPlayer extends CraftHumanEntity implements Player { + + @Override + public int getSendViewDistance() { +- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; +- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); +- if (data == null) { +- return chunkMap.playerChunkManager.getTargetSendDistance(); +- } +- return data.getTargetSendViewDistance(); ++ return io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.getAPISendViewDistance(this); + } + + @Override + public void setSendViewDistance(int viewDistance) { +- net.minecraft.server.level.ChunkMap chunkMap = this.getHandle().getLevel().getChunkSource().chunkMap; +- io.papermc.paper.chunk.PlayerChunkLoader.PlayerLoaderData data = chunkMap.playerChunkManager.getData(this.getHandle()); +- if (data == null) { +- throw new IllegalStateException("Player is not attached to world"); +- } +- +- data.setTargetSendViewDistance(viewDistance); ++ this.getHandle().setSendViewDistance(viewDistance); + } + // Paper end - implement view distances + diff --git a/patches/server/0020-Configurable-baby-zombie-movement-speed.patch b/patches/server/0022-Configurable-baby-zombie-movement-speed.patch similarity index 100% rename from patches/server/0020-Configurable-baby-zombie-movement-speed.patch rename to patches/server/0022-Configurable-baby-zombie-movement-speed.patch diff --git a/patches/server/0023-Make-ChunkStatus.EMPTY-not-rely-on-the-main-thread-f.patch b/patches/server/0023-Make-ChunkStatus.EMPTY-not-rely-on-the-main-thread-f.patch new file mode 100644 index 0000000000..e5364a1587 --- /dev/null +++ b/patches/server/0023-Make-ChunkStatus.EMPTY-not-rely-on-the-main-thread-f.patch @@ -0,0 +1,397 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Spottedleaf +Date: Thu, 16 Feb 2023 16:50:05 -0800 +Subject: [PATCH] Make ChunkStatus.EMPTY not rely on the main thread for + completion + +In order to do this, we need to push the POI consistency checks +to a later status. Since FULL is the only other status that +uses the main thread, it can go there. + +The consistency checks are only really for when a desync occurs, +and so that delaying the check only matters when the chunk data +has desync'd. 
As long as the desync is sorted before the +chunk is fully loaded (i.e. before setBlock can occur on +a chunk), it should not matter. + +This change is primarily due to behavioural changes +in the chunk task queue brought by region threading - +which is to split the queue into separate regions. As such, +it is required that, in order for the sync load to complete, +the region owning the chunk drain and execute the task +while ticking. However, that is not always possible in +region threading. Thus, removing the main thread reliance allows +the chunk to progress without requiring a tick thread. +Specifically, this allows far sync loads (outside of a specific +region's bounds) to occur without issue - namely with structure +searching. + +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java +index fb42d776f15f735fb59e972e00e2b512c23a8387..300700477ee34bc22b31315825c0e40f61070cd5 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java +@@ -2,6 +2,8 @@ package io.papermc.paper.chunk.system.scheduling; + + import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; + import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; ++import com.mojang.logging.LogUtils; ++import io.papermc.paper.chunk.system.poi.PoiChunk; + import net.minecraft.server.level.ChunkMap; + import net.minecraft.server.level.ServerLevel; + import net.minecraft.world.level.chunk.ChunkAccess; +@@ -9,10 +11,13 @@ import net.minecraft.world.level.chunk.ChunkStatus; + import net.minecraft.world.level.chunk.ImposterProtoChunk; + import net.minecraft.world.level.chunk.LevelChunk; + import net.minecraft.world.level.chunk.ProtoChunk; ++import org.slf4j.Logger; + import java.lang.invoke.VarHandle; + + public final class ChunkFullTask extends ChunkProgressionTask implements Runnable { + ++ private static final Logger LOGGER = LogUtils.getClassLogger(); ++ + protected final NewChunkHolder chunkHolder; + protected final ChunkAccess fromChunk; + protected final PrioritisedExecutor.PrioritisedTask convertToFullTask; +@@ -35,6 +40,15 @@ public final class ChunkFullTask extends ChunkProgressionTask implements Runnabl + // See Vanilla protoChunkToFullChunk for what this function should be doing + final LevelChunk chunk; + try { ++ // moved from the load from nbt stage into here ++ final PoiChunk poiChunk = this.chunkHolder.getPoiChunk(); ++ if (poiChunk == null) { ++ LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString()); ++ } else { ++ poiChunk.load(); ++ this.world.getPoiManager().checkConsistency(this.fromChunk); ++ } ++ + if (this.fromChunk instanceof ImposterProtoChunk wrappedFull) { + chunk = wrappedFull.getWrapped(); + } else { +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java +index be6f3f6a57668a9bd50d0ea5f2dd2335355b69d6..1f7c146ff0b2a835c818f49da6c1f1411f26aa39 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java +@@ -25,6 +25,7 @@ import org.slf4j.Logger; + import java.lang.invoke.VarHandle; + import java.util.Map; + import java.util.concurrent.atomic.AtomicInteger; ++import java.util.concurrent.atomic.AtomicLong; + import 
java.util.function.Consumer; + + public final class ChunkLoadTask extends ChunkProgressionTask { +@@ -34,9 +35,11 @@ public final class ChunkLoadTask extends ChunkProgressionTask { + private final NewChunkHolder chunkHolder; + private final ChunkDataLoadTask loadTask; + +- private boolean cancelled; ++ private volatile boolean cancelled; + private NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask; + private NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask; ++ private GenericDataLoadTask.TaskResult loadResult; ++ private final AtomicInteger taskCountToComplete = new AtomicInteger(3); // one for poi, one for entity, and one for chunk data + + protected ChunkLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ, + final NewChunkHolder chunkHolder, final PrioritisedExecutor.Priority priority) { +@@ -44,10 +47,18 @@ public final class ChunkLoadTask extends ChunkProgressionTask { + this.chunkHolder = chunkHolder; + this.loadTask = new ChunkDataLoadTask(scheduler, world, chunkX, chunkZ, priority); + this.loadTask.addCallback((final GenericDataLoadTask.TaskResult result) -> { +- ChunkLoadTask.this.complete(result == null ? null : result.left(), result == null ? null : result.right()); ++ ChunkLoadTask.this.loadResult = result; // must be before getAndDecrement ++ ChunkLoadTask.this.tryCompleteLoad(); + }); + } + ++ private void tryCompleteLoad() { ++ if (this.taskCountToComplete.decrementAndGet() == 0) { ++ final GenericDataLoadTask.TaskResult result = this.cancelled ? null : this.loadResult; // only after the getAndDecrement ++ ChunkLoadTask.this.complete(result == null ? null : result.left(), result == null ? null : result.right()); ++ } ++ } ++ + @Override + public ChunkStatus getTargetStatus() { + return ChunkStatus.EMPTY; +@@ -65,11 +76,8 @@ public final class ChunkLoadTask extends ChunkProgressionTask { + final NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask; + final NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask; + +- final AtomicInteger count = new AtomicInteger(); + final Consumer> scheduleLoadTask = (final GenericDataLoadTask.TaskResult result) -> { +- if (count.decrementAndGet() == 0) { +- ChunkLoadTask.this.loadTask.schedule(false); +- } ++ ChunkLoadTask.this.tryCompleteLoad(); + }; + + // NOTE: it is IMPOSSIBLE for getOrLoadEntityData/getOrLoadPoiData to complete synchronously, because +@@ -85,16 +93,16 @@ public final class ChunkLoadTask extends ChunkProgressionTask { + } + if (!this.chunkHolder.isEntityChunkNBTLoaded()) { + entityLoadTask = this.chunkHolder.getOrLoadEntityData((Consumer)scheduleLoadTask); +- count.setPlain(count.getPlain() + 1); + } else { + entityLoadTask = null; ++ this.taskCountToComplete.getAndDecrement(); // we know the chunk load is not done here, as it is not scheduled + } + + if (!this.chunkHolder.isPoiChunkLoaded()) { + poiLoadTask = this.chunkHolder.getOrLoadPoiData((Consumer)scheduleLoadTask); +- count.setPlain(count.getPlain() + 1); + } else { + poiLoadTask = null; ++ this.taskCountToComplete.getAndDecrement(); // we know the chunk load is not done here, as it is not scheduled + } + + this.entityLoadTask = entityLoadTask; +@@ -107,14 +115,11 @@ public final class ChunkLoadTask extends ChunkProgressionTask { + entityLoadTask.schedule(); + } + +- if (poiLoadTask != null) { ++ if (poiLoadTask != null) { + poiLoadTask.schedule(); + } + +- if (entityLoadTask == null && poiLoadTask == null) { +- // no need to wait on those, we can schedule now +- this.loadTask.schedule(false); 
+- } ++ this.loadTask.schedule(false); + } + + @Override +@@ -129,15 +134,20 @@ public final class ChunkLoadTask extends ChunkProgressionTask { + + /* + Note: The entityLoadTask/poiLoadTask do not complete when cancelled, +- but this is fine because if they are successfully cancelled then +- we will successfully cancel the load task, which will complete when cancelled ++ so we need to manually try to complete in those cases ++ It is also important to note that we set the cancelled field first, just in case ++ the chunk load task attempts to complete with a non-null value + */ + + if (this.entityLoadTask != null) { +- this.entityLoadTask.cancel(); ++ if (this.entityLoadTask.cancel()) { ++ this.tryCompleteLoad(); ++ } + } + if (this.poiLoadTask != null) { +- this.poiLoadTask.cancel(); ++ if (this.poiLoadTask.cancel()) { ++ this.tryCompleteLoad(); ++ } + } + this.loadTask.cancel(); + } +@@ -249,7 +259,7 @@ public final class ChunkLoadTask extends ChunkProgressionTask { + } + } + +- public final class ChunkDataLoadTask extends CallbackDataLoadTask { ++ public static final class ChunkDataLoadTask extends CallbackDataLoadTask { + protected ChunkDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, + final int chunkZ, final PrioritisedExecutor.Priority priority) { + super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.CHUNK_DATA, priority); +@@ -262,7 +272,7 @@ public final class ChunkLoadTask extends ChunkProgressionTask { + + @Override + protected boolean hasOnMain() { +- return true; ++ return false; + } + + @Override +@@ -272,35 +282,30 @@ public final class ChunkLoadTask extends ChunkProgressionTask { + + @Override + protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) { +- return this.scheduler.createChunkTask(this.chunkX, this.chunkZ, run, priority); ++ throw new UnsupportedOperationException(); + } + + @Override +- protected TaskResult completeOnMainOffMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) { +- if (data != null) { +- return null; +- } +- +- final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk(); +- if (poiChunk == null) { +- LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString()); +- } else if (!poiChunk.isLoaded()) { +- // need to call poiChunk.load() on main +- return null; +- } ++ protected TaskResult completeOnMainOffMain(final ChunkAccess data, final Throwable throwable) { ++ throw new UnsupportedOperationException(); ++ } + +- return new TaskResult<>(this.getEmptyChunk(), null); ++ private ProtoChunk getEmptyChunk() { ++ return new ProtoChunk( ++ new ChunkPos(this.chunkX, this.chunkZ), UpgradeData.EMPTY, this.world, ++ this.world.registryAccess().registryOrThrow(Registries.BIOME), (BlendingData)null ++ ); + } + + @Override +- protected TaskResult runOffMain(final CompoundTag data, final Throwable throwable) { ++ protected TaskResult runOffMain(final CompoundTag data, final Throwable throwable) { + if (throwable != null) { + LOGGER.error("Failed to load chunk data for task: " + this.toString() + ", chunk data will be lost", throwable); +- return new TaskResult<>(null, null); ++ return new TaskResult<>(this.getEmptyChunk(), null); + } + + if (data == null) { +- return new TaskResult<>(null, null); ++ return new TaskResult<>(this.getEmptyChunk(), null); + } + + // need to convert data, and then deserialize it +@@ -319,53 +324,18 @@ public final class ChunkLoadTask 
extends ChunkProgressionTask { + this.world, chunkMap.getPoiManager(), chunkPos, converted, true + ); + +- return new TaskResult<>(chunkHolder, null); ++ return new TaskResult<>(chunkHolder.protoChunk, null); + } catch (final ThreadDeath death) { + throw death; + } catch (final Throwable thr2) { + LOGGER.error("Failed to parse chunk data for task: " + this.toString() + ", chunk data will be lost", thr2); +- return new TaskResult<>(null, thr2); ++ return new TaskResult<>(this.getEmptyChunk(), null); + } + } + +- private ProtoChunk getEmptyChunk() { +- return new ProtoChunk( +- new ChunkPos(this.chunkX, this.chunkZ), UpgradeData.EMPTY, this.world, +- this.world.registryAccess().registryOrThrow(Registries.BIOME), (BlendingData)null +- ); +- } +- + @Override +- protected TaskResult runOnMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) { +- final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk(); +- if (poiChunk == null) { +- LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString()); +- } else { +- poiChunk.load(); +- } +- +- if (data == null || data.protoChunk == null) { +- // throwable could be non-null, but the off-main task will print its exceptions - so we don't need to care, +- // it's handled already +- +- return new TaskResult<>(this.getEmptyChunk(), null); +- } +- +- // have tasks to run (at this point, it's just the POI consistency checking) +- try { +- if (data.tasks != null) { +- for (int i = 0, len = data.tasks.size(); i < len; ++i) { +- data.tasks.poll().run(); +- } +- } +- +- return new TaskResult<>(data.protoChunk, null); +- } catch (final ThreadDeath death) { +- throw death; +- } catch (final Throwable thr2) { +- LOGGER.error("Failed to parse main tasks for task " + this.toString() + ", chunk data will be lost", thr2); +- return new TaskResult<>(this.getEmptyChunk(), null); +- } ++ protected TaskResult runOnMain(final ChunkAccess data, final Throwable throwable) { ++ throw new UnsupportedOperationException(); + } + } + +diff --git a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java +index f10ba4211cbdcc4f4ce3585c7cb3f80185e13b73..6f2c7baea0d1ac7813c7b85e1f5558573745762c 100644 +--- a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java ++++ b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java +@@ -310,6 +310,17 @@ public class PoiManager extends SectionStorage { + } + } + } ++ ++ public void checkConsistency(net.minecraft.world.level.chunk.ChunkAccess chunk) { ++ int chunkX = chunk.getPos().x; ++ int chunkZ = chunk.getPos().z; ++ int minY = io.papermc.paper.util.WorldUtil.getMinSection(chunk); ++ int maxY = io.papermc.paper.util.WorldUtil.getMaxSection(chunk); ++ LevelChunkSection[] sections = chunk.getSections(); ++ for (int section = minY; section <= maxY; ++section) { ++ this.checkConsistencyWithBlocks(SectionPos.of(chunkX, section, chunkZ), sections[section - minY]); ++ } ++ } + // Paper end - rewrite chunk system + + public void checkConsistencyWithBlocks(SectionPos sectionPos, LevelChunkSection chunkSection) { +diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java +index 55da32077d1db81ba197da0be5896da694f4bfa9..a7ee469bb2880a78540b79ae691ea449dfe22ce4 100644 +--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java ++++ 
b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java +@@ -98,13 +98,11 @@ public class ChunkSerializer { + public static final class InProgressChunkHolder { + + public final ProtoChunk protoChunk; +- public final java.util.ArrayDeque tasks; + + public CompoundTag poiData; + +- public InProgressChunkHolder(final ProtoChunk protoChunk, final java.util.ArrayDeque tasks) { ++ public InProgressChunkHolder(final ProtoChunk protoChunk) { + this.protoChunk = protoChunk; +- this.tasks = tasks; + } + } + // Paper end +@@ -112,12 +110,10 @@ public class ChunkSerializer { + public static ProtoChunk read(ServerLevel world, PoiManager poiStorage, ChunkPos chunkPos, CompoundTag nbt) { + // Paper start - add variant for async calls + InProgressChunkHolder holder = loadChunk(world, poiStorage, chunkPos, nbt, true); +- holder.tasks.forEach(Runnable::run); + return holder.protoChunk; + } + + public static InProgressChunkHolder loadChunk(ServerLevel world, PoiManager poiStorage, ChunkPos chunkPos, CompoundTag nbt, boolean distinguish) { +- java.util.ArrayDeque tasksToExecuteOnMain = new java.util.ArrayDeque<>(); + // Paper end + ChunkPos chunkcoordintpair1 = new ChunkPos(nbt.getInt("xPos"), nbt.getInt("zPos")); + +@@ -184,9 +180,7 @@ public class ChunkSerializer { + achunksection[k] = chunksection; + SectionPos sectionposition = SectionPos.of(chunkPos, b0); + +- tasksToExecuteOnMain.add(() -> { // Paper - delay this task since we're executing off-main +- poiStorage.checkConsistencyWithBlocks(sectionposition, chunksection); +- }); // Paper - delay this task since we're executing off-main ++ // Paper - rewrite chunk system - moved to final load stage + } + + boolean flag3 = nbttagcompound1.contains("BlockLight", 7); +@@ -332,7 +326,7 @@ public class ChunkSerializer { + } + + if (chunkstatus_type == ChunkStatus.ChunkType.LEVELCHUNK) { +- return new InProgressChunkHolder(new ImposterProtoChunk((LevelChunk) object1, false), tasksToExecuteOnMain); // Paper - Async chunk loading ++ return new InProgressChunkHolder(new ImposterProtoChunk((LevelChunk) object1, false)); // Paper - Async chunk loading + } else { + ProtoChunk protochunk1 = (ProtoChunk) object1; + +@@ -360,7 +354,7 @@ public class ChunkSerializer { + protochunk1.setCarvingMask(worldgenstage_features, new CarvingMask(nbttagcompound5.getLongArray(s1), ((ChunkAccess) object1).getMinBuildHeight())); + } + +- return new InProgressChunkHolder(protochunk1, tasksToExecuteOnMain); // Paper - Async chunk loading ++ return new InProgressChunkHolder(protochunk1); // Paper - Async chunk loading + } + } + diff --git a/patches/server/0021-Configurable-fishing-time-ranges.patch b/patches/server/0024-Configurable-fishing-time-ranges.patch similarity index 100% rename from patches/server/0021-Configurable-fishing-time-ranges.patch rename to patches/server/0024-Configurable-fishing-time-ranges.patch diff --git a/patches/server/0025-Increase-parallelism-for-neighbour-writing-chunk-sta.patch b/patches/server/0025-Increase-parallelism-for-neighbour-writing-chunk-sta.patch new file mode 100644 index 0000000000..df5bbc9328 --- /dev/null +++ b/patches/server/0025-Increase-parallelism-for-neighbour-writing-chunk-sta.patch @@ -0,0 +1,999 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Spottedleaf +Date: Sun, 26 Feb 2023 23:42:29 -0800 +Subject: [PATCH] Increase parallelism for neighbour writing chunk statuses + +Namely, everything after FEATURES. 
By creating a dependency +chain indicating what chunks are in use, we can safely +schedule completely independent tasks in parallel. This +will allow the chunk system to scale beyond 10 threads +per world. + +diff --git a/src/main/java/io/papermc/paper/chunk/system/RegionizedPlayerChunkLoader.java b/src/main/java/io/papermc/paper/chunk/system/RegionizedPlayerChunkLoader.java +index cf7610b3396d03bf79a899d5d9cfc6debb5b90be..48bfee5b9db501fcdba4ddb1e4bff2718956a680 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/RegionizedPlayerChunkLoader.java ++++ b/src/main/java/io/papermc/paper/chunk/system/RegionizedPlayerChunkLoader.java +@@ -286,7 +286,92 @@ public class RegionizedPlayerChunkLoader { + } + } + +- return chunks.toLongArray(); ++ // to increase generation parallelism, we want to space the chunks out so that they are not nearby when generating ++ // this also means we are minimising locality ++ // but, we need to maintain sorted order by manhatten distance ++ ++ // first, build a map of manhatten distance -> chunks ++ final java.util.List byDistance = new java.util.ArrayList<>(); ++ for (final it.unimi.dsi.fastutil.longs.LongIterator iterator = chunks.iterator(); iterator.hasNext();) { ++ final long chunkKey = iterator.nextLong(); ++ ++ final int chunkX = CoordinateUtils.getChunkX(chunkKey); ++ final int chunkZ = CoordinateUtils.getChunkZ(chunkKey); ++ ++ final int dist = Math.abs(chunkX) + Math.abs(chunkZ); ++ if (dist == byDistance.size()) { ++ final LongArrayList list = new LongArrayList(); ++ list.add(chunkKey); ++ byDistance.add(list); ++ continue; ++ } ++ ++ byDistance.get(dist).add(chunkKey); ++ } ++ ++ // per distance we transform the chunk list so that each element is maximally spaced out from each other ++ for (int i = 0, len = byDistance.size(); i < len; ++i) { ++ final LongArrayList notAdded = byDistance.get(i).clone(); ++ final LongArrayList added = new LongArrayList(); ++ ++ while (!notAdded.isEmpty()) { ++ if (added.isEmpty()) { ++ added.add(notAdded.removeLong(notAdded.size() - 1)); ++ continue; ++ } ++ ++ long maxChunk = -1L; ++ int maxDist = 0; ++ ++ // select the chunk from the not yet added set that has the largest minimum distance from ++ // the current set of added chunks ++ ++ for (final it.unimi.dsi.fastutil.longs.LongIterator iterator = notAdded.iterator(); iterator.hasNext();) { ++ final long chunkKey = iterator.nextLong(); ++ final int chunkX = CoordinateUtils.getChunkX(chunkKey); ++ final int chunkZ = CoordinateUtils.getChunkZ(chunkKey); ++ ++ int minDist = Integer.MAX_VALUE; ++ ++ for (final it.unimi.dsi.fastutil.longs.LongIterator iterator2 = added.iterator(); iterator2.hasNext();) { ++ final long addedKey = iterator2.nextLong(); ++ final int addedX = CoordinateUtils.getChunkX(addedKey); ++ final int addedZ = CoordinateUtils.getChunkZ(addedKey); ++ ++ // here we use square distance because chunk generation uses neighbours in a square radius ++ final int dist = Math.max(Math.abs(addedX - chunkX), Math.abs(addedZ - chunkZ)); ++ ++ if (dist < minDist) { ++ minDist = dist; ++ } ++ } ++ ++ if (minDist > maxDist) { ++ maxDist = minDist; ++ maxChunk = chunkKey; ++ } ++ } ++ ++ // move the selected chunk from the not added set to the added set ++ ++ if (!notAdded.rem(maxChunk)) { ++ throw new IllegalStateException(); ++ } ++ ++ added.add(maxChunk); ++ } ++ ++ byDistance.set(i, added); ++ } ++ ++ // now, rebuild the list so that it still maintains manhatten distance order ++ final LongArrayList ret = new LongArrayList(chunks.size()); ++ ++ for (final 
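// Taken in isolation, the selection loop above is a "farthest point first" ordering:
// repeatedly pick whichever pending chunk has the largest minimum distance to the
// chunks already chosen. A self-contained sketch of just that idea, using plain JDK
// collections and int coordinates instead of Paper's packed long keys and fastutil
// lists - every name below is illustrative, not Paper API.
import java.util.ArrayList;
import java.util.List;

final class FarthestFirstOrderingSketch {

    record ChunkCoord(int x, int z) {}

    static List<ChunkCoord> spread(final List<ChunkCoord> ring) {
        final List<ChunkCoord> pending = new ArrayList<>(ring);
        final List<ChunkCoord> ordered = new ArrayList<>(ring.size());

        while (!pending.isEmpty()) {
            if (ordered.isEmpty()) {
                // seed with an arbitrary element, as the patch does
                ordered.add(pending.remove(pending.size() - 1));
                continue;
            }

            int bestIndex = -1;
            int bestMinDist = -1;

            for (int i = 0; i < pending.size(); ++i) {
                final ChunkCoord candidate = pending.get(i);
                int minDist = Integer.MAX_VALUE;
                for (final ChunkCoord picked : ordered) {
                    // square (Chebyshev) distance, since generation locks a square radius of neighbours
                    final int dist = Math.max(Math.abs(picked.x() - candidate.x()),
                            Math.abs(picked.z() - candidate.z()));
                    minDist = Math.min(minDist, dist);
                }
                if (minDist > bestMinDist) {
                    bestMinDist = minDist;
                    bestIndex = i;
                }
            }

            // the most isolated pending chunk goes next in the schedule
            ordered.add(pending.remove(bestIndex));
        }

        return ordered;
    }
}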
LongArrayList dist : byDistance) { ++ ret.addAll(dist); ++ } ++ ++ return ret.toLongArray(); + } + + public static final class PlayerChunkLoaderData { +diff --git a/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java +index 0b7a2b0ead4f3bc07bfd9a38c2b7cf024bd140c6..36e93fefdfbebddce4c153974c7cd81af3cb92e9 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java ++++ b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java +@@ -4,7 +4,6 @@ import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; + import ca.spottedleaf.starlight.common.light.BlockStarLightEngine; + import ca.spottedleaf.starlight.common.light.SkyStarLightEngine; + import ca.spottedleaf.starlight.common.light.StarLightInterface; +-import io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler; + import io.papermc.paper.util.CoordinateUtils; + import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap; + import it.unimi.dsi.fastutil.shorts.ShortCollection; +@@ -13,6 +12,7 @@ import net.minecraft.core.BlockPos; + import net.minecraft.core.SectionPos; + import net.minecraft.server.level.ServerLevel; + import net.minecraft.world.level.ChunkPos; ++import net.minecraft.world.level.chunk.ChunkStatus; + import java.util.ArrayList; + import java.util.HashSet; + import java.util.List; +@@ -201,7 +201,10 @@ public final class LightQueue { + this.chunkCoordinate = chunkCoordinate; + this.lightEngine = lightEngine; + this.queue = queue; +- this.task = queue.world.chunkTaskScheduler.lightExecutor.createTask(this, priority); ++ this.task = queue.world.chunkTaskScheduler.radiusAwareScheduler.createTask( ++ CoordinateUtils.getChunkX(chunkCoordinate), CoordinateUtils.getChunkZ(chunkCoordinate), ++ ChunkStatus.LIGHT.writeRadius, this, priority ++ ); + } + + public void schedule() { +@@ -230,23 +233,23 @@ public final class LightQueue { + + @Override + public void run() { +- final SkyStarLightEngine skyEngine = this.lightEngine.getSkyLightEngine(); +- final BlockStarLightEngine blockEngine = this.lightEngine.getBlockLightEngine(); +- try { +- synchronized (this.queue) { +- this.queue.chunkTasks.remove(this.chunkCoordinate); +- } ++ synchronized (this.queue) { ++ this.queue.chunkTasks.remove(this.chunkCoordinate); ++ } + +- boolean litChunk = false; +- if (this.lightTasks != null) { +- for (final BooleanSupplier run : this.lightTasks) { +- if (run.getAsBoolean()) { +- litChunk = true; +- break; +- } ++ boolean litChunk = false; ++ if (this.lightTasks != null) { ++ for (final BooleanSupplier run : this.lightTasks) { ++ if (run.getAsBoolean()) { ++ litChunk = true; ++ break; + } + } ++ } + ++ final SkyStarLightEngine skyEngine = this.lightEngine.getSkyLightEngine(); ++ final BlockStarLightEngine blockEngine = this.lightEngine.getBlockLightEngine(); ++ try { + final long coordinate = this.chunkCoordinate; + final int chunkX = CoordinateUtils.getChunkX(coordinate); + final int chunkZ = CoordinateUtils.getChunkZ(coordinate); +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java +index ce26fdfa1afc74ba93d19157042f6c55778011e1..718c1dd7b52fb9a501d552fdbcb3f9ff79d127d8 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java +@@ -1022,17 +1022,23 @@ public final class 
ChunkHolderManager { + } + + public Boolean tryDrainTicketUpdates() { +- final boolean acquired = this.ticketLock.tryLock(); +- try { +- if (!acquired) { +- return null; +- } ++ boolean ret = false; ++ for (;;) { ++ final boolean acquired = this.ticketLock.tryLock(); ++ try { ++ if (!acquired) { ++ return ret ? Boolean.TRUE : null; ++ } + +- return Boolean.valueOf(this.drainTicketUpdates()); +- } finally { +- if (acquired) { +- this.ticketLock.unlock(); ++ ret |= this.drainTicketUpdates(); ++ } finally { ++ if (acquired) { ++ this.ticketLock.unlock(); ++ } + } ++ if (this.delayedTicketUpdates.isEmpty()) { ++ return Boolean.valueOf(ret); ++ } // else: try to re-acquire + } + } + +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java +index 84d6af5c28cd0e81d50701bebe122f462720fbf8..d2bb266a5ed344507058778a94a8a4dcac61ba17 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java +@@ -2,9 +2,9 @@ package io.papermc.paper.chunk.system.scheduling; + + import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; + import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool; +-import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadedTaskQueue; + import ca.spottedleaf.concurrentutil.util.ConcurrentUtil; + import com.mojang.logging.LogUtils; ++import io.papermc.paper.chunk.system.scheduling.queue.RadiusAwarePrioritisedExecutor; + import io.papermc.paper.configuration.GlobalConfiguration; + import io.papermc.paper.util.CoordinateUtils; + import io.papermc.paper.util.TickThread; +@@ -21,7 +21,6 @@ import net.minecraft.world.level.ChunkPos; + import net.minecraft.world.level.chunk.ChunkAccess; + import net.minecraft.world.level.chunk.ChunkStatus; + import net.minecraft.world.level.chunk.LevelChunk; +-import org.bukkit.Bukkit; + import org.slf4j.Logger; + import java.io.File; + import java.util.ArrayDeque; +@@ -34,7 +33,6 @@ import java.util.Objects; + import java.util.concurrent.atomic.AtomicBoolean; + import java.util.concurrent.atomic.AtomicLong; + import java.util.concurrent.locks.ReentrantLock; +-import java.util.function.BooleanSupplier; + import java.util.function.Consumer; + + public final class ChunkTaskScheduler { +@@ -108,9 +106,9 @@ public final class ChunkTaskScheduler { + + public final ServerLevel world; + public final PrioritisedThreadPool workers; +- public final PrioritisedThreadPool.PrioritisedPoolExecutor lightExecutor; +- public final PrioritisedThreadPool.PrioritisedPoolExecutor genExecutor; ++ public final RadiusAwarePrioritisedExecutor radiusAwareScheduler; + public final PrioritisedThreadPool.PrioritisedPoolExecutor parallelGenExecutor; ++ private final PrioritisedThreadPool.PrioritisedPoolExecutor radiusAwareGenExecutor; + public final PrioritisedThreadPool.PrioritisedPoolExecutor loadExecutor; + + private final PrioritisedThreadedTaskQueue mainThreadExecutor = new PrioritisedThreadedTaskQueue(); +@@ -191,12 +189,11 @@ public final class ChunkTaskScheduler { + this.workers = workers; + + final String worldName = world.getWorld().getName(); +- this.genExecutor = workers.createExecutor("Chunk single-threaded generation executor for world '" + worldName + "'", 1); +- // same as genExecutor, as there are race conditions between updating blocks in FEATURE status while lighting chunks +- 
this.lightExecutor = this.genExecutor; +- this.parallelGenExecutor = newChunkSystemGenParallelism <= 1 ? this.genExecutor +- : workers.createExecutor("Chunk parallel generation executor for world '" + worldName + "'", newChunkSystemGenParallelism); ++ this.parallelGenExecutor = workers.createExecutor("Chunk parallel generation executor for world '" + worldName + "'", Math.max(1, newChunkSystemGenParallelism)); ++ this.radiusAwareGenExecutor = ++ newChunkSystemGenParallelism <= 1 ? this.parallelGenExecutor : workers.createExecutor("Chunk radius aware generator for world '" + worldName + "'", newChunkSystemGenParallelism); + this.loadExecutor = workers.createExecutor("Chunk load executor for world '" + worldName + "'", newChunkSystemLoadParallelism); ++ this.radiusAwareScheduler = new RadiusAwarePrioritisedExecutor(this.radiusAwareGenExecutor, Math.max(1, newChunkSystemGenParallelism)); + this.chunkHolderManager = new ChunkHolderManager(world, this); + } + +@@ -688,16 +685,14 @@ public final class ChunkTaskScheduler { + } + + public boolean halt(final boolean sync, final long maxWaitNS) { +- this.lightExecutor.halt(); +- this.genExecutor.halt(); ++ this.radiusAwareGenExecutor.halt(); + this.parallelGenExecutor.halt(); + this.loadExecutor.halt(); + final long time = System.nanoTime(); + if (sync) { + for (long failures = 9L;; failures = ConcurrentUtil.linearLongBackoff(failures, 500_000L, 50_000_000L)) { + if ( +- !this.lightExecutor.isActive() && +- !this.genExecutor.isActive() && ++ !this.radiusAwareGenExecutor.isActive() && + !this.parallelGenExecutor.isActive() && + !this.loadExecutor.isActive() + ) { +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java +index 73ce0909bd89244835a0d0f2030a25871461f1e0..ecc366a4176b2efadc46aa91aa21621f0fc6abe9 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java +@@ -39,8 +39,11 @@ public final class ChunkUpgradeGenericStatusTask extends ChunkProgressionTask im + this.fromStatus = chunk.getStatus(); + this.toStatus = toStatus; + this.neighbours = neighbours; +- this.generateTask = (this.toStatus.isParallelCapable ? 
this.scheduler.parallelGenExecutor : this.scheduler.genExecutor) +- .createTask(this, priority); ++ if (this.toStatus.isParallelCapable) { ++ this.generateTask = this.scheduler.parallelGenExecutor.createTask(this, priority); ++ } else { ++ this.generateTask = this.scheduler.radiusAwareScheduler.createTask(chunkX, chunkZ, this.toStatus.writeRadius, this, priority); ++ } + } + + @Override +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java +new file mode 100644 +index 0000000000000000000000000000000000000000..3272f73013ea7d4efdd0ae2903925cc543be7075 +--- /dev/null ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java +@@ -0,0 +1,668 @@ ++package io.papermc.paper.chunk.system.scheduling.queue; ++ ++import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor; ++import io.papermc.paper.util.CoordinateUtils; ++import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap; ++import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet; ++import java.util.ArrayList; ++import java.util.Comparator; ++import java.util.List; ++import java.util.PriorityQueue; ++ ++public class RadiusAwarePrioritisedExecutor { ++ ++ private static final Comparator DEPENDENCY_NODE_COMPARATOR = (final DependencyNode t1, final DependencyNode t2) -> { ++ return Long.compare(t1.id, t2.id); ++ }; ++ ++ private final DependencyTree[] queues = new DependencyTree[PrioritisedExecutor.Priority.TOTAL_SCHEDULABLE_PRIORITIES]; ++ private static final int NO_TASKS_QUEUED = -1; ++ private int selectedQueue = NO_TASKS_QUEUED; ++ private boolean canQueueTasks = true; ++ ++ public RadiusAwarePrioritisedExecutor(final PrioritisedExecutor executor, final int maxToSchedule) { ++ for (int i = 0; i < this.queues.length; ++i) { ++ this.queues[i] = new DependencyTree(this, executor, maxToSchedule, i); ++ } ++ } ++ ++ private boolean canQueueTasks() { ++ return this.canQueueTasks; ++ } ++ ++ private List treeFinished() { ++ this.canQueueTasks = true; ++ for (int priority = 0; priority < this.queues.length; ++priority) { ++ final DependencyTree queue = this.queues[priority]; ++ if (queue.hasWaitingTasks()) { ++ final List ret = queue.tryPushTasks(); ++ ++ if (ret == null || ret.isEmpty()) { ++ // this happens when the tasks in the wait queue were purged ++ // in this case, the queue was actually empty, we just had to purge it ++ // if we set the selected queue without scheduling any tasks, the queue will never be unselected ++ // as that requires a scheduled task completing... 
++ continue; ++ } ++ ++ this.selectedQueue = priority; ++ return ret; ++ } ++ } ++ ++ this.selectedQueue = NO_TASKS_QUEUED; ++ ++ return null; ++ } ++ ++ private List queue(final Task task, final PrioritisedExecutor.Priority priority) { ++ final int priorityId = priority.priority; ++ final DependencyTree queue = this.queues[priorityId]; ++ ++ final DependencyNode node = new DependencyNode(task, queue); ++ ++ if (task.dependencyNode != null) { ++ throw new IllegalStateException(); ++ } ++ task.dependencyNode = node; ++ ++ queue.pushNode(node); ++ ++ if (this.selectedQueue == NO_TASKS_QUEUED) { ++ this.canQueueTasks = true; ++ this.selectedQueue = priorityId; ++ return queue.tryPushTasks(); ++ } ++ ++ if (!this.canQueueTasks) { ++ return null; ++ } ++ ++ if (PrioritisedExecutor.Priority.isHigherPriority(priorityId, this.selectedQueue)) { ++ // prevent the lower priority tree from queueing more tasks ++ this.canQueueTasks = false; ++ return null; ++ } ++ ++ // priorityId != selectedQueue: lower priority, don't care - treeFinished will pick it up ++ return priorityId == this.selectedQueue ? queue.tryPushTasks() : null; ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask createTask(final int chunkX, final int chunkZ, final int radius, ++ final Runnable run, final PrioritisedExecutor.Priority priority) { ++ if (radius < 0) { ++ throw new IllegalArgumentException("Radius must be > 0: " + radius); ++ } ++ return new Task(this, chunkX, chunkZ, radius, run, priority); ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask createTask(final int chunkX, final int chunkZ, final int radius, ++ final Runnable run) { ++ return this.createTask(chunkX, chunkZ, radius, run, PrioritisedExecutor.Priority.NORMAL); ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask queueTask(final int chunkX, final int chunkZ, final int radius, ++ final Runnable run, final PrioritisedExecutor.Priority priority) { ++ final PrioritisedExecutor.PrioritisedTask ret = this.createTask(chunkX, chunkZ, radius, run, priority); ++ ++ ret.queue(); ++ ++ return ret; ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask queueTask(final int chunkX, final int chunkZ, final int radius, ++ final Runnable run) { ++ final PrioritisedExecutor.PrioritisedTask ret = this.createTask(chunkX, chunkZ, radius, run); ++ ++ ret.queue(); ++ ++ return ret; ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask createInfiniteRadiusTask(final Runnable run, final PrioritisedExecutor.Priority priority) { ++ return new Task(this, 0, 0, -1, run, priority); ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask createInfiniteRadiusTask(final Runnable run) { ++ return this.createInfiniteRadiusTask(run, PrioritisedExecutor.Priority.NORMAL); ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask queueInfiniteRadiusTask(final Runnable run, final PrioritisedExecutor.Priority priority) { ++ final PrioritisedExecutor.PrioritisedTask ret = this.createInfiniteRadiusTask(run, priority); ++ ++ ret.queue(); ++ ++ return ret; ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask queueInfiniteRadiusTask(final Runnable run) { ++ final PrioritisedExecutor.PrioritisedTask ret = this.createInfiniteRadiusTask(run, PrioritisedExecutor.Priority.NORMAL); ++ ++ ret.queue(); ++ ++ return ret; ++ } ++ ++ // all accesses must be synchronised by the radius aware object ++ private static final class DependencyTree { ++ ++ private final RadiusAwarePrioritisedExecutor scheduler; ++ private final PrioritisedExecutor executor; ++ private final int maxToSchedule; ++ private final int treeIndex; ++ 
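// The dependency bookkeeping this tree performs (in pushNode below) amounts to:
// a finite-radius task claims every chunk key in its square, and becomes a child of
// whichever earlier task previously claimed any of those keys; a node with no parents
// is immediately runnable. A cut-down, hypothetical illustration of only that claiming
// step, with a plain HashMap instead of fastutil and no priorities or purging:
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class DependencyClaimSketch {

    static final class Node {
        final int chunkX, chunkZ, radius;
        final Set<Node> parents = new HashSet<>();    // tasks this node must wait for
        final List<Node> children = new ArrayList<>(); // tasks waiting on this node

        Node(final int chunkX, final int chunkZ, final int radius) {
            this.chunkX = chunkX;
            this.chunkZ = chunkZ;
            this.radius = radius;
        }
    }

    // latest claimant per chunk position, keyed the same way as a packed chunk key
    private final Map<Long, Node> claimedBy = new HashMap<>();

    private static long key(final int x, final int z) {
        return ((long) z << 32) | (x & 0xFFFFFFFFL);
    }

    /** Claims the node's square; returns true if it has no parents and may be scheduled now. */
    boolean claim(final Node node) {
        for (int dz = -node.radius; dz <= node.radius; ++dz) {
            for (int dx = -node.radius; dx <= node.radius; ++dx) {
                final Node previous = claimedBy.put(key(node.chunkX + dx, node.chunkZ + dz), node);
                if (previous != null && node.parents.add(previous)) {
                    // the earlier claimant must finish before this node may run
                    previous.children.add(node);
                }
            }
        }
        return node.parents.isEmpty();
    }
}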
++ private int currentlyExecuting; ++ private long idGenerator; ++ ++ private final PriorityQueue awaiting = new PriorityQueue<>(DEPENDENCY_NODE_COMPARATOR); ++ ++ private final PriorityQueue infiniteRadius = new PriorityQueue<>(DEPENDENCY_NODE_COMPARATOR); ++ private boolean isInfiniteRadiusScheduled; ++ ++ private final Long2ReferenceOpenHashMap nodeByPosition = new Long2ReferenceOpenHashMap<>(); ++ ++ public DependencyTree(final RadiusAwarePrioritisedExecutor scheduler, final PrioritisedExecutor executor, ++ final int maxToSchedule, final int treeIndex) { ++ this.scheduler = scheduler; ++ this.executor = executor; ++ this.maxToSchedule = maxToSchedule; ++ this.treeIndex = treeIndex; ++ } ++ ++ public boolean hasWaitingTasks() { ++ return !this.awaiting.isEmpty() || !this.infiniteRadius.isEmpty(); ++ } ++ ++ private long nextId() { ++ return this.idGenerator++; ++ } ++ ++ private boolean isExecutingAnyTasks() { ++ return this.currentlyExecuting != 0; ++ } ++ ++ private void pushNode(final DependencyNode node) { ++ if (!node.task.isFiniteRadius()) { ++ this.infiniteRadius.add(node); ++ return; ++ } ++ ++ // set up dependency for node ++ final Task task = node.task; ++ ++ final int centerX = task.chunkX; ++ final int centerZ = task.chunkZ; ++ final int radius = task.radius; ++ ++ final int minX = centerX - radius; ++ final int maxX = centerX + radius; ++ ++ final int minZ = centerZ - radius; ++ final int maxZ = centerZ + radius; ++ ++ ReferenceOpenHashSet parents = null; ++ for (int currZ = minZ; currZ <= maxZ; ++currZ) { ++ for (int currX = minX; currX <= maxX; ++currX) { ++ final DependencyNode dependency = this.nodeByPosition.put(CoordinateUtils.getChunkKey(currX, currZ), node); ++ if (dependency != null) { ++ if (parents == null) { ++ parents = new ReferenceOpenHashSet<>(); ++ } ++ if (parents.add(dependency)) { ++ // added a dependency, so we need to add as a child to the dependency ++ if (dependency.children == null) { ++ dependency.children = new ArrayList<>(); ++ } ++ dependency.children.add(node); ++ } ++ } ++ } ++ } ++ ++ if (parents == null) { ++ // no dependencies, add straight to awaiting ++ this.awaiting.add(node); ++ } else { ++ node.parents = parents; ++ // we will be added to awaiting once we have no parents ++ } ++ } ++ ++ // called only when a node is returned after being executed ++ private List returnNode(final DependencyNode node) { ++ final Task task = node.task; ++ ++ // now that the task is completed, we can push its children to the awaiting queue ++ this.pushChildren(node); ++ ++ if (task.isFiniteRadius()) { ++ // remove from dependency map ++ this.removeNodeFromMap(node); ++ } else { ++ // mark as no longer executing infinite radius ++ if (!this.isInfiniteRadiusScheduled) { ++ throw new IllegalStateException(); ++ } ++ this.isInfiniteRadiusScheduled = false; ++ } ++ ++ // decrement executing count, we are done executing this task ++ --this.currentlyExecuting; ++ ++ if (this.currentlyExecuting == 0) { ++ return this.scheduler.treeFinished(); ++ } ++ ++ return this.scheduler.canQueueTasks() ? 
this.tryPushTasks() : null; ++ } ++ ++ private List tryPushTasks() { ++ // tasks are not queued, but only created here - we do hold the lock for the map ++ List ret = null; ++ PrioritisedExecutor.PrioritisedTask pushedTask; ++ while ((pushedTask = this.tryPushTask()) != null) { ++ if (ret == null) { ++ ret = new ArrayList<>(); ++ } ++ ret.add(pushedTask); ++ } ++ ++ return ret; ++ } ++ ++ private void removeNodeFromMap(final DependencyNode node) { ++ final Task task = node.task; ++ ++ final int centerX = task.chunkX; ++ final int centerZ = task.chunkZ; ++ final int radius = task.radius; ++ ++ final int minX = centerX - radius; ++ final int maxX = centerX + radius; ++ ++ final int minZ = centerZ - radius; ++ final int maxZ = centerZ + radius; ++ ++ for (int currZ = minZ; currZ <= maxZ; ++currZ) { ++ for (int currX = minX; currX <= maxX; ++currX) { ++ this.nodeByPosition.remove(CoordinateUtils.getChunkKey(currX, currZ), node); ++ } ++ } ++ } ++ ++ private void pushChildren(final DependencyNode node) { ++ // add all the children that we can into awaiting ++ final List children = node.children; ++ if (children != null) { ++ for (int i = 0, len = children.size(); i < len; ++i) { ++ final DependencyNode child = children.get(i); ++ if (!child.parents.remove(node)) { ++ throw new IllegalStateException(); ++ } ++ if (child.parents.isEmpty()) { ++ // no more dependents, we can push to awaiting ++ child.parents = null; ++ // even if the child is purged, we need to push it so that its children will be pushed ++ this.awaiting.add(child); ++ } ++ } ++ } ++ } ++ ++ private DependencyNode pollAwaiting() { ++ final DependencyNode ret = this.awaiting.poll(); ++ if (ret == null) { ++ return ret; ++ } ++ ++ if (ret.parents != null) { ++ throw new IllegalStateException(); ++ } ++ ++ if (ret.purged) { ++ // need to manually remove from state here ++ this.pushChildren(ret); ++ this.removeNodeFromMap(ret); ++ } // else: delay children push until the task has finished ++ ++ return ret; ++ } ++ ++ private DependencyNode pollInfinite() { ++ return this.infiniteRadius.poll(); ++ } ++ ++ public PrioritisedExecutor.PrioritisedTask tryPushTask() { ++ if (this.currentlyExecuting >= this.maxToSchedule || this.isInfiniteRadiusScheduled) { ++ return null; ++ } ++ ++ DependencyNode firstInfinite; ++ while ((firstInfinite = this.infiniteRadius.peek()) != null && firstInfinite.purged) { ++ this.pollInfinite(); ++ } ++ ++ DependencyNode firstAwaiting; ++ while ((firstAwaiting = this.awaiting.peek()) != null && firstAwaiting.purged) { ++ this.pollAwaiting(); ++ } ++ ++ if (firstInfinite == null && firstAwaiting == null) { ++ return null; ++ } ++ ++ // firstAwaiting compared to firstInfinite ++ final int compare; ++ ++ if (firstAwaiting == null) { ++ // we choose first infinite, or infinite < awaiting ++ compare = 1; ++ } else if (firstInfinite == null) { ++ // we choose first awaiting, or awaiting < infinite ++ compare = -1; ++ } else { ++ compare = DEPENDENCY_NODE_COMPARATOR.compare(firstAwaiting, firstInfinite); ++ } ++ ++ if (compare >= 0) { ++ if (this.currentlyExecuting != 0) { ++ // don't queue infinite task while other tasks are executing in parallel ++ return null; ++ } ++ ++this.currentlyExecuting; ++ this.pollInfinite(); ++ this.isInfiniteRadiusScheduled = true; ++ return firstInfinite.task.pushTask(this.executor); ++ } else { ++ ++this.currentlyExecuting; ++ this.pollAwaiting(); ++ return firstAwaiting.task.pushTask(this.executor); ++ } ++ } ++ } ++ ++ private static final class DependencyNode { ++ ++ private final 
Task task; ++ private final DependencyTree tree; ++ ++ // dependency tree fields ++ // (must hold lock on the scheduler to use) ++ // null is the same as empty, we just use it so that we don't allocate the set unless we need to ++ private List children; ++ // null is the same as empty, indicating that this task is considered "awaiting" ++ private ReferenceOpenHashSet parents; ++ // false -> scheduled and not cancelled ++ // true -> scheduled but cancelled ++ private boolean purged; ++ private final long id; ++ ++ public DependencyNode(final Task task, final DependencyTree tree) { ++ this.task = task; ++ this.id = tree.nextId(); ++ this.tree = tree; ++ } ++ } ++ ++ private static final class Task implements PrioritisedExecutor.PrioritisedTask, Runnable { ++ ++ // task specific fields ++ private final RadiusAwarePrioritisedExecutor scheduler; ++ private final int chunkX; ++ private final int chunkZ; ++ private final int radius; ++ private Runnable run; ++ private PrioritisedExecutor.Priority priority; ++ ++ private DependencyNode dependencyNode; ++ private PrioritisedExecutor.PrioritisedTask queuedTask; ++ ++ private Task(final RadiusAwarePrioritisedExecutor scheduler, final int chunkX, final int chunkZ, final int radius, ++ final Runnable run, final PrioritisedExecutor.Priority priority) { ++ this.scheduler = scheduler; ++ this.chunkX = chunkX; ++ this.chunkZ = chunkZ; ++ this.radius = radius; ++ this.run = run; ++ this.priority = priority; ++ } ++ ++ private boolean isFiniteRadius() { ++ return this.radius >= 0; ++ } ++ ++ private PrioritisedExecutor.PrioritisedTask pushTask(final PrioritisedExecutor executor) { ++ return this.queuedTask = executor.createTask(this, this.priority); ++ } ++ ++ private void executeTask() { ++ final Runnable run = this.run; ++ this.run = null; ++ run.run(); ++ } ++ ++ private static void scheduleTasks(final List toSchedule) { ++ if (toSchedule != null) { ++ for (int i = 0, len = toSchedule.size(); i < len; ++i) { ++ toSchedule.get(i).queue(); ++ } ++ } ++ } ++ ++ private void returnNode() { ++ final List toSchedule; ++ synchronized (this.scheduler) { ++ final DependencyNode node = this.dependencyNode; ++ this.dependencyNode = null; ++ toSchedule = node.tree.returnNode(node); ++ } ++ ++ scheduleTasks(toSchedule); ++ } ++ ++ @Override ++ public void run() { ++ final Runnable run = this.run; ++ this.run = null; ++ try { ++ run.run(); ++ } finally { ++ this.returnNode(); ++ } ++ } ++ ++ @Override ++ public boolean queue() { ++ final List toSchedule; ++ synchronized (this.scheduler) { ++ if (this.queuedTask != null || this.dependencyNode != null || this.priority == PrioritisedExecutor.Priority.COMPLETING) { ++ return false; ++ } ++ ++ toSchedule = this.scheduler.queue(this, this.priority); ++ } ++ ++ scheduleTasks(toSchedule); ++ return true; ++ } ++ ++ @Override ++ public boolean cancel() { ++ final PrioritisedExecutor.PrioritisedTask task; ++ synchronized (this.scheduler) { ++ if ((task = this.queuedTask) == null) { ++ if (this.priority == PrioritisedExecutor.Priority.COMPLETING) { ++ return false; ++ } ++ ++ this.priority = PrioritisedExecutor.Priority.COMPLETING; ++ if (this.dependencyNode != null) { ++ this.dependencyNode.purged = true; ++ this.dependencyNode = null; ++ } ++ ++ return true; ++ } ++ } ++ ++ if (task.cancel()) { ++ // must manually return the node ++ this.run = null; ++ this.returnNode(); ++ return true; ++ } ++ return false; ++ } ++ ++ @Override ++ public boolean execute() { ++ final PrioritisedExecutor.PrioritisedTask task; ++ synchronized 
(this.scheduler) { ++ if ((task = this.queuedTask) == null) { ++ if (this.priority == PrioritisedExecutor.Priority.COMPLETING) { ++ return false; ++ } ++ ++ this.priority = PrioritisedExecutor.Priority.COMPLETING; ++ if (this.dependencyNode != null) { ++ this.dependencyNode.purged = true; ++ this.dependencyNode = null; ++ } ++ // fall through to execution logic ++ } ++ } ++ ++ if (task != null) { ++ // will run the return node logic automatically ++ return task.execute(); ++ } else { ++ // don't run node removal/insertion logic, we aren't actually removed from the dependency tree ++ this.executeTask(); ++ return true; ++ } ++ } ++ ++ @Override ++ public PrioritisedExecutor.Priority getPriority() { ++ final PrioritisedExecutor.PrioritisedTask task; ++ synchronized (this.scheduler) { ++ if ((task = this.queuedTask) == null) { ++ return this.priority; ++ } ++ } ++ ++ return task.getPriority(); ++ } ++ ++ @Override ++ public boolean setPriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ ++ final PrioritisedExecutor.PrioritisedTask task; ++ List toSchedule = null; ++ synchronized (this.scheduler) { ++ if ((task = this.queuedTask) == null) { ++ if (this.priority == PrioritisedExecutor.Priority.COMPLETING) { ++ return false; ++ } ++ ++ if (this.priority == priority) { ++ return true; ++ } ++ ++ this.priority = priority; ++ if (this.dependencyNode != null) { ++ // need to re-insert node ++ this.dependencyNode.purged = true; ++ this.dependencyNode = null; ++ toSchedule = this.scheduler.queue(this, priority); ++ } ++ } ++ } ++ ++ if (task != null) { ++ return task.setPriority(priority); ++ } ++ ++ scheduleTasks(toSchedule); ++ ++ return true; ++ } ++ ++ @Override ++ public boolean raisePriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ ++ final PrioritisedExecutor.PrioritisedTask task; ++ List toSchedule = null; ++ synchronized (this.scheduler) { ++ if ((task = this.queuedTask) == null) { ++ if (this.priority == PrioritisedExecutor.Priority.COMPLETING) { ++ return false; ++ } ++ ++ if (this.priority.isHigherOrEqualPriority(priority)) { ++ return true; ++ } ++ ++ this.priority = priority; ++ if (this.dependencyNode != null) { ++ // need to re-insert node ++ this.dependencyNode.purged = true; ++ this.dependencyNode = null; ++ toSchedule = this.scheduler.queue(this, priority); ++ } ++ } ++ } ++ ++ if (task != null) { ++ return task.raisePriority(priority); ++ } ++ ++ scheduleTasks(toSchedule); ++ ++ return true; ++ } ++ ++ @Override ++ public boolean lowerPriority(final PrioritisedExecutor.Priority priority) { ++ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) { ++ throw new IllegalArgumentException("Invalid priority " + priority); ++ } ++ ++ final PrioritisedExecutor.PrioritisedTask task; ++ List toSchedule = null; ++ synchronized (this.scheduler) { ++ if ((task = this.queuedTask) == null) { ++ if (this.priority == PrioritisedExecutor.Priority.COMPLETING) { ++ return false; ++ } ++ ++ if (this.priority.isLowerOrEqualPriority(priority)) { ++ return true; ++ } ++ ++ this.priority = priority; ++ if (this.dependencyNode != null) { ++ // need to re-insert node ++ this.dependencyNode.purged = true; ++ this.dependencyNode = null; ++ toSchedule = this.scheduler.queue(this, priority); ++ } ++ } ++ } ++ 
++ if (task != null) { ++ return task.lowerPriority(priority); ++ } ++ ++ scheduleTasks(toSchedule); ++ ++ return true; ++ } ++ } ++} +diff --git a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java +index c709e27a00d8617f9a3346f85bd88ce47baa9c76..b4be02ec4bb77059f79d3e4d6a6f1ee4843a01f9 100644 +--- a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java ++++ b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java +@@ -94,7 +94,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl + ++totalChunks; + } + +- this.chunkMap.level.chunkTaskScheduler.lightExecutor.queueRunnable(() -> { // Paper - rewrite chunk system ++ this.chunkMap.level.chunkTaskScheduler.radiusAwareScheduler.queueInfiniteRadiusTask(() -> { // Paper - rewrite chunk system + this.theLightEngine.relightChunks(chunks, (ChunkPos chunkPos) -> { + chunkLightCallback.accept(chunkPos); + ((java.util.concurrent.Executor)((ServerLevel)this.theLightEngine.getWorld()).getChunkSource().mainThreadProcessor).execute(() -> { diff --git a/patches/server/0022-Allow-nerfed-mobs-to-jump-and-take-water-damage.patch b/patches/server/0026-Allow-nerfed-mobs-to-jump-and-take-water-damage.patch similarity index 100% rename from patches/server/0022-Allow-nerfed-mobs-to-jump-and-take-water-damage.patch rename to patches/server/0026-Allow-nerfed-mobs-to-jump-and-take-water-damage.patch diff --git a/patches/server/0027-Properly-cancel-chunk-load-tasks-that-were-not-sched.patch b/patches/server/0027-Properly-cancel-chunk-load-tasks-that-were-not-sched.patch new file mode 100644 index 0000000000..3fd8ac049c --- /dev/null +++ b/patches/server/0027-Properly-cancel-chunk-load-tasks-that-were-not-sched.patch @@ -0,0 +1,71 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Spottedleaf +Date: Mon, 15 May 2023 12:24:17 -0700 +Subject: [PATCH] Properly cancel chunk load tasks that were not scheduled + +Since the chunk load task was not scheduled, the entity/poi load +task fields will not be set, but the task complete counter +will not be adjusted. Thus, the chunk load task will not complete. + +To resolve this, detect when the entity/poi tasks were not scheduled +and decrement the task complete counter in such cases. 
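In other words, the load task owns a small completion counter: one slot per data subtask, and the chunk load only finishes once every slot has been accounted for, whether the subtask completed, was cancelled, or was never created at all. A minimal, hypothetical model of that invariant is sketched below; the real ChunkLoadTask works under the scheduling lock with tryCompleteLoad(), so the names and structure here are illustrative only.

import java.util.concurrent.atomic.AtomicInteger;

final class LoadCompletionSketch {

    interface SubTask {
        boolean cancel(); // true if the subtask will now never run
    }

    // one slot for the entity-data load, one for the poi-data load
    private final AtomicInteger remaining = new AtomicInteger(2);
    private volatile boolean scheduled;
    private volatile SubTask entityLoad;
    private volatile SubTask poiLoad;

    private void accountForOneSubtask() {
        if (this.remaining.decrementAndGet() == 0) {
            // every slot accounted for: finish (or abort) the chunk load here
        }
    }

    void cancel() {
        if (this.scheduled) {
            // subtasks exist; a successful cancel means that subtask will never
            // decrement the counter itself, so do it on its behalf
            if (this.entityLoad != null && this.entityLoad.cancel()) {
                this.accountForOneSubtask();
            }
            if (this.poiLoad != null && this.poiLoad.cancel()) {
                this.accountForOneSubtask();
            }
        } else {
            // never scheduled: the subtask fields were never set, so nothing else
            // will ever decrement for them - decrement both slots here
            this.accountForOneSubtask(); // entity slot
            this.accountForOneSubtask(); // poi slot
        }
    }
}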
+ +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java +index 1f7c146ff0b2a835c818f49da6c1f1411f26aa39..34dc2153e90a29bc9102d9497c3c53b5de15508e 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java +@@ -25,7 +25,6 @@ import org.slf4j.Logger; + import java.lang.invoke.VarHandle; + import java.util.Map; + import java.util.concurrent.atomic.AtomicInteger; +-import java.util.concurrent.atomic.AtomicLong; + import java.util.function.Consumer; + + public final class ChunkLoadTask extends ChunkProgressionTask { +@@ -125,8 +124,12 @@ public final class ChunkLoadTask extends ChunkProgressionTask { + @Override + public void cancel() { + // must be before load task access, so we can synchronise with the writes to the fields ++ final boolean scheduled; + this.scheduler.schedulingLock.lock(); + try { ++ // fix cancellation of chunk load task - must read field here, as it may be written later conucrrently - ++ // we need to know if we scheduled _before_ cancellation ++ scheduled = this.scheduled; + this.cancelled = true; + } finally { + this.scheduler.schedulingLock.unlock(); +@@ -139,15 +142,26 @@ public final class ChunkLoadTask extends ChunkProgressionTask { + the chunk load task attempts to complete with a non-null value + */ + +- if (this.entityLoadTask != null) { +- if (this.entityLoadTask.cancel()) { +- this.tryCompleteLoad(); ++ if (scheduled) { ++ // since we scheduled, we need to cancel the tasks ++ if (this.entityLoadTask != null) { ++ if (this.entityLoadTask.cancel()) { ++ this.tryCompleteLoad(); ++ } + } +- } +- if (this.poiLoadTask != null) { +- if (this.poiLoadTask.cancel()) { +- this.tryCompleteLoad(); ++ if (this.poiLoadTask != null) { ++ if (this.poiLoadTask.cancel()) { ++ this.tryCompleteLoad(); ++ } + } ++ } else { ++ // since nothing was scheduled, we need to decrement the task count here ourselves ++ ++ // for entity load task ++ this.tryCompleteLoad(); ++ ++ // for poi load task ++ this.tryCompleteLoad(); + } + this.loadTask.cancel(); + } diff --git a/patches/server/0023-Add-configurable-despawn-distances-for-living-entiti.patch b/patches/server/0028-Add-configurable-despawn-distances-for-living-entiti.patch similarity index 100% rename from patches/server/0023-Add-configurable-despawn-distances-for-living-entiti.patch rename to patches/server/0028-Add-configurable-despawn-distances-for-living-entiti.patch diff --git a/patches/server/0029-Mark-POI-Entity-load-tasks-as-completed-before-relea.patch b/patches/server/0029-Mark-POI-Entity-load-tasks-as-completed-before-relea.patch new file mode 100644 index 0000000000..2c6336128b --- /dev/null +++ b/patches/server/0029-Mark-POI-Entity-load-tasks-as-completed-before-relea.patch @@ -0,0 +1,110 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Spottedleaf +Date: Mon, 15 May 2023 11:34:28 -0700 +Subject: [PATCH] Mark POI/Entity load tasks as completed before releasing + scheduling lock + +It must be marked as completed during that lock hold since the +waiters field is set to null. Thus, any other thread attempting +a cancellation will fail to remove from waiters. Also, any +other thread attempting to cancel may set the completed field +to true which would cause accept() to fail as well. 
+ +Completion was always designed to happen while holding the +scheduling lock to prevent these race conditions. The code +was originally set up to complete while not holding the +scheduling lock to avoid invoking callbacks while holding the +lock, however the access to the completion field was not +considered. + +Resolve this by marking the callback as completed during the +lock, but invoking the accept() function after releasing +the lock. This will prevent any cancellation attempts to be +blocked, and allow the current thread to complete the callback +without any issues. + +diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java +index 8013dd333e27aa5fd0beb431fa32491eec9f5246..efc9b7a304f10b6a23a36cffb0a4aaea6ab71129 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java ++++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java +@@ -156,6 +156,12 @@ public final class NewChunkHolder { + LOGGER.error("Unhandled entity data load exception, data data will be lost: ", result.right()); + } + ++ // Folia start - mark these tasks as completed before releasing the scheduling lock ++ for (final GenericDataLoadTaskCallback callback : waiters) { ++ callback.markCompleted(); ++ } ++ // Folia end - mark these tasks as completed before releasing the scheduling lock ++ + completeWaiters = waiters; + } else { + // cancelled +@@ -187,7 +193,7 @@ public final class NewChunkHolder { + // avoid holding the scheduling lock while completing + if (completeWaiters != null) { + for (final GenericDataLoadTaskCallback callback : completeWaiters) { +- callback.accept(result); ++ callback.acceptCompleted(result); // Folia - mark these tasks as completed before releasing the scheduling lock + } + } + +@@ -273,6 +279,12 @@ public final class NewChunkHolder { + LOGGER.error("Unhandled poi load exception, poi data will be lost: ", result.right()); + } + ++ // Folia start - mark these tasks as completed before releasing the scheduling lock ++ for (final GenericDataLoadTaskCallback callback : waiters) { ++ callback.markCompleted(); ++ } ++ // Folia end - mark these tasks as completed before releasing the scheduling lock ++ + completeWaiters = waiters; + } else { + // cancelled +@@ -304,7 +316,7 @@ public final class NewChunkHolder { + // avoid holding the scheduling lock while completing + if (completeWaiters != null) { + for (final GenericDataLoadTaskCallback callback : completeWaiters) { +- callback.accept(result); ++ callback.acceptCompleted(result); // Folia - mark these tasks as completed before releasing the scheduling lock + } + } + this.scheduler.schedulingLock.lock(); +@@ -357,7 +369,7 @@ public final class NewChunkHolder { + } + } + +- public static abstract class GenericDataLoadTaskCallback implements Cancellable, Consumer> { ++ public static abstract class GenericDataLoadTaskCallback implements Cancellable { // Folia - mark callbacks as completed before unlocking scheduling lock + + protected final Consumer> consumer; + protected final NewChunkHolder chunkHolder; +@@ -393,13 +405,23 @@ public final class NewChunkHolder { + return this.completed = true; + } + +- @Override +- public void accept(final GenericDataLoadTask.TaskResult result) { ++ // Folia start - mark callbacks as completed before unlocking scheduling lock ++ // must hold scheduling lock ++ void markCompleted() { ++ if (this.completed) { ++ throw new IllegalStateException("May not be 
completed here"); ++ } ++ this.completed = true; ++ } ++ // Folia end - mark callbacks as completed before unlocking scheduling lock ++ ++ // Folia - mark callbacks as completed before unlocking scheduling lock ++ void acceptCompleted(final GenericDataLoadTask.TaskResult result) { + if (result != null) { +- if (this.setCompleted()) { ++ if (this.completed) { // Folia - mark callbacks as completed before unlocking scheduling lock + this.consumer.accept(result); + } else { +- throw new IllegalStateException("Cannot be cancelled at this point"); ++ throw new IllegalStateException("Cannot be uncompleted at this point"); // Folia - mark callbacks as completed before unlocking scheduling lock + } + } else { + throw new NullPointerException("Result cannot be null (cancelled)"); diff --git a/patches/server/0024-Allow-for-toggling-of-spawn-chunks.patch b/patches/server/0030-Allow-for-toggling-of-spawn-chunks.patch similarity index 91% rename from patches/server/0024-Allow-for-toggling-of-spawn-chunks.patch rename to patches/server/0030-Allow-for-toggling-of-spawn-chunks.patch index 905a52fbd1..042396c575 100644 --- a/patches/server/0024-Allow-for-toggling-of-spawn-chunks.patch +++ b/patches/server/0030-Allow-for-toggling-of-spawn-chunks.patch @@ -5,7 +5,7 @@ Subject: [PATCH] Allow for toggling of spawn chunks diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java -index d87f02c748fe2e5b4ea251f6691e8907a152cb6d..f21ba175538436e59c45d5350ef7b2605ed96775 100644 +index 5988c0847af4e8f0094328e91f736f25d567db60..fb78a91d1ab77f909823422c6d4e2ef7ed10c9c3 100644 --- a/src/main/java/net/minecraft/world/level/Level.java +++ b/src/main/java/net/minecraft/world/level/Level.java @@ -261,6 +261,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { diff --git a/patches/server/0031-Cache-whether-region-files-do-not-exist.patch b/patches/server/0031-Cache-whether-region-files-do-not-exist.patch new file mode 100644 index 0000000000..3a7c248487 --- /dev/null +++ b/patches/server/0031-Cache-whether-region-files-do-not-exist.patch @@ -0,0 +1,119 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Spottedleaf +Date: Thu, 2 Mar 2023 23:19:04 -0800 +Subject: [PATCH] Cache whether region files do not exist + +The repeated I/O of creating the directory for the regionfile +or for checking if the file exists can be heavy in +when pushing chunk generation extremely hard - as each chunk gen +request may effectively go through to the I/O thread. + +diff --git a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java +index a08cde4eefe879adcee7c4118bc38f98c5097ed0..8a11e10b01fa012b2f98b1c193c53251e848f909 100644 +--- a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java ++++ b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java +@@ -819,8 +819,14 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread { + return file.hasChunk(chunkPos) ? 
Boolean.TRUE : Boolean.FALSE; + }); + } else { ++ // first check if the region file for sure does not exist ++ if (taskController.doesRegionFileNotExist(chunkX, chunkZ)) { ++ return Boolean.FALSE; ++ } // else: it either exists or is not known, fall back to checking the loaded region file ++ + return taskController.computeForRegionFileIfLoaded(chunkX, chunkZ, (final RegionFile file) -> { + if (file == null) { // null if not loaded ++ // not sure at this point, let the I/O thread figure it out + return Boolean.TRUE; + } + +@@ -1116,6 +1122,10 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread { + return !this.tasks.isEmpty(); + } + ++ public boolean doesRegionFileNotExist(final int chunkX, final int chunkZ) { ++ return this.getCache().doesRegionFileNotExistNoIO(new ChunkPos(chunkX, chunkZ)); ++ } ++ + public T computeForRegionFile(final int chunkX, final int chunkZ, final boolean existingOnly, final Function function) { + final RegionFileStorage cache = this.getCache(); + final RegionFile regionFile; +diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java +index 18ef7025f7f4dc2a4aff85ca65ff5a2d35a1ef06..fe8bb0037bb7f317fc32ac34461f4eb3a1f397f2 100644 +--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java ++++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java +@@ -24,6 +24,35 @@ public class RegionFileStorage implements AutoCloseable { + private final Path folder; + private final boolean sync; + ++ // Paper start - cache regionfile does not exist state ++ static final int MAX_NON_EXISTING_CACHE = 1024 * 64; ++ private final it.unimi.dsi.fastutil.longs.LongLinkedOpenHashSet nonExistingRegionFiles = new it.unimi.dsi.fastutil.longs.LongLinkedOpenHashSet(); ++ private synchronized boolean doesRegionFilePossiblyExist(long position) { ++ if (this.nonExistingRegionFiles.contains(position)) { ++ this.nonExistingRegionFiles.addAndMoveToFirst(position); ++ return false; ++ } ++ return true; ++ } ++ ++ private synchronized void createRegionFile(long position) { ++ this.nonExistingRegionFiles.remove(position); ++ } ++ ++ private synchronized void markNonExisting(long position) { ++ if (this.nonExistingRegionFiles.addAndMoveToFirst(position)) { ++ while (this.nonExistingRegionFiles.size() >= MAX_NON_EXISTING_CACHE) { ++ this.nonExistingRegionFiles.removeLastLong(); ++ } ++ } ++ } ++ ++ public synchronized boolean doesRegionFileNotExistNoIO(ChunkPos pos) { ++ long key = ChunkPos.asLong(pos.getRegionX(), pos.getRegionZ()); ++ return !this.doesRegionFilePossiblyExist(key); ++ } ++ // Paper end - cache regionfile does not exist state ++ + protected RegionFileStorage(Path directory, boolean dsync) { // Paper - protected constructor + this.folder = directory; + this.sync = dsync; +@@ -45,7 +74,7 @@ public class RegionFileStorage implements AutoCloseable { + } + public synchronized RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly, boolean lock) throws IOException { + // Paper end +- long i = ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ()); ++ long i = ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ()); final long regionPos = i; // Paper - OBFHELPER + RegionFile regionfile = (RegionFile) this.regionCache.getAndMoveToFirst(i); + + if (regionfile != null) { +@@ -57,15 +86,27 @@ public class RegionFileStorage implements AutoCloseable 
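// The negative cache introduced above is a bounded, access-ordered set of region
// positions known to be absent on disk, letting repeated "does this region file exist?"
// probes skip the filesystem entirely. A rough stand-alone equivalent using an
// access-ordered LinkedHashMap in place of fastutil's LongLinkedOpenHashSet; only the
// 64k bound is taken from the patch, everything else here is illustrative.
import java.util.LinkedHashMap;
import java.util.Map;

final class MissingRegionCacheSketch {

    private static final int MAX_NON_EXISTING_CACHE = 1024 * 64;

    // access-ordered map used as an LRU set of "known missing" region positions
    private final LinkedHashMap<Long, Boolean> nonExisting =
            new LinkedHashMap<>(16, 0.75f, true) {
                @Override
                protected boolean removeEldestEntry(final Map.Entry<Long, Boolean> eldest) {
                    return this.size() > MAX_NON_EXISTING_CACHE;
                }
            };

    synchronized boolean possiblyExists(final long regionKey) {
        // get() refreshes the LRU position on a hit, like addAndMoveToFirst in the patch
        return this.nonExisting.get(regionKey) == null;
    }

    synchronized void markMissing(final long regionKey) {
        this.nonExisting.put(regionKey, Boolean.TRUE);
    }

    synchronized void markCreated(final long regionKey) {
        this.nonExisting.remove(regionKey);
    }
}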
{ + // Paper end + return regionfile; + } else { ++ // Paper start - cache regionfile does not exist state ++ if (existingOnly && !this.doesRegionFilePossiblyExist(regionPos)) { ++ return null; ++ } ++ // Paper end - cache regionfile does not exist state + if (this.regionCache.size() >= 256) { + ((RegionFile) this.regionCache.removeLast()).close(); + } + +- FileUtil.createDirectoriesSafe(this.folder); ++ // Paper - only create directory if not existing only - moved down + Path path = this.folder; + int j = chunkcoordintpair.getRegionX(); + Path path1 = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca"); +- if (existingOnly && !java.nio.file.Files.exists(path1)) return null; // CraftBukkit ++ if (existingOnly && !java.nio.file.Files.exists(path1)) { // Paper start - cache regionfile does not exist state ++ this.markNonExisting(regionPos); ++ return null; // CraftBukkit ++ } else { ++ this.createRegionFile(regionPos); ++ } ++ // Paper end - cache regionfile does not exist state ++ FileUtil.createDirectoriesSafe(this.folder); // Paper - only create directory if not existing only - moved from above + RegionFile regionfile1 = new RegionFile(path1, this.folder, this.sync); + + this.regionCache.putAndMoveToFirst(i, regionfile1); diff --git a/patches/server/0025-Drop-falling-block-and-tnt-entities-at-the-specified.patch b/patches/server/0032-Drop-falling-block-and-tnt-entities-at-the-specified.patch similarity index 100% rename from patches/server/0025-Drop-falling-block-and-tnt-entities-at-the-specified.patch rename to patches/server/0032-Drop-falling-block-and-tnt-entities-at-the-specified.patch diff --git a/patches/server/0026-Show-Paper-in-client-crashes-server-lists-and-Mojang.patch b/patches/server/0033-Show-Paper-in-client-crashes-server-lists-and-Mojang.patch similarity index 88% rename from patches/server/0026-Show-Paper-in-client-crashes-server-lists-and-Mojang.patch rename to patches/server/0033-Show-Paper-in-client-crashes-server-lists-and-Mojang.patch index e6a7636556..b97c0d5e4c 100644 --- a/patches/server/0026-Show-Paper-in-client-crashes-server-lists-and-Mojang.patch +++ b/patches/server/0033-Show-Paper-in-client-crashes-server-lists-and-Mojang.patch @@ -6,10 +6,10 @@ Subject: [PATCH] Show 'Paper' in client crashes, server lists, and Mojang diff --git a/src/main/java/net/minecraft/server/MinecraftServer.java b/src/main/java/net/minecraft/server/MinecraftServer.java -index 96897d883671e018bae5f71545c5f7af205e309c..ff3073b6780c04f5f11b5384bb6b6289eafd0c4f 100644 +index 164ce278f2696d4be6b57404648cb0e856464589..4d5a8c051ab7746ed40d5b79558e86238618827c 100644 --- a/src/main/java/net/minecraft/server/MinecraftServer.java +++ b/src/main/java/net/minecraft/server/MinecraftServer.java -@@ -1466,7 +1466,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop targetPredicate) { double d = -1.0D; -@@ -99,6 +104,20 @@ public interface EntityGetter { +@@ -111,6 +116,20 @@ public interface EntityGetter { return this.getNearestPlayer(x, y, z, maxDistance, predicate); } @@ -137,10 +137,10 @@ index 127c4ebedb94631ceac92dbdcd465e904217d715..5e19b91e6fb7e5e354f55ea206b3d59e for(Player player : this.players()) { if (EntitySelector.NO_SPECTATORS.test(player) && EntitySelector.LIVING_ENTITY_STILL_ALIVE.test(player)) { diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -index 5afdada0d56c2f9af431ea6485faa277229befa9..1098c95f8f27e0ff3cf593a4f0dfde0dbe4d3152 100644 +index 
976eadd8200b2f4811d57b3c7fbd68cff1333924..dd65045f52da395bd3d106065b7390d3925bf96f 100644 --- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -@@ -2169,8 +2169,20 @@ public class CraftPlayer extends CraftHumanEntity implements Player { +@@ -2211,8 +2211,20 @@ public class CraftPlayer extends CraftHumanEntity implements Player { @Override public String getLocale() { return this.getHandle().locale; diff --git a/patches/server/0030-Further-improve-server-tick-loop.patch b/patches/server/0037-Further-improve-server-tick-loop.patch similarity index 92% rename from patches/server/0030-Further-improve-server-tick-loop.patch rename to patches/server/0037-Further-improve-server-tick-loop.patch index a7c1311e67..be0233164a 100644 --- a/patches/server/0030-Further-improve-server-tick-loop.patch +++ b/patches/server/0037-Further-improve-server-tick-loop.patch @@ -12,7 +12,7 @@ Previous implementation did not calculate TPS correctly. Switch to a realistic rolling average and factor in std deviation as an extra reporting variable diff --git a/src/main/java/net/minecraft/server/MinecraftServer.java b/src/main/java/net/minecraft/server/MinecraftServer.java -index ff3073b6780c04f5f11b5384bb6b6289eafd0c4f..2aeeffe6d011a1ecc848b80f0236c8a3f871a01c 100644 +index 4d5a8c051ab7746ed40d5b79558e86238618827c..92eb6e80e3b6f74dd32a878e5436d338c89ea60e 100644 --- a/src/main/java/net/minecraft/server/MinecraftServer.java +++ b/src/main/java/net/minecraft/server/MinecraftServer.java @@ -285,7 +285,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop 5000L && this.nextTickTime - this.lastOverloadWarning >= 30000L) { // CraftBukkit long j = i / 50L; -@@ -1001,12 +1053,18 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop diff --git a/src/main/java/net/minecraft/world/entity/Entity.java b/src/main/java/net/minecraft/world/entity/Entity.java -index 06d8211ef27a21da66d4622795cbcc8b37d2ce4e..7358b918d03b0dee0541843a952d6b8bf939d877 100644 +index 193b466e5dc52b9ecc878c4a680f88b8ce05e632..3088cdbb55b3054394fd3405797539f58a46f70c 100644 --- a/src/main/java/net/minecraft/world/entity/Entity.java +++ b/src/main/java/net/minecraft/world/entity/Entity.java -@@ -663,7 +663,11 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { +@@ -715,7 +715,11 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { } public void checkBelowWorld() { diff --git a/patches/server/0035-Check-online-mode-before-converting-and-renaming-pla.patch b/patches/server/0042-Check-online-mode-before-converting-and-renaming-pla.patch similarity index 100% rename from patches/server/0035-Check-online-mode-before-converting-and-renaming-pla.patch rename to patches/server/0042-Check-online-mode-before-converting-and-renaming-pla.patch diff --git a/patches/server/0036-Always-tick-falling-blocks.patch b/patches/server/0043-Always-tick-falling-blocks.patch similarity index 90% rename from patches/server/0036-Always-tick-falling-blocks.patch rename to patches/server/0043-Always-tick-falling-blocks.patch index cb761cc088..ff78994090 100644 --- a/patches/server/0036-Always-tick-falling-blocks.patch +++ b/patches/server/0043-Always-tick-falling-blocks.patch @@ -5,7 +5,7 @@ Subject: [PATCH] Always tick falling blocks diff --git a/src/main/java/org/spigotmc/ActivationRange.java b/src/main/java/org/spigotmc/ActivationRange.java -index 
3e5d541d3d7fc3956b2fc68da9f22a0ab0367ce9..66dbfabdb104d272d48588a4773d198d3938f1ef 100644 +index b47b740186c200c420dcb4d1537a93c743a887c1..a526816f261de2a75a04be82596a7d8d674ead4a 100644 --- a/src/main/java/org/spigotmc/ActivationRange.java +++ b/src/main/java/org/spigotmc/ActivationRange.java @@ -89,6 +89,7 @@ public class ActivationRange diff --git a/patches/server/0037-Configurable-end-credits.patch b/patches/server/0044-Configurable-end-credits.patch similarity index 86% rename from patches/server/0037-Configurable-end-credits.patch rename to patches/server/0044-Configurable-end-credits.patch index 81aa4de852..292210bb6d 100644 --- a/patches/server/0037-Configurable-end-credits.patch +++ b/patches/server/0044-Configurable-end-credits.patch @@ -5,10 +5,10 @@ Subject: [PATCH] Configurable end credits diff --git a/src/main/java/net/minecraft/server/level/ServerPlayer.java b/src/main/java/net/minecraft/server/level/ServerPlayer.java -index 5872ead2fe3a64f02f8bc36603fbb856728fd255..eb42e0ce8e2698973307e2b6774cbbd79c1d6457 100644 +index 32ef9f1ae0c35e927133572ebb6fbf50b0729a63..e42c07dfba3c18464f1f8e35fbd764812d8c4e50 100644 --- a/src/main/java/net/minecraft/server/level/ServerPlayer.java +++ b/src/main/java/net/minecraft/server/level/ServerPlayer.java -@@ -1024,6 +1024,7 @@ public class ServerPlayer extends Player { +@@ -1066,6 +1066,7 @@ public class ServerPlayer extends Player { this.unRide(); this.serverLevel().removePlayerImmediately(this, Entity.RemovalReason.CHANGED_DIMENSION); if (!this.wonGame) { diff --git a/patches/server/0038-Fix-lag-from-explosions-processing-dead-entities.patch b/patches/server/0045-Fix-lag-from-explosions-processing-dead-entities.patch similarity index 100% rename from patches/server/0038-Fix-lag-from-explosions-processing-dead-entities.patch rename to patches/server/0045-Fix-lag-from-explosions-processing-dead-entities.patch diff --git a/patches/server/0039-Optimize-explosions.patch b/patches/server/0046-Optimize-explosions.patch similarity index 95% rename from patches/server/0039-Optimize-explosions.patch rename to patches/server/0046-Optimize-explosions.patch index d331d09b42..1a71c81043 100644 --- a/patches/server/0039-Optimize-explosions.patch +++ b/patches/server/0046-Optimize-explosions.patch @@ -10,10 +10,10 @@ This patch adds a per-tick cache that is used for storing and retrieving an entity's exposure during an explosion. 
diff --git a/src/main/java/net/minecraft/server/MinecraftServer.java b/src/main/java/net/minecraft/server/MinecraftServer.java -index 2aeeffe6d011a1ecc848b80f0236c8a3f871a01c..d5ad3368a397e9fc02506187ccb1c386afd9ce2b 100644 +index 92eb6e80e3b6f74dd32a878e5436d338c89ea60e..821725460b62ebadedb789f4408ef172416c2092 100644 --- a/src/main/java/net/minecraft/server/MinecraftServer.java +++ b/src/main/java/net/minecraft/server/MinecraftServer.java -@@ -1412,6 +1412,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop diff --git a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -index 403f7c7f31e0072b0cad0706bc981ece24733a1d..42a44820a3c13e2b0e29e02ed384c1918c9a0b17 100644 +index d72e8df4f99b6219ea305742f0cf8d1c1985ffd6..ddd437906801973e11386294e7e3f6846cc11cbc 100644 --- a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java +++ b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -@@ -2034,7 +2034,7 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic +@@ -2041,7 +2041,7 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic return true; } @@ -33,7 +33,7 @@ index 403f7c7f31e0072b0cad0706bc981ece24733a1d..42a44820a3c13e2b0e29e02ed384c191 for (int i = 0; i < message.length(); ++i) { if (!SharedConstants.isAllowedChatCharacter(message.charAt(i))) { return true; -@@ -2051,7 +2051,7 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic +@@ -2058,7 +2058,7 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic } OutgoingChatMessage outgoing = OutgoingChatMessage.create(original); @@ -42,7 +42,7 @@ index 403f7c7f31e0072b0cad0706bc981ece24733a1d..42a44820a3c13e2b0e29e02ed384c191 this.handleCommand(s); } else if (this.player.getChatVisibility() == ChatVisiblity.SYSTEM) { // Do nothing, this is coming from a plugin -@@ -2141,7 +2141,29 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic +@@ -2148,7 +2148,29 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic } } @@ -74,7 +74,7 @@ index 403f7c7f31e0072b0cad0706bc981ece24733a1d..42a44820a3c13e2b0e29e02ed384c191 if ( org.spigotmc.SpigotConfig.logCommands ) // Spigot this.LOGGER.info(this.player.getScoreboardName() + " issued server command: " + s); diff --git a/src/main/java/org/bukkit/craftbukkit/CraftServer.java b/src/main/java/org/bukkit/craftbukkit/CraftServer.java -index 17056a7ea6c6a3a671872d2be603d7dd60ae04e9..6e488fe34231afa20d0f8f62299bb631359ee51c 100644 +index fb6ea46609489d802ed4b76b4074734dd3e3886f..dd3935eeceba456b37a5fbcb874c1073bb76d473 100644 --- a/src/main/java/org/bukkit/craftbukkit/CraftServer.java +++ b/src/main/java/org/bukkit/craftbukkit/CraftServer.java @@ -875,6 +875,28 @@ public final class CraftServer implements Server { @@ -107,10 +107,10 @@ index 17056a7ea6c6a3a671872d2be603d7dd60ae04e9..6e488fe34231afa20d0f8f62299bb631 return true; } diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -index 484fab5eeeb92407ded625b96678c9aa93199d70..b8fc2196b8f77b826d229265e96b5cce1e5301cb 100644 +index 4061b68edb8f501eea71ba009214ac3808506ec7..37ffadacbfd49743dac8739b74def818a351e563 100644 --- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +++ 
b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -@@ -437,7 +437,20 @@ public class CraftPlayer extends CraftHumanEntity implements Player { +@@ -479,7 +479,20 @@ public class CraftPlayer extends CraftHumanEntity implements Player { public void chat(String msg) { if (this.getHandle().connection == null) return; @@ -145,7 +145,7 @@ index 19c44daaa407b7c1c7a7ffe56fef8c8814c6d5b2..6a073a9dc44d93eba296a0e18a9c7be8 } finally { try { diff --git a/src/main/java/org/spigotmc/AsyncCatcher.java b/src/main/java/org/spigotmc/AsyncCatcher.java -index 78669fa035b7537ff7e533cf32aaf2995625424f..7585a30e8f063ac2656b5de519b1e9edaceffbc7 100644 +index 05e94702e42b8f5c35d2a112c486d57948a3acba..5409f230fdd53b70fc03c58177438534731ad4e6 100644 --- a/src/main/java/org/spigotmc/AsyncCatcher.java +++ b/src/main/java/org/spigotmc/AsyncCatcher.java @@ -6,6 +6,7 @@ public class AsyncCatcher diff --git a/patches/server/0052-All-chunks-are-slime-spawn-chunks-toggle.patch b/patches/server/0059-All-chunks-are-slime-spawn-chunks-toggle.patch similarity index 93% rename from patches/server/0052-All-chunks-are-slime-spawn-chunks-toggle.patch rename to patches/server/0059-All-chunks-are-slime-spawn-chunks-toggle.patch index 2873efd090..a3a9107844 100644 --- a/patches/server/0052-All-chunks-are-slime-spawn-chunks-toggle.patch +++ b/patches/server/0059-All-chunks-are-slime-spawn-chunks-toggle.patch @@ -18,10 +18,10 @@ index 23cb972ca3fde409be0d6517ef8f1c58dab47ff4..476a9b2db5deac803f1cb3c2cbe88b69 if (random.nextInt(10) == 0 && flag && pos.getY() < 40) { return checkMobSpawnRules(type, world, spawnReason, pos, random); diff --git a/src/main/java/org/bukkit/craftbukkit/CraftChunk.java b/src/main/java/org/bukkit/craftbukkit/CraftChunk.java -index bf4b2f89d3a7133155c6272379c742318b2c1514..f07a6b1d782426581c84ffa19447c1375c4cbc07 100644 +index 33677ec811ceab939c419bf7d31b99585e9a1ef1..8ae78690748b2cb5d5186d8859871c1630e10130 100644 --- a/src/main/java/org/bukkit/craftbukkit/CraftChunk.java +++ b/src/main/java/org/bukkit/craftbukkit/CraftChunk.java -@@ -213,7 +213,7 @@ public class CraftChunk implements Chunk { +@@ -169,7 +169,7 @@ public class CraftChunk implements Chunk { @Override public boolean isSlimeChunk() { // 987234911L is deterimined in EntitySlime when seeing if a slime can spawn in a chunk diff --git a/patches/server/0053-Expose-server-CommandMap.patch b/patches/server/0060-Expose-server-CommandMap.patch similarity index 87% rename from patches/server/0053-Expose-server-CommandMap.patch rename to patches/server/0060-Expose-server-CommandMap.patch index ad6b1fc796..b64c141857 100644 --- a/patches/server/0053-Expose-server-CommandMap.patch +++ b/patches/server/0060-Expose-server-CommandMap.patch @@ -5,7 +5,7 @@ Subject: [PATCH] Expose server CommandMap diff --git a/src/main/java/org/bukkit/craftbukkit/CraftServer.java b/src/main/java/org/bukkit/craftbukkit/CraftServer.java -index 6e488fe34231afa20d0f8f62299bb631359ee51c..7681198cbc8f36b9e860182a1f3541a4b22fc82e 100644 +index dd3935eeceba456b37a5fbcb874c1073bb76d473..9c80c55e8c15aa847aea134dd8121ee9d0c24d1c 100644 --- a/src/main/java/org/bukkit/craftbukkit/CraftServer.java +++ b/src/main/java/org/bukkit/craftbukkit/CraftServer.java @@ -2012,6 +2012,7 @@ public final class CraftServer implements Server { diff --git a/patches/server/0054-Be-a-bit-more-informative-in-maxHealth-exception.patch b/patches/server/0061-Be-a-bit-more-informative-in-maxHealth-exception.patch similarity index 100% rename from 
patches/server/0054-Be-a-bit-more-informative-in-maxHealth-exception.patch rename to patches/server/0061-Be-a-bit-more-informative-in-maxHealth-exception.patch diff --git a/patches/server/0055-Player-Tab-List-and-Title-APIs.patch b/patches/server/0062-Player-Tab-List-and-Title-APIs.patch similarity index 98% rename from patches/server/0055-Player-Tab-List-and-Title-APIs.patch rename to patches/server/0062-Player-Tab-List-and-Title-APIs.patch index b0fdad3c1b..7292c0742f 100644 --- a/patches/server/0055-Player-Tab-List-and-Title-APIs.patch +++ b/patches/server/0062-Player-Tab-List-and-Title-APIs.patch @@ -63,7 +63,7 @@ index bd808eb312ade7122973a47f4b96505829511da5..bf0f9cab7c66c089f35b851e799ba4a4 // Paper end buf.writeComponent(this.text); diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -index b8fc2196b8f77b826d229265e96b5cce1e5301cb..bd811f33bb2df565d876bc3433b1ce9ce58acba4 100644 +index 37ffadacbfd49743dac8739b74def818a351e563..c6ceb5044e3dcf5f512dc1efc04e0785b1caadb0 100644 --- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java @@ -1,5 +1,6 @@ @@ -73,7 +73,7 @@ index b8fc2196b8f77b826d229265e96b5cce1e5301cb..bd811f33bb2df565d876bc3433b1ce9c import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableSet; import com.google.common.io.BaseEncoding; -@@ -283,6 +284,100 @@ public class CraftPlayer extends CraftHumanEntity implements Player { +@@ -325,6 +326,100 @@ public class CraftPlayer extends CraftHumanEntity implements Player { } } diff --git a/patches/server/0056-Add-configurable-portal-search-radius.patch b/patches/server/0063-Add-configurable-portal-search-radius.patch similarity index 94% rename from patches/server/0056-Add-configurable-portal-search-radius.patch rename to patches/server/0063-Add-configurable-portal-search-radius.patch index 3c91c95ccb..4666a75e15 100644 --- a/patches/server/0056-Add-configurable-portal-search-radius.patch +++ b/patches/server/0063-Add-configurable-portal-search-radius.patch @@ -5,10 +5,10 @@ Subject: [PATCH] Add configurable portal search radius diff --git a/src/main/java/net/minecraft/world/entity/Entity.java b/src/main/java/net/minecraft/world/entity/Entity.java -index 7358b918d03b0dee0541843a952d6b8bf939d877..4d12403d8ae7c620e169e87b0a0ab48e28251811 100644 +index 3088cdbb55b3054394fd3405797539f58a46f70c..756b8be27488c81172fe05fa0361ef3866f99bee 100644 --- a/src/main/java/net/minecraft/world/entity/Entity.java +++ b/src/main/java/net/minecraft/world/entity/Entity.java -@@ -3122,7 +3122,13 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { +@@ -3174,7 +3174,13 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { double d0 = DimensionType.getTeleportationScale(this.level().dimensionType(), destination.dimensionType()); BlockPos blockposition = worldborder.clampToBounds(this.getX() * d0, this.getY(), this.getZ() * d0); // CraftBukkit start diff --git a/patches/server/0057-Add-velocity-warnings.patch b/patches/server/0064-Add-velocity-warnings.patch similarity index 91% rename from patches/server/0057-Add-velocity-warnings.patch rename to patches/server/0064-Add-velocity-warnings.patch index 17185beb42..604f376234 100644 --- a/patches/server/0057-Add-velocity-warnings.patch +++ b/patches/server/0064-Add-velocity-warnings.patch @@ -5,7 +5,7 @@ Subject: [PATCH] Add velocity warnings diff --git 
a/src/main/java/org/bukkit/craftbukkit/CraftServer.java b/src/main/java/org/bukkit/craftbukkit/CraftServer.java -index 7681198cbc8f36b9e860182a1f3541a4b22fc82e..6be92dea5f2c042a427de77b6ae5b9e45a662388 100644 +index 9c80c55e8c15aa847aea134dd8121ee9d0c24d1c..1cca43506306994e740278a581b0d33924d08491 100644 --- a/src/main/java/org/bukkit/craftbukkit/CraftServer.java +++ b/src/main/java/org/bukkit/craftbukkit/CraftServer.java @@ -296,6 +296,7 @@ public final class CraftServer implements Server { @@ -62,10 +62,10 @@ index 0a6926f9b6be67d4d710d2fbc6bd2b1fcb0677a0..c9275f73be7332f79312037954f9685f public double getHeight() { return this.getHandle().getBbHeight(); diff --git a/src/main/java/org/spigotmc/WatchdogThread.java b/src/main/java/org/spigotmc/WatchdogThread.java -index d5863b0b06384b25eaa33572fa02649795463ed8..2693cc933d746e40d8a47d96c6cb6799f0a2472f 100644 +index 11d7ede26b46d0bf9cced65e8c3bcc41c8b66dbf..3ad14bf0697e682a2e777baa8faeb323d127fb13 100644 --- a/src/main/java/org/spigotmc/WatchdogThread.java +++ b/src/main/java/org/spigotmc/WatchdogThread.java -@@ -80,7 +80,19 @@ public class WatchdogThread extends Thread +@@ -80,7 +80,19 @@ public final class WatchdogThread extends io.papermc.paper.util.TickThread // Pa log.log( Level.SEVERE, "During the run of the server, a physics stackoverflow was supressed" ); log.log( Level.SEVERE, "near " + net.minecraft.world.level.Level.lastPhysicsProblem ); } @@ -85,4 +85,4 @@ index d5863b0b06384b25eaa33572fa02649795463ed8..2693cc933d746e40d8a47d96c6cb6799 + // Paper end log.log( Level.SEVERE, "------------------------------" ); log.log( Level.SEVERE, "Server thread dump (Look for plugins here before reporting to Paper!):" ); // Paper - WatchdogThread.dumpThread( ManagementFactory.getThreadMXBean().getThreadInfo( MinecraftServer.getServer().serverThread.getId(), Integer.MAX_VALUE ), log ); + io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler.dumpAllChunkLoadInfo(isLongTimeout); // Paper // Paper - rewrite chunk system diff --git a/patches/server/0058-Configurable-inter-world-teleportation-safety.patch b/patches/server/0065-Configurable-inter-world-teleportation-safety.patch similarity index 91% rename from patches/server/0058-Configurable-inter-world-teleportation-safety.patch rename to patches/server/0065-Configurable-inter-world-teleportation-safety.patch index 677aac1b0d..39510f02c9 100644 --- a/patches/server/0058-Configurable-inter-world-teleportation-safety.patch +++ b/patches/server/0065-Configurable-inter-world-teleportation-safety.patch @@ -16,10 +16,10 @@ The wanted destination was on top of the emerald block however the player ended This only is the case if the player is teleporting between worlds. diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -index bd811f33bb2df565d876bc3433b1ce9ce58acba4..db7063bb4dddcf485f9e57505ae2bd31ade5376e 100644 +index c6ceb5044e3dcf5f512dc1efc04e0785b1caadb0..8f5d6d4ea4a8a7de95c755594ba5ec3fd3902158 100644 --- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -@@ -1140,7 +1140,7 @@ public class CraftPlayer extends CraftHumanEntity implements Player { +@@ -1182,7 +1182,7 @@ public class CraftPlayer extends CraftHumanEntity implements Player { entity.connection.teleport(to); } else { // The respawn reason should never be used if the passed location is non null. 
diff --git a/patches/server/0059-Add-exception-reporting-event.patch b/patches/server/0066-Add-exception-reporting-event.patch similarity index 95% rename from patches/server/0059-Add-exception-reporting-event.patch rename to patches/server/0066-Add-exception-reporting-event.patch index c9b0b128ef..8a7883a6f0 100644 --- a/patches/server/0059-Add-exception-reporting-event.patch +++ b/patches/server/0066-Add-exception-reporting-event.patch @@ -88,7 +88,7 @@ index c6fb4c33d7ea52b88d8fc0d90748cbab7387c565..fed09b886f4fa0006d160e5f2abb00df } diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java -index 42e81640354c2679570823347b28ee1155e7a00e..a7fb12648c2e655e191d8c805753ae0c05421d17 100644 +index 78041052d7ab2e6b60405ce7e02468458650db22..8149286827d9b609be47a4ded0413ca11f7858de 100644 --- a/src/main/java/net/minecraft/world/level/Level.java +++ b/src/main/java/net/minecraft/world/level/Level.java @@ -1,5 +1,10 @@ @@ -102,7 +102,7 @@ index 42e81640354c2679570823347b28ee1155e7a00e..a7fb12648c2e655e191d8c805753ae0c import com.google.common.collect.Lists; import com.mojang.serialization.Codec; import java.io.IOException; -@@ -725,6 +730,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { +@@ -730,6 +735,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { // Paper start - Prevent tile entity and entity crashes final String msg = String.format("Entity threw exception at %s:%s,%s,%s", entity.level().getWorld().getName(), entity.getX(), entity.getY(), entity.getZ()); MinecraftServer.LOGGER.error(msg, throwable); @@ -131,7 +131,7 @@ index f1675e35be0ab7670c875c6b0d1e982a3ae09d1e..b2bb9bbd3af414c50ec3f8e3e171a679 } diff --git a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java -index 8e1ca484356a7eddcdb37ef7ec01dea9864eec70..2006760ff24ec9ce5f71c107ddf942e550d03589 100644 +index 5d7290216d3a6ddb8e345a5b05da21ef28ed2307..fe6b2ade9f167c36cbac594a25ad36ee14208fa0 100644 --- a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java +++ b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java @@ -1,6 +1,7 @@ @@ -163,7 +163,7 @@ index 8e1ca484356a7eddcdb37ef7ec01dea9864eec70..2006760ff24ec9ce5f71c107ddf942e5 // CraftBukkit end } } -@@ -1060,6 +1067,7 @@ public class LevelChunk extends ChunkAccess { +@@ -1149,6 +1156,7 @@ public class LevelChunk extends ChunkAccess { // Paper start - Prevent tile entity and entity crashes final String msg = String.format("BlockEntity threw exception at %s:%s,%s,%s", LevelChunk.this.getLevel().getWorld().getName(), this.getPos().getX(), this.getPos().getY(), this.getPos().getZ()); net.minecraft.server.MinecraftServer.LOGGER.error(msg, throwable); @@ -172,10 +172,10 @@ index 8e1ca484356a7eddcdb37ef7ec01dea9864eec70..2006760ff24ec9ce5f71c107ddf942e5 // Paper end // Spigot start diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java -index d9daf07132c46548964a75588b69d7a74680e917..67a2f3c02aa4983b3ec2df073821190ddb36543c 100644 +index e68205fe7169c7c5b7c6fdada2ee97d86107ca97..aa8972fd1a1fade05d60ab69efb8ff24f344508a 100644 --- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java +++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java -@@ -274,6 +274,7 @@ public class RegionFile implements AutoCloseable { +@@ -275,6 +275,7 @@ public class RegionFile 
implements AutoCloseable { return true; } } catch (IOException ioexception) { @@ -183,7 +183,7 @@ index d9daf07132c46548964a75588b69d7a74680e917..67a2f3c02aa4983b3ec2df073821190d return false; } } -@@ -355,6 +356,7 @@ public class RegionFile implements AutoCloseable { +@@ -356,6 +357,7 @@ public class RegionFile implements AutoCloseable { ((java.nio.Buffer) buf).position(5); // CraftBukkit - decompile error filechannel.write(buf); } catch (Throwable throwable) { diff --git a/patches/server/0060-Don-t-nest-if-we-don-t-need-to-when-cerealising-text.patch b/patches/server/0067-Don-t-nest-if-we-don-t-need-to-when-cerealising-text.patch similarity index 100% rename from patches/server/0060-Don-t-nest-if-we-don-t-need-to-when-cerealising-text.patch rename to patches/server/0067-Don-t-nest-if-we-don-t-need-to-when-cerealising-text.patch diff --git a/patches/server/0061-Disable-Scoreboards-for-non-players-by-default.patch b/patches/server/0068-Disable-Scoreboards-for-non-players-by-default.patch similarity index 92% rename from patches/server/0061-Disable-Scoreboards-for-non-players-by-default.patch rename to patches/server/0068-Disable-Scoreboards-for-non-players-by-default.patch index 02f9f25eb2..9fe22804ea 100644 --- a/patches/server/0061-Disable-Scoreboards-for-non-players-by-default.patch +++ b/patches/server/0068-Disable-Scoreboards-for-non-players-by-default.patch @@ -11,10 +11,10 @@ So avoid looking up scoreboards and short circuit to the "not on a team" logic which is most likely to be true. diff --git a/src/main/java/net/minecraft/world/entity/Entity.java b/src/main/java/net/minecraft/world/entity/Entity.java -index 4d12403d8ae7c620e169e87b0a0ab48e28251811..bb8de860d826e4c92ffdbd7ef2675458d372bbdf 100644 +index 756b8be27488c81172fe05fa0361ef3866f99bee..194c0f9e8b537d415d7cdedf9cd5d4870b60fb08 100644 --- a/src/main/java/net/minecraft/world/entity/Entity.java +++ b/src/main/java/net/minecraft/world/entity/Entity.java -@@ -2753,6 +2753,7 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { +@@ -2805,6 +2805,7 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { @Nullable public Team getTeam() { diff --git a/patches/server/0062-Add-methods-for-working-with-arrows-stuck-in-living-.patch b/patches/server/0069-Add-methods-for-working-with-arrows-stuck-in-living-.patch similarity index 100% rename from patches/server/0062-Add-methods-for-working-with-arrows-stuck-in-living-.patch rename to patches/server/0069-Add-methods-for-working-with-arrows-stuck-in-living-.patch diff --git a/patches/server/0063-Complete-resource-pack-API.patch b/patches/server/0070-Complete-resource-pack-API.patch similarity index 92% rename from patches/server/0063-Complete-resource-pack-API.patch rename to patches/server/0070-Complete-resource-pack-API.patch index a6a04c88b0..8e5c9d35e4 100644 --- a/patches/server/0063-Complete-resource-pack-API.patch +++ b/patches/server/0070-Complete-resource-pack-API.patch @@ -5,10 +5,10 @@ Subject: [PATCH] Complete resource pack API diff --git a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -index 42a44820a3c13e2b0e29e02ed384c1918c9a0b17..94636c92758568051f829aed59aab8728e7e7252 100644 +index ddd437906801973e11386294e7e3f6846cc11cbc..a91cc53f9d601bbc51988b2b7855c6ebcc68d780 100644 --- a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java +++ 
b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -@@ -1748,8 +1748,11 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic +@@ -1755,8 +1755,11 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic ServerGamePacketListenerImpl.LOGGER.info("Disconnecting {} due to resource pack rejection", this.player.getName()); this.disconnect(Component.translatable("multiplayer.requiredTexturePrompt.disconnect")); } @@ -23,7 +23,7 @@ index 42a44820a3c13e2b0e29e02ed384c1918c9a0b17..94636c92758568051f829aed59aab872 @Override diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -index db7063bb4dddcf485f9e57505ae2bd31ade5376e..04e9a84b36f7f702e34216513348a0871ade22e5 100644 +index 8f5d6d4ea4a8a7de95c755594ba5ec3fd3902158..f6006f696550126d1ba78fdc49821e20cd19b1f3 100644 --- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java @@ -178,6 +178,10 @@ public class CraftPlayer extends CraftHumanEntity implements Player { @@ -37,7 +37,7 @@ index db7063bb4dddcf485f9e57505ae2bd31ade5376e..04e9a84b36f7f702e34216513348a087 public CraftPlayer(CraftServer server, ServerPlayer entity) { super(server, entity); -@@ -2292,6 +2296,45 @@ public class CraftPlayer extends CraftHumanEntity implements Player { +@@ -2334,6 +2338,45 @@ public class CraftPlayer extends CraftHumanEntity implements Player { public boolean getAffectsSpawning() { return this.getHandle().affectsSpawning; } diff --git a/patches/server/0064-Default-loading-permissions.yml-before-plugins.patch b/patches/server/0071-Default-loading-permissions.yml-before-plugins.patch similarity index 95% rename from patches/server/0064-Default-loading-permissions.yml-before-plugins.patch rename to patches/server/0071-Default-loading-permissions.yml-before-plugins.patch index e70f387135..2e6260b395 100644 --- a/patches/server/0064-Default-loading-permissions.yml-before-plugins.patch +++ b/patches/server/0071-Default-loading-permissions.yml-before-plugins.patch @@ -16,7 +16,7 @@ modify that. Under the previous logic, plugins were unable (cleanly) override pe A config option has been added for those who depend on the previous behavior, but I don't expect that. 
diff --git a/src/main/java/org/bukkit/craftbukkit/CraftServer.java b/src/main/java/org/bukkit/craftbukkit/CraftServer.java -index 6be92dea5f2c042a427de77b6ae5b9e45a662388..4edf6a317ba5ce46e098c4ce1836c147853eadb1 100644 +index 1cca43506306994e740278a581b0d33924d08491..e509ab35bf8ffba3c1e04f1236f98c24c1e010f4 100644 --- a/src/main/java/org/bukkit/craftbukkit/CraftServer.java +++ b/src/main/java/org/bukkit/craftbukkit/CraftServer.java @@ -457,6 +457,7 @@ public final class CraftServer implements Server { diff --git a/patches/server/0065-Allow-Reloading-of-Custom-Permissions.patch b/patches/server/0072-Allow-Reloading-of-Custom-Permissions.patch similarity index 94% rename from patches/server/0065-Allow-Reloading-of-Custom-Permissions.patch rename to patches/server/0072-Allow-Reloading-of-Custom-Permissions.patch index f4eb2fe050..2c35743bdb 100644 --- a/patches/server/0065-Allow-Reloading-of-Custom-Permissions.patch +++ b/patches/server/0072-Allow-Reloading-of-Custom-Permissions.patch @@ -6,7 +6,7 @@ Subject: [PATCH] Allow Reloading of Custom Permissions https://github.com/PaperMC/Paper/issues/49 diff --git a/src/main/java/org/bukkit/craftbukkit/CraftServer.java b/src/main/java/org/bukkit/craftbukkit/CraftServer.java -index 4edf6a317ba5ce46e098c4ce1836c147853eadb1..2fa6753e7bd9c1a6c08b467fe6d2d7ca3943f0a7 100644 +index e509ab35bf8ffba3c1e04f1236f98c24c1e010f4..dea53897919d46b90c99577fbb1985b2d0c65b5e 100644 --- a/src/main/java/org/bukkit/craftbukkit/CraftServer.java +++ b/src/main/java/org/bukkit/craftbukkit/CraftServer.java @@ -2603,5 +2603,23 @@ public final class CraftServer implements Server { diff --git a/patches/server/0066-Remove-Metadata-on-reload.patch b/patches/server/0073-Remove-Metadata-on-reload.patch similarity index 93% rename from patches/server/0066-Remove-Metadata-on-reload.patch rename to patches/server/0073-Remove-Metadata-on-reload.patch index 707db5fed2..6bff5918c0 100644 --- a/patches/server/0066-Remove-Metadata-on-reload.patch +++ b/patches/server/0073-Remove-Metadata-on-reload.patch @@ -7,7 +7,7 @@ Metadata is not meant to persist reload as things break badly with non primitive This will remove metadata on reload so it does not crash everything if a plugin uses it. 
diff --git a/src/main/java/org/bukkit/craftbukkit/CraftServer.java b/src/main/java/org/bukkit/craftbukkit/CraftServer.java -index 2fa6753e7bd9c1a6c08b467fe6d2d7ca3943f0a7..0953f4555bc18176f35d91754ad308143899a8a7 100644 +index dea53897919d46b90c99577fbb1985b2d0c65b5e..dbad9e72121562d22f8476bade4032661e280066 100644 --- a/src/main/java/org/bukkit/craftbukkit/CraftServer.java +++ b/src/main/java/org/bukkit/craftbukkit/CraftServer.java @@ -962,8 +962,16 @@ public final class CraftServer implements Server { diff --git a/patches/server/0067-Handle-Item-Meta-Inconsistencies.patch b/patches/server/0074-Handle-Item-Meta-Inconsistencies.patch similarity index 100% rename from patches/server/0067-Handle-Item-Meta-Inconsistencies.patch rename to patches/server/0074-Handle-Item-Meta-Inconsistencies.patch diff --git a/patches/server/0068-Configurable-Non-Player-Arrow-Despawn-Rate.patch b/patches/server/0075-Configurable-Non-Player-Arrow-Despawn-Rate.patch similarity index 100% rename from patches/server/0068-Configurable-Non-Player-Arrow-Despawn-Rate.patch rename to patches/server/0075-Configurable-Non-Player-Arrow-Despawn-Rate.patch diff --git a/patches/server/0069-Add-World-Util-Methods.patch b/patches/server/0076-Add-World-Util-Methods.patch similarity index 92% rename from patches/server/0069-Add-World-Util-Methods.patch rename to patches/server/0076-Add-World-Util-Methods.patch index 83fe0acd35..1610dde072 100644 --- a/patches/server/0069-Add-World-Util-Methods.patch +++ b/patches/server/0076-Add-World-Util-Methods.patch @@ -6,7 +6,7 @@ Subject: [PATCH] Add World Util Methods Methods that can be used for other patches to help improve logic. diff --git a/src/main/java/net/minecraft/server/level/ServerLevel.java b/src/main/java/net/minecraft/server/level/ServerLevel.java -index ef48e76ca18576e631d5d2f1652d3449d558a582..71be3d403dc0c345b447685fa339cb1c455dd2a8 100644 +index feee30aa0bccfa40765f1c4a179ba04907b11433..4598ca4cd9da931db33ca26576bfbcdf7df99094 100644 --- a/src/main/java/net/minecraft/server/level/ServerLevel.java +++ b/src/main/java/net/minecraft/server/level/ServerLevel.java @@ -221,7 +221,7 @@ public class ServerLevel extends Level implements WorldGenLevel { @@ -19,7 +19,7 @@ index ef48e76ca18576e631d5d2f1652d3449d558a582..71be3d403dc0c345b447685fa339cb1c } diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java -index a7fb12648c2e655e191d8c805753ae0c05421d17..5e4b102e747a053280871879c3c18fc3d3d2d18a 100644 +index 8149286827d9b609be47a4ded0413ca11f7858de..809f7db469583ea90fbb165cf180dc87055c6105 100644 --- a/src/main/java/net/minecraft/world/level/Level.java +++ b/src/main/java/net/minecraft/world/level/Level.java @@ -342,6 +342,22 @@ public abstract class Level implements LevelAccessor, AutoCloseable { diff --git a/patches/server/0070-Custom-replacement-for-eaten-items.patch b/patches/server/0077-Custom-replacement-for-eaten-items.patch similarity index 100% rename from patches/server/0070-Custom-replacement-for-eaten-items.patch rename to patches/server/0077-Custom-replacement-for-eaten-items.patch diff --git a/patches/server/0071-handle-NaN-health-absorb-values-and-repair-bad-data.patch b/patches/server/0078-handle-NaN-health-absorb-values-and-repair-bad-data.patch similarity index 94% rename from patches/server/0071-handle-NaN-health-absorb-values-and-repair-bad-data.patch rename to patches/server/0078-handle-NaN-health-absorb-values-and-repair-bad-data.patch index 0b7f0a14f9..de6a37eea2 100644 --- 
a/patches/server/0071-handle-NaN-health-absorb-values-and-repair-bad-data.patch +++ b/patches/server/0078-handle-NaN-health-absorb-values-and-repair-bad-data.patch @@ -44,10 +44,10 @@ index 1238da8b035a0b0dd3d00557ca0de7a82f5fdf53..5e40ee2695b7ed50fddc0e8226f0b1b4 } diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -index 04e9a84b36f7f702e34216513348a0871ade22e5..877f82735c407c1d567628fafc7c7fd6adebb410 100644 +index f6006f696550126d1ba78fdc49821e20cd19b1f3..af7f97b605b81a0b2dd812f491b6e324660fe260 100644 --- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -@@ -2096,6 +2096,7 @@ public class CraftPlayer extends CraftHumanEntity implements Player { +@@ -2138,6 +2138,7 @@ public class CraftPlayer extends CraftHumanEntity implements Player { } public void setRealHealth(double health) { diff --git a/patches/server/0072-Use-a-Shared-Random-for-Entities.patch b/patches/server/0079-Use-a-Shared-Random-for-Entities.patch similarity index 96% rename from patches/server/0072-Use-a-Shared-Random-for-Entities.patch rename to patches/server/0079-Use-a-Shared-Random-for-Entities.patch index 1a4de782ac..b6820a110f 100644 --- a/patches/server/0072-Use-a-Shared-Random-for-Entities.patch +++ b/patches/server/0079-Use-a-Shared-Random-for-Entities.patch @@ -6,7 +6,7 @@ Subject: [PATCH] Use a Shared Random for Entities Reduces memory usage and provides ensures more randomness, Especially since a lot of garbage entity objects get created. diff --git a/src/main/java/net/minecraft/world/entity/Entity.java b/src/main/java/net/minecraft/world/entity/Entity.java -index bb8de860d826e4c92ffdbd7ef2675458d372bbdf..09cf1125221bdbebddabfc46095c3dbd153714c1 100644 +index 194c0f9e8b537d415d7cdedf9cd5d4870b60fb08..3f8b0124e5c2a01f1cf3a344b8dd4075817cb001 100644 --- a/src/main/java/net/minecraft/world/entity/Entity.java +++ b/src/main/java/net/minecraft/world/entity/Entity.java @@ -162,6 +162,79 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { @@ -89,7 +89,7 @@ index bb8de860d826e4c92ffdbd7ef2675458d372bbdf..09cf1125221bdbebddabfc46095c3dbd private CraftEntity bukkitEntity; public CraftEntity getBukkitEntity() { -@@ -354,7 +427,7 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { +@@ -406,7 +479,7 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { this.bb = Entity.INITIAL_AABB; this.stuckSpeedMultiplier = Vec3.ZERO; this.nextStep = 1.0F; diff --git a/patches/server/0073-Configurable-spawn-chances-for-skeleton-horses.patch b/patches/server/0080-Configurable-spawn-chances-for-skeleton-horses.patch similarity index 90% rename from patches/server/0073-Configurable-spawn-chances-for-skeleton-horses.patch rename to patches/server/0080-Configurable-spawn-chances-for-skeleton-horses.patch index 967b6647c6..6b1ce4eb11 100644 --- a/patches/server/0073-Configurable-spawn-chances-for-skeleton-horses.patch +++ b/patches/server/0080-Configurable-spawn-chances-for-skeleton-horses.patch @@ -5,10 +5,10 @@ Subject: [PATCH] Configurable spawn chances for skeleton horses diff --git a/src/main/java/net/minecraft/server/level/ServerLevel.java b/src/main/java/net/minecraft/server/level/ServerLevel.java -index 71be3d403dc0c345b447685fa339cb1c455dd2a8..16e0887abe2643b13f7fefd986783e88d37fbdfb 100644 +index 4598ca4cd9da931db33ca26576bfbcdf7df99094..f6be0fefb461c1b5fd0feb5553d36b5817b96e3a 
100644 --- a/src/main/java/net/minecraft/server/level/ServerLevel.java +++ b/src/main/java/net/minecraft/server/level/ServerLevel.java -@@ -607,7 +607,7 @@ public class ServerLevel extends Level implements WorldGenLevel { +@@ -756,7 +756,7 @@ public class ServerLevel extends Level implements WorldGenLevel { blockposition = this.findLightningTargetAround(this.getBlockRandomPos(j, 0, k, 15)); if (this.isRainingAt(blockposition)) { DifficultyInstance difficultydamagescaler = this.getCurrentDifficultyAt(blockposition); diff --git a/patches/server/0074-Optimize-isInWorldBounds-and-getBlockState-for-inlin.patch b/patches/server/0081-Optimize-isInWorldBounds-and-getBlockState-for-inlin.patch similarity index 97% rename from patches/server/0074-Optimize-isInWorldBounds-and-getBlockState-for-inlin.patch rename to patches/server/0081-Optimize-isInWorldBounds-and-getBlockState-for-inlin.patch index b88e09df2e..be028d4d62 100644 --- a/patches/server/0074-Optimize-isInWorldBounds-and-getBlockState-for-inlin.patch +++ b/patches/server/0081-Optimize-isInWorldBounds-and-getBlockState-for-inlin.patch @@ -29,7 +29,7 @@ index 1b29c6872ebe54351f28c1f1f38b22561ba785ee..40950db0c242c65dfd4de247c8624935 this.x = x; this.y = y; diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java -index 5e4b102e747a053280871879c3c18fc3d3d2d18a..dcf1894ed6939b47cd8323421d5734fd5af6dedb 100644 +index 809f7db469583ea90fbb165cf180dc87055c6105..e5194f0650af2bce8aac24e34752b21015b92d4e 100644 --- a/src/main/java/net/minecraft/world/level/Level.java +++ b/src/main/java/net/minecraft/world/level/Level.java @@ -284,7 +284,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { @@ -88,7 +88,7 @@ index 60e760b42dd6471a229dfd45490dcf8c51979d35..4a3ac7dedf5cb1e76f16ec4f18e82afc @Override public FluidState getFluidState(BlockPos pos) { diff --git a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java -index 2006760ff24ec9ce5f71c107ddf942e550d03589..b7e03ac0e07a48c5d3a401a918ab6cc2a7daafec 100644 +index fe6b2ade9f167c36cbac594a25ad36ee14208fa0..4e93e9ce7a7f939f802e51072efd389a86b42fc4 100644 --- a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java +++ b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java @@ -295,12 +295,29 @@ public class LevelChunk extends ChunkAccess { diff --git a/patches/server/0075-Only-process-BlockPhysicsEvent-if-a-plugin-has-a-lis.patch b/patches/server/0082-Only-process-BlockPhysicsEvent-if-a-plugin-has-a-lis.patch similarity index 91% rename from patches/server/0075-Only-process-BlockPhysicsEvent-if-a-plugin-has-a-lis.patch rename to patches/server/0082-Only-process-BlockPhysicsEvent-if-a-plugin-has-a-lis.patch index d924936b78..69cab8d0d5 100644 --- a/patches/server/0075-Only-process-BlockPhysicsEvent-if-a-plugin-has-a-lis.patch +++ b/patches/server/0082-Only-process-BlockPhysicsEvent-if-a-plugin-has-a-lis.patch @@ -6,10 +6,10 @@ Subject: [PATCH] Only process BlockPhysicsEvent if a plugin has a listener Saves on some object allocation and processing when no plugin listens to this diff --git a/src/main/java/net/minecraft/server/MinecraftServer.java b/src/main/java/net/minecraft/server/MinecraftServer.java -index d5ad3368a397e9fc02506187ccb1c386afd9ce2b..7fca70658ecdc89b79381ca2ab33b931ce6871ae 100644 +index 821725460b62ebadedb789f4408ef172416c2092..81abb732e2bb3bca683028d505e7485052c0ec8d 100644 --- 
a/src/main/java/net/minecraft/server/MinecraftServer.java +++ b/src/main/java/net/minecraft/server/MinecraftServer.java -@@ -1373,6 +1373,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop { return worldserver + " " + worldserver.dimension().location(); diff --git a/src/main/java/net/minecraft/server/level/ServerLevel.java b/src/main/java/net/minecraft/server/level/ServerLevel.java -index 16e0887abe2643b13f7fefd986783e88d37fbdfb..5c2ca51139b0647a2c7ae1b268b6d151dc884eca 100644 +index f6be0fefb461c1b5fd0feb5553d36b5817b96e3a..33c6a673d98323a3790b3e494841dffa9ce5f4f5 100644 --- a/src/main/java/net/minecraft/server/level/ServerLevel.java +++ b/src/main/java/net/minecraft/server/level/ServerLevel.java @@ -220,6 +220,7 @@ public class ServerLevel extends Level implements WorldGenLevel { @@ -30,10 +30,10 @@ index 16e0887abe2643b13f7fefd986783e88d37fbdfb..5c2ca51139b0647a2c7ae1b268b6d151 @Override public LevelChunk getChunkIfLoaded(int x, int z) { // Paper - this was added in world too but keeping here for NMS ABI return this.chunkSource.getChunk(x, z, false); diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java -index dcf1894ed6939b47cd8323421d5734fd5af6dedb..fc48c03fa4f3c7e2463ef2ce477ce1e71d1e3faf 100644 +index e5194f0650af2bce8aac24e34752b21015b92d4e..127f4e98e11a57eb3dddbc8efdb0aa33fda37924 100644 --- a/src/main/java/net/minecraft/world/level/Level.java +++ b/src/main/java/net/minecraft/world/level/Level.java -@@ -491,7 +491,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { +@@ -496,7 +496,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { // CraftBukkit start iblockdata1.updateIndirectNeighbourShapes(this, blockposition, k, j - 1); // Don't call an event for the old block to limit event spam CraftWorld world = ((ServerLevel) this).getWorld(); diff --git a/patches/server/0076-Entity-AddTo-RemoveFrom-World-Events.patch b/patches/server/0083-Entity-AddTo-RemoveFrom-World-Events.patch similarity index 83% rename from patches/server/0076-Entity-AddTo-RemoveFrom-World-Events.patch rename to patches/server/0083-Entity-AddTo-RemoveFrom-World-Events.patch index f0a92f7c69..7b28d7371f 100644 --- a/patches/server/0076-Entity-AddTo-RemoveFrom-World-Events.patch +++ b/patches/server/0083-Entity-AddTo-RemoveFrom-World-Events.patch @@ -5,10 +5,10 @@ Subject: [PATCH] Entity AddTo/RemoveFrom World Events diff --git a/src/main/java/net/minecraft/server/level/ServerLevel.java b/src/main/java/net/minecraft/server/level/ServerLevel.java -index 5c2ca51139b0647a2c7ae1b268b6d151dc884eca..79b572c8e3481ea4a6ac523c0a2f25204cf44e2d 100644 +index 33c6a673d98323a3790b3e494841dffa9ce5f4f5..507cab9d689b774b320fac00f7760c4143957b67 100644 --- a/src/main/java/net/minecraft/server/level/ServerLevel.java +++ b/src/main/java/net/minecraft/server/level/ServerLevel.java -@@ -2126,6 +2126,7 @@ public class ServerLevel extends Level implements WorldGenLevel { +@@ -2292,6 +2292,7 @@ public class ServerLevel extends Level implements WorldGenLevel { entity.setOrigin(entity.getOriginVector().toLocation(getWorld())); } // Paper end @@ -16,7 +16,7 @@ index 5c2ca51139b0647a2c7ae1b268b6d151dc884eca..79b572c8e3481ea4a6ac523c0a2f2520 } public void onTrackingEnd(Entity entity) { -@@ -2201,6 +2202,7 @@ public class ServerLevel extends Level implements WorldGenLevel { +@@ -2367,6 +2368,7 @@ public class ServerLevel extends Level implements WorldGenLevel { } } // CraftBukkit end diff --git 
a/patches/server/0077-Configurable-Chunk-Inhabited-Time.patch b/patches/server/0084-Configurable-Chunk-Inhabited-Time.patch similarity index 92% rename from patches/server/0077-Configurable-Chunk-Inhabited-Time.patch rename to patches/server/0084-Configurable-Chunk-Inhabited-Time.patch index b5d1987c46..ab8002ae7f 100644 --- a/patches/server/0077-Configurable-Chunk-Inhabited-Time.patch +++ b/patches/server/0084-Configurable-Chunk-Inhabited-Time.patch @@ -11,7 +11,7 @@ For people who want all chunks to be treated equally, you can chose a fixed valu This allows to fine-tune vanilla gameplay. diff --git a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java -index b7e03ac0e07a48c5d3a401a918ab6cc2a7daafec..419125e865cb086f0ef75f69749cc03a178d9a41 100644 +index 4e93e9ce7a7f939f802e51072efd389a86b42fc4..becec8ded3f7ed53998b55523793e6e9cd05d492 100644 --- a/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java +++ b/src/main/java/net/minecraft/world/level/chunk/LevelChunk.java @@ -280,6 +280,13 @@ public class LevelChunk extends ChunkAccess { diff --git a/patches/server/0078-EntityPathfindEvent.patch b/patches/server/0085-EntityPathfindEvent.patch similarity index 100% rename from patches/server/0078-EntityPathfindEvent.patch rename to patches/server/0085-EntityPathfindEvent.patch diff --git a/patches/server/0079-Sanitise-RegionFileCache-and-make-configurable.patch b/patches/server/0086-Sanitise-RegionFileCache-and-make-configurable.patch similarity index 81% rename from patches/server/0079-Sanitise-RegionFileCache-and-make-configurable.patch rename to patches/server/0086-Sanitise-RegionFileCache-and-make-configurable.patch index 58aebba10d..53b372aa14 100644 --- a/patches/server/0079-Sanitise-RegionFileCache-and-make-configurable.patch +++ b/patches/server/0086-Sanitise-RegionFileCache-and-make-configurable.patch @@ -11,13 +11,13 @@ The implementation uses a LinkedHashMap as an LRU cache (modified from HashMap). The maximum size of the RegionFileCache is also made configurable. 
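[Editorial sketch, not part of the patch: the commit message above describes an LRU cache built from LinkedHashMap with a configurable maximum size. The hunk that follows shows the patch's actual eviction call (removeLast() plus close()); the standalone class below only illustrates the general LinkedHashMap-as-LRU pattern and is not the patch's implementation.]

import java.util.LinkedHashMap;
import java.util.Map;

// Generic LRU cache: access-order iteration plus removeEldestEntry gives
// automatic eviction of the least-recently-used entry once maxSize is exceeded.
final class LruCache<K, V> extends LinkedHashMap<K, V> {
    private final int maxSize;

    LruCache(int maxSize) {
        super(16, 0.75f, true); // true = order entries by access, not insertion
        this.maxSize = maxSize;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        // For a region-file cache this is where the evicted file would be closed.
        return this.size() > this.maxSize;
    }
}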
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java -index b9b50c56e79297bb824a92355f437a5d4d7e6760..e4c706d553a3d7058a32409ec7af8771c92d9777 100644 +index fe8bb0037bb7f317fc32ac34461f4eb3a1f397f2..8bff53217d087a9d53aa3738660dde563ee125f1 100644 --- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java +++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java -@@ -36,7 +36,7 @@ public class RegionFileStorage implements AutoCloseable { - if (regionfile != null) { - return regionfile; - } else { +@@ -91,7 +91,7 @@ public class RegionFileStorage implements AutoCloseable { + return null; + } + // Paper end - cache regionfile does not exist state - if (this.regionCache.size() >= 256) { + if (this.regionCache.size() >= io.papermc.paper.configuration.GlobalConfiguration.get().misc.regionFileCacheSize) { // Paper - configurable ((RegionFile) this.regionCache.removeLast()).close(); diff --git a/patches/server/0080-Do-not-load-chunks-for-Pathfinding.patch b/patches/server/0087-Do-not-load-chunks-for-Pathfinding.patch similarity index 100% rename from patches/server/0080-Do-not-load-chunks-for-Pathfinding.patch rename to patches/server/0087-Do-not-load-chunks-for-Pathfinding.patch diff --git a/patches/server/0081-Add-PlayerUseUnknownEntityEvent.patch b/patches/server/0088-Add-PlayerUseUnknownEntityEvent.patch similarity index 95% rename from patches/server/0081-Add-PlayerUseUnknownEntityEvent.patch rename to patches/server/0088-Add-PlayerUseUnknownEntityEvent.patch index 97253a1100..1171770f13 100644 --- a/patches/server/0081-Add-PlayerUseUnknownEntityEvent.patch +++ b/patches/server/0088-Add-PlayerUseUnknownEntityEvent.patch @@ -22,10 +22,10 @@ index a5d57cc862036012d83b090bb1b3ccf4115a88b3..21068f766b75c414d5818073b7dca083 static final ServerboundInteractPacket.Action ATTACK_ACTION = new ServerboundInteractPacket.Action() { @Override diff --git a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -index 94636c92758568051f829aed59aab8728e7e7252..c65282a85e2822b86f56c528ab838e628ea27411 100644 +index a91cc53f9d601bbc51988b2b7855c6ebcc68d780..2b1311185bb08592038e256d860a722fbd4d9ffc 100644 --- a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java +++ b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -@@ -2534,8 +2534,37 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic +@@ -2541,8 +2541,37 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic }); } } diff --git a/patches/server/0082-Configurable-Grass-Spread-Tick-Rate.patch b/patches/server/0089-Configurable-Grass-Spread-Tick-Rate.patch similarity index 100% rename from patches/server/0082-Configurable-Grass-Spread-Tick-Rate.patch rename to patches/server/0089-Configurable-Grass-Spread-Tick-Rate.patch diff --git a/patches/server/0083-Fix-Cancelling-BlockPlaceEvent-triggering-physics.patch b/patches/server/0090-Fix-Cancelling-BlockPlaceEvent-triggering-physics.patch similarity index 82% rename from patches/server/0083-Fix-Cancelling-BlockPlaceEvent-triggering-physics.patch rename to patches/server/0090-Fix-Cancelling-BlockPlaceEvent-triggering-physics.patch index 03b7844ddb..953b68fa47 100644 --- 
a/patches/server/0083-Fix-Cancelling-BlockPlaceEvent-triggering-physics.patch +++ b/patches/server/0090-Fix-Cancelling-BlockPlaceEvent-triggering-physics.patch @@ -5,10 +5,10 @@ Subject: [PATCH] Fix Cancelling BlockPlaceEvent triggering physics diff --git a/src/main/java/net/minecraft/server/level/ServerLevel.java b/src/main/java/net/minecraft/server/level/ServerLevel.java -index 79b572c8e3481ea4a6ac523c0a2f25204cf44e2d..6896ead71f87989c2fa90d0339eedfd08ac49dd1 100644 +index 507cab9d689b774b320fac00f7760c4143957b67..508c1f2874db3add98aad29bd4eee6c9f5d58006 100644 --- a/src/main/java/net/minecraft/server/level/ServerLevel.java +++ b/src/main/java/net/minecraft/server/level/ServerLevel.java -@@ -1366,6 +1366,7 @@ public class ServerLevel extends Level implements WorldGenLevel { +@@ -1517,6 +1517,7 @@ public class ServerLevel extends Level implements WorldGenLevel { @Override public void updateNeighborsAt(BlockPos pos, Block sourceBlock) { diff --git a/patches/server/0084-Optimize-DataBits.patch b/patches/server/0091-Optimize-DataBits.patch similarity index 100% rename from patches/server/0084-Optimize-DataBits.patch rename to patches/server/0091-Optimize-DataBits.patch diff --git a/patches/server/0085-Option-to-use-vanilla-per-world-scoreboard-coloring-.patch b/patches/server/0092-Option-to-use-vanilla-per-world-scoreboard-coloring-.patch similarity index 100% rename from patches/server/0085-Option-to-use-vanilla-per-world-scoreboard-coloring-.patch rename to patches/server/0092-Option-to-use-vanilla-per-world-scoreboard-coloring-.patch diff --git a/patches/server/0086-Configurable-Player-Collision.patch b/patches/server/0093-Configurable-Player-Collision.patch similarity index 97% rename from patches/server/0086-Configurable-Player-Collision.patch rename to patches/server/0093-Configurable-Player-Collision.patch index 6ae0969008..d307875c33 100644 --- a/patches/server/0086-Configurable-Player-Collision.patch +++ b/patches/server/0093-Configurable-Player-Collision.patch @@ -18,7 +18,7 @@ index 1294b38262505b0d54089e428df9b363219de1f0..ee37ec0de1ca969144824427ae42b0c8 buf.writeComponent(this.playerPrefix); buf.writeComponent(this.playerSuffix); diff --git a/src/main/java/net/minecraft/server/MinecraftServer.java b/src/main/java/net/minecraft/server/MinecraftServer.java -index 7fca70658ecdc89b79381ca2ab33b931ce6871ae..feee1da3091fdbcef0336ab0ab85fd9f52ccac4b 100644 +index 81abb732e2bb3bca683028d505e7485052c0ec8d..d2b07b5f0ff7466f22f2c25c7e0cb9a00b25f4f0 100644 --- a/src/main/java/net/minecraft/server/MinecraftServer.java +++ b/src/main/java/net/minecraft/server/MinecraftServer.java @@ -589,6 +589,20 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop { -@@ -2332,9 +2334,8 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop this.getMinX() && (double) pos.getMinBlockX() < this.getMaxX() && (double) pos.getMaxBlockZ() > this.getMinZ() && (double) pos.getMinBlockZ() < this.getMaxZ(); } diff --git a/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java b/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java -index 8bb14230beb4b012f38997eec70934b96bae4db5..d19585f0833d13867f242768dc7ed952add87e05 100644 +index 5ca3987683f7cecbce24bac434dc387bb5e9bf08..56033c41c2be567d6787420d319d4603fc0a1d10 100644 --- a/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java +++ b/src/main/java/net/minecraft/world/level/chunk/ChunkGenerator.java @@ -217,6 +217,7 @@ public abstract class ChunkGenerator { diff --git 
a/patches/server/0116-Configurable-Cartographer-Treasure-Maps.patch b/patches/server/0123-Configurable-Cartographer-Treasure-Maps.patch similarity index 100% rename from patches/server/0116-Configurable-Cartographer-Treasure-Maps.patch rename to patches/server/0123-Configurable-Cartographer-Treasure-Maps.patch diff --git a/patches/server/0117-Add-API-methods-to-control-if-armour-stands-can-move.patch b/patches/server/0124-Add-API-methods-to-control-if-armour-stands-can-move.patch similarity index 100% rename from patches/server/0117-Add-API-methods-to-control-if-armour-stands-can-move.patch rename to patches/server/0124-Add-API-methods-to-control-if-armour-stands-can-move.patch diff --git a/patches/server/0118-String-based-Action-Bar-API.patch b/patches/server/0125-String-based-Action-Bar-API.patch similarity index 94% rename from patches/server/0118-String-based-Action-Bar-API.patch rename to patches/server/0125-String-based-Action-Bar-API.patch index 9bfc3f6460..9fdf744ebd 100644 --- a/patches/server/0118-String-based-Action-Bar-API.patch +++ b/patches/server/0125-String-based-Action-Bar-API.patch @@ -26,10 +26,10 @@ index 32ef3edebe94a2014168b7e438752a80b2687e5f..ab6c58eed6707ab7b0aa3e7549a871ad // Paper end buf.writeComponent(this.text); diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -index 877f82735c407c1d567628fafc7c7fd6adebb410..1cccc331222b24ac99ad7f6897a81ecd383ce7d3 100644 +index af7f97b605b81a0b2dd812f491b6e324660fe260..e1c8a559644d84a6787c44431e65d7eee4dfc29a 100644 --- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -@@ -289,6 +289,29 @@ public class CraftPlayer extends CraftHumanEntity implements Player { +@@ -331,6 +331,29 @@ public class CraftPlayer extends CraftHumanEntity implements Player { } // Paper start diff --git a/patches/server/0119-Properly-fix-item-duplication-bug.patch b/patches/server/0126-Properly-fix-item-duplication-bug.patch similarity index 81% rename from patches/server/0119-Properly-fix-item-duplication-bug.patch rename to patches/server/0126-Properly-fix-item-duplication-bug.patch index 31728053eb..7da9653280 100644 --- a/patches/server/0119-Properly-fix-item-duplication-bug.patch +++ b/patches/server/0126-Properly-fix-item-duplication-bug.patch @@ -6,10 +6,10 @@ Subject: [PATCH] Properly fix item duplication bug Credit to prplz for figuring out the real issue diff --git a/src/main/java/net/minecraft/server/level/ServerPlayer.java b/src/main/java/net/minecraft/server/level/ServerPlayer.java -index 95d9824ad7a00e8d75ce243403004193d0bee605..681e351b276798e2a70010e769e6aa0ed4fdbcd5 100644 +index 321d14f241df1246061ba73b2488f5ab5326a734..0cb86afa3732eaf04dd6369d7499bd628df7e12f 100644 --- a/src/main/java/net/minecraft/server/level/ServerPlayer.java +++ b/src/main/java/net/minecraft/server/level/ServerPlayer.java -@@ -2350,7 +2350,7 @@ public class ServerPlayer extends Player { +@@ -2392,7 +2392,7 @@ public class ServerPlayer extends Player { @Override public boolean isImmobile() { @@ -19,10 +19,10 @@ index 95d9824ad7a00e8d75ce243403004193d0bee605..681e351b276798e2a70010e769e6aa0e @Override diff --git a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -index 590880b71c3c1930eb84538af5f6c3b690bb690f..ec78328916a7cc544fea868afc904208d446739f 100644 +index 
6017d2d5378f1d6c2a258469cc24c51e7a4ab4b6..cf5972518023dabd9cf7110d93643886d1ad047e 100644 --- a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java +++ b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -@@ -3142,7 +3142,7 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic +@@ -3149,7 +3149,7 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic } public final boolean isDisconnected() { diff --git a/patches/server/0120-Firework-API-s.patch b/patches/server/0127-Firework-API-s.patch similarity index 100% rename from patches/server/0120-Firework-API-s.patch rename to patches/server/0127-Firework-API-s.patch diff --git a/patches/server/0121-PlayerTeleportEndGatewayEvent.patch b/patches/server/0128-PlayerTeleportEndGatewayEvent.patch similarity index 100% rename from patches/server/0121-PlayerTeleportEndGatewayEvent.patch rename to patches/server/0128-PlayerTeleportEndGatewayEvent.patch diff --git a/patches/server/0122-Provide-E-TE-Chunk-count-stat-methods.patch b/patches/server/0129-Provide-E-TE-Chunk-count-stat-methods.patch similarity index 94% rename from patches/server/0122-Provide-E-TE-Chunk-count-stat-methods.patch rename to patches/server/0129-Provide-E-TE-Chunk-count-stat-methods.patch index 14845ffe92..1003420918 100644 --- a/patches/server/0122-Provide-E-TE-Chunk-count-stat-methods.patch +++ b/patches/server/0129-Provide-E-TE-Chunk-count-stat-methods.patch @@ -7,7 +7,7 @@ Provides counts without the ineffeciency of using .getEntities().size() which creates copy of the collections. diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java -index 94716983899730facb74461eaa2d89c67f999643..f36467dbcb4c39cdc3fa6f7f690d0e1fe7db4160 100644 +index 0a419c98db5f6bb6935e88b640f74c6ec0c5503c..92aa1428e723b377c12bfd20cd1e6b4242ad1261 100644 --- a/src/main/java/net/minecraft/world/level/Level.java +++ b/src/main/java/net/minecraft/world/level/Level.java @@ -116,7 +116,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { @@ -20,7 +20,7 @@ index 94716983899730facb74461eaa2d89c67f999643..f36467dbcb4c39cdc3fa6f7f690d0e1f private final List pendingBlockEntityTickers = Lists.newArrayList(); private boolean tickingBlockEntities; diff --git a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java -index 1440b6b8b71ece71b076601752b06bdcff45542f..1278b114db828606dcd2f70c8f0ab53da490c19f 100644 +index 7c4d43096031a3c93d5f835922b19d5643005128..05cc2e071f621bc7067593d75ba3a5f0eb7650c4 100644 --- a/src/main/java/org/bukkit/craftbukkit/CraftWorld.java +++ b/src/main/java/org/bukkit/craftbukkit/CraftWorld.java @@ -155,6 +155,56 @@ public class CraftWorld extends CraftRegionAccessor implements World { diff --git a/patches/server/0123-Enforce-Sync-Player-Saves.patch b/patches/server/0130-Enforce-Sync-Player-Saves.patch similarity index 92% rename from patches/server/0123-Enforce-Sync-Player-Saves.patch rename to patches/server/0130-Enforce-Sync-Player-Saves.patch index 91e7f7c582..9e4adcf15d 100644 --- a/patches/server/0123-Enforce-Sync-Player-Saves.patch +++ b/patches/server/0130-Enforce-Sync-Player-Saves.patch @@ -7,7 +7,7 @@ Saving players async is extremely dangerous. This will force it to main the same way we handle async chunk loads. 
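[Editorial sketch, not part of the patch: "force it to main" above means re-dispatching an off-thread save request onto the server thread. The names below are invented; the real code goes through the server's own thread check and task queue.]

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;

final class MainThreadSaves {
    private final Thread serverThread;
    private final Executor mainThreadExecutor;

    MainThreadSaves(Thread serverThread, Executor mainThreadExecutor) {
        this.serverThread = serverThread;
        this.mainThreadExecutor = mainThreadExecutor;
    }

    // Runs the save directly when already on the server thread, otherwise
    // queues it on the main thread and blocks until it has completed.
    void save(Runnable saveTask) {
        if (Thread.currentThread() == this.serverThread) {
            saveTask.run();
        } else {
            CompletableFuture.runAsync(saveTask, this.mainThreadExecutor).join();
        }
    }
}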
diff --git a/src/main/java/net/minecraft/server/players/PlayerList.java b/src/main/java/net/minecraft/server/players/PlayerList.java -index ebbe5fb5d22cc6a7a2f131ab8fa209ff47c0c92a..d50334e6cacd9d751135d253ac96b9cdf4aee213 100644 +index 05dd9e618336c5a8e44e20b97d9a7fd0fb5b3002..ba96e12395fd8c9cd55f45631b2816d30f90830a 100644 --- a/src/main/java/net/minecraft/server/players/PlayerList.java +++ b/src/main/java/net/minecraft/server/players/PlayerList.java @@ -1069,11 +1069,13 @@ public abstract class PlayerList { diff --git a/patches/server/0124-Don-t-allow-entities-to-ride-themselves-572.patch b/patches/server/0131-Don-t-allow-entities-to-ride-themselves-572.patch similarity index 84% rename from patches/server/0124-Don-t-allow-entities-to-ride-themselves-572.patch rename to patches/server/0131-Don-t-allow-entities-to-ride-themselves-572.patch index c435a11785..5782de9ec6 100644 --- a/patches/server/0124-Don-t-allow-entities-to-ride-themselves-572.patch +++ b/patches/server/0131-Don-t-allow-entities-to-ride-themselves-572.patch @@ -5,10 +5,10 @@ Subject: [PATCH] Don't allow entities to ride themselves - #572 diff --git a/src/main/java/net/minecraft/world/entity/Entity.java b/src/main/java/net/minecraft/world/entity/Entity.java -index 49f465d0009b084be2a8e809c53126af1ad72cd2..20d5a46c4022dfea24f275fe143c32b8ddf629cb 100644 +index f331c9ff4cc341c981515210d3f4bd92faa8fdc9..9dcdfeaa4be508a5f59a0fae54494e743e13caf7 100644 --- a/src/main/java/net/minecraft/world/entity/Entity.java +++ b/src/main/java/net/minecraft/world/entity/Entity.java -@@ -2485,6 +2485,7 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { +@@ -2537,6 +2537,7 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { } protected boolean addPassenger(Entity entity) { // CraftBukkit diff --git a/patches/server/0125-ExperienceOrbs-API-for-Reason-Source-Triggering-play.patch b/patches/server/0132-ExperienceOrbs-API-for-Reason-Source-Triggering-play.patch similarity index 100% rename from patches/server/0125-ExperienceOrbs-API-for-Reason-Source-Triggering-play.patch rename to patches/server/0132-ExperienceOrbs-API-for-Reason-Source-Triggering-play.patch diff --git a/patches/server/0126-Cap-Entity-Collisions.patch b/patches/server/0133-Cap-Entity-Collisions.patch similarity index 95% rename from patches/server/0126-Cap-Entity-Collisions.patch rename to patches/server/0133-Cap-Entity-Collisions.patch index 52f5223375..6034a3cf3d 100644 --- a/patches/server/0126-Cap-Entity-Collisions.patch +++ b/patches/server/0133-Cap-Entity-Collisions.patch @@ -12,7 +12,7 @@ just as it does in Vanilla, but entity pushing logic will be capped. You can set this to 0 to disable collisions. 
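[Editorial sketch, not part of the patch: the cap on entity pushing described above, with invented names. A value of 0 disables pushing entirely; any positive value bounds how many nearby entities are processed.]

import java.util.List;
import java.util.function.Consumer;

final class CollisionCap {
    // Applies the vanilla push logic to at most maxCollisions nearby entities.
    static <E> void pushCapped(List<E> nearby, int maxCollisions, Consumer<E> push) {
        if (maxCollisions == 0) {
            return; // 0 = entity pushing disabled entirely
        }
        int processed = 0;
        for (E other : nearby) {
            push.accept(other); // vanilla collision/push handling
            if (++processed >= maxCollisions) {
                break; // per-entity cap reached for this pass
            }
        }
    }
}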
diff --git a/src/main/java/net/minecraft/world/entity/Entity.java b/src/main/java/net/minecraft/world/entity/Entity.java -index 20d5a46c4022dfea24f275fe143c32b8ddf629cb..568631c8711788053af407574ab928cb57c11560 100644 +index 9dcdfeaa4be508a5f59a0fae54494e743e13caf7..d0d4e1e24aa518a258bdc66046ff44f03649ee84 100644 --- a/src/main/java/net/minecraft/world/entity/Entity.java +++ b/src/main/java/net/minecraft/world/entity/Entity.java @@ -387,6 +387,7 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource { diff --git a/patches/server/0127-Remove-CraftScheduler-Async-Task-Debugger.patch b/patches/server/0134-Remove-CraftScheduler-Async-Task-Debugger.patch similarity index 100% rename from patches/server/0127-Remove-CraftScheduler-Async-Task-Debugger.patch rename to patches/server/0134-Remove-CraftScheduler-Async-Task-Debugger.patch diff --git a/patches/server/0128-Do-not-let-armorstands-drown.patch b/patches/server/0135-Do-not-let-armorstands-drown.patch similarity index 100% rename from patches/server/0128-Do-not-let-armorstands-drown.patch rename to patches/server/0135-Do-not-let-armorstands-drown.patch diff --git a/patches/server/0129-Properly-handle-async-calls-to-restart-the-server.patch b/patches/server/0136-Properly-handle-async-calls-to-restart-the-server.patch similarity index 97% rename from patches/server/0129-Properly-handle-async-calls-to-restart-the-server.patch rename to patches/server/0136-Properly-handle-async-calls-to-restart-the-server.patch index dc8b6a03e1..bbe6465d36 100644 --- a/patches/server/0129-Properly-handle-async-calls-to-restart-the-server.patch +++ b/patches/server/0136-Properly-handle-async-calls-to-restart-the-server.patch @@ -30,7 +30,7 @@ will have plugins and worlds saving to the disk has a high potential to result in corruption/dataloss. diff --git a/src/main/java/net/minecraft/server/MinecraftServer.java b/src/main/java/net/minecraft/server/MinecraftServer.java -index 857cc4fcc586f7a8ea5baeeae69da62ebfdac77a..e292035cf361c8efa8845450b72a6986694a8ca9 100644 +index 14edacfd5cc77cc85dc84e3fdad94be8c8932dc6..79b4139b653a09142e1b624849ad7c529c35450d 100644 --- a/src/main/java/net/minecraft/server/MinecraftServer.java +++ b/src/main/java/net/minecraft/server/MinecraftServer.java @@ -237,6 +237,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop 0.0D; if (this.player.onGround() && !packet.isOnGround() && flag) { diff --git a/patches/server/0156-handle-ServerboundKeepAlivePacket-async.patch b/patches/server/0163-handle-ServerboundKeepAlivePacket-async.patch similarity index 92% rename from patches/server/0156-handle-ServerboundKeepAlivePacket-async.patch rename to patches/server/0163-handle-ServerboundKeepAlivePacket-async.patch index c2953ef95c..2f222ad510 100644 --- a/patches/server/0156-handle-ServerboundKeepAlivePacket-async.patch +++ b/patches/server/0163-handle-ServerboundKeepAlivePacket-async.patch @@ -15,10 +15,10 @@ also adding some additional logging in order to help work out what is causing random disconnections for clients. 
diff --git a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -index 177c26ac1853d3a0834bb2eb5e7a0b656775a663..8ff49b7ea0671c4126ea24d03267b53cfa66b84c 100644 +index 35e7f877a7d188767660c97418f9194cc50ac464..248bedc4ce26f3d848f336c995fb27672affbf22 100644 --- a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java +++ b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -@@ -3101,14 +3101,18 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic +@@ -3108,14 +3108,18 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic @Override public void handleKeepAlive(ServerboundKeepAlivePacket packet) { diff --git a/patches/server/0157-Expose-client-protocol-version-and-virtual-host.patch b/patches/server/0164-Expose-client-protocol-version-and-virtual-host.patch similarity index 89% rename from patches/server/0157-Expose-client-protocol-version-and-virtual-host.patch rename to patches/server/0164-Expose-client-protocol-version-and-virtual-host.patch index 3c4ab726c5..70bfdb15f2 100644 --- a/patches/server/0157-Expose-client-protocol-version-and-virtual-host.patch +++ b/patches/server/0164-Expose-client-protocol-version-and-virtual-host.patch @@ -60,13 +60,13 @@ index 0000000000000000000000000000000000000000..a5a7624f1f372a26b982836cd31cff15 + +} diff --git a/src/main/java/net/minecraft/network/Connection.java b/src/main/java/net/minecraft/network/Connection.java -index 3ab557277cff7c8a43b0f3de45d17f2bf78f7747..ff1069f57227783f440e4ec9deb58fc709dfd0e7 100644 +index c457bdcb93fa306a2d67b31c0abb53465d809862..c8e34e4f1519163fcac77606fe20c2ec9a282901 100644 --- a/src/main/java/net/minecraft/network/Connection.java +++ b/src/main/java/net/minecraft/network/Connection.java -@@ -92,6 +92,10 @@ public class Connection extends SimpleChannelInboundHandler> { - private int tickCount; - private boolean handlingFault; - public String hostname = ""; // CraftBukkit - add field +@@ -114,6 +114,10 @@ public class Connection extends SimpleChannelInboundHandler> { + } + } + // Paper end - add pending task queue + // Paper start - NetworkClient implementation + public int protocolVersion; + public java.net.InetSocketAddress virtualHost; @@ -90,10 +90,10 @@ index 77d7f070cce1a47e41b5d4f5a1cc8c778352a126..a3b610cb1ed97a635677bc46ccdf0463 @Override diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -index 1cccc331222b24ac99ad7f6897a81ecd383ce7d3..5af1f24c42e61ce159a3f6221411d01bdc4178c0 100644 +index e1c8a559644d84a6787c44431e65d7eee4dfc29a..46b486fb069eeed8ca30d2c2b3a6dca6ad9e26c5 100644 --- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -@@ -233,6 +233,20 @@ public class CraftPlayer extends CraftHumanEntity implements Player { +@@ -275,6 +275,20 @@ public class CraftPlayer extends CraftHumanEntity implements Player { } } diff --git a/patches/server/0158-revert-serverside-behavior-of-keepalives.patch b/patches/server/0165-revert-serverside-behavior-of-keepalives.patch similarity index 97% rename from patches/server/0158-revert-serverside-behavior-of-keepalives.patch rename to patches/server/0165-revert-serverside-behavior-of-keepalives.patch index a3f6c85ef8..38a2c754fb 100644 --- 
a/patches/server/0158-revert-serverside-behavior-of-keepalives.patch +++ b/patches/server/0165-revert-serverside-behavior-of-keepalives.patch @@ -17,7 +17,7 @@ from networking or during connections flood of chunk packets on slower clients, at the cost of dead connections being kept open for longer. diff --git a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -index 8ff49b7ea0671c4126ea24d03267b53cfa66b84c..a0777e65e0dcc2511c13ee32bfde9e2cfd6aafaa 100644 +index 248bedc4ce26f3d848f336c995fb27672affbf22..57ed1d99846f147258b33c6bad2ef7386981462e 100644 --- a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java +++ b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java @@ -259,7 +259,7 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic diff --git a/patches/server/0159-Send-attack-SoundEffects-only-to-players-who-can-see.patch b/patches/server/0166-Send-attack-SoundEffects-only-to-players-who-can-see.patch similarity index 100% rename from patches/server/0159-Send-attack-SoundEffects-only-to-players-who-can-see.patch rename to patches/server/0166-Send-attack-SoundEffects-only-to-players-who-can-see.patch diff --git a/patches/server/0160-Add-PlayerArmorChangeEvent.patch b/patches/server/0167-Add-PlayerArmorChangeEvent.patch similarity index 100% rename from patches/server/0160-Add-PlayerArmorChangeEvent.patch rename to patches/server/0167-Add-PlayerArmorChangeEvent.patch diff --git a/patches/server/0161-Prevent-logins-from-being-processed-when-the-player-.patch b/patches/server/0168-Prevent-logins-from-being-processed-when-the-player-.patch similarity index 100% rename from patches/server/0161-Prevent-logins-from-being-processed-when-the-player-.patch rename to patches/server/0168-Prevent-logins-from-being-processed-when-the-player-.patch diff --git a/patches/server/0162-Fix-MC-117075-TE-Unload-Lag-Spike.patch b/patches/server/0169-Fix-MC-117075-TE-Unload-Lag-Spike.patch similarity index 88% rename from patches/server/0162-Fix-MC-117075-TE-Unload-Lag-Spike.patch rename to patches/server/0169-Fix-MC-117075-TE-Unload-Lag-Spike.patch index f7f0c43c1c..b68fa8a909 100644 --- a/patches/server/0162-Fix-MC-117075-TE-Unload-Lag-Spike.patch +++ b/patches/server/0169-Fix-MC-117075-TE-Unload-Lag-Spike.patch @@ -5,10 +5,10 @@ Subject: [PATCH] Fix MC-117075: TE Unload Lag Spike diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java -index f36467dbcb4c39cdc3fa6f7f690d0e1fe7db4160..40752609f9582fc5d0783535d024ed36290510a1 100644 +index 92aa1428e723b377c12bfd20cd1e6b4242ad1261..79e23503b7082633de654f04575b8323ea887e44 100644 --- a/src/main/java/net/minecraft/world/level/Level.java +++ b/src/main/java/net/minecraft/world/level/Level.java -@@ -716,6 +716,8 @@ public abstract class Level implements LevelAccessor, AutoCloseable { +@@ -721,6 +721,8 @@ public abstract class Level implements LevelAccessor, AutoCloseable { // Spigot start // Iterator iterator = this.blockEntityTickers.iterator(); int tilesThisCycle = 0; @@ -17,7 +17,7 @@ index f36467dbcb4c39cdc3fa6f7f690d0e1fe7db4160..40752609f9582fc5d0783535d024ed36 for (tileTickPosition = 0; tileTickPosition < this.blockEntityTickers.size(); tileTickPosition++) { // Paper - Disable tick limiters this.tileTickPosition = (this.tileTickPosition < this.blockEntityTickers.size()) ? 
this.tileTickPosition : 0; TickingBlockEntity tickingblockentity = (TickingBlockEntity) this.blockEntityTickers.get(tileTickPosition); -@@ -723,7 +725,6 @@ public abstract class Level implements LevelAccessor, AutoCloseable { +@@ -728,7 +730,6 @@ public abstract class Level implements LevelAccessor, AutoCloseable { if (tickingblockentity == null) { this.getCraftServer().getLogger().severe("Spigot has detected a null entity and has removed it, preventing a crash"); tilesThisCycle--; @@ -25,7 +25,7 @@ index f36467dbcb4c39cdc3fa6f7f690d0e1fe7db4160..40752609f9582fc5d0783535d024ed36 continue; } // Spigot end -@@ -731,12 +732,13 @@ public abstract class Level implements LevelAccessor, AutoCloseable { +@@ -736,12 +737,13 @@ public abstract class Level implements LevelAccessor, AutoCloseable { if (tickingblockentity.isRemoved()) { // Spigot start tilesThisCycle--; diff --git a/patches/server/0163-use-CB-BlockState-implementations-for-captured-block.patch b/patches/server/0170-use-CB-BlockState-implementations-for-captured-block.patch similarity index 95% rename from patches/server/0163-use-CB-BlockState-implementations-for-captured-block.patch rename to patches/server/0170-use-CB-BlockState-implementations-for-captured-block.patch index 46d4517669..2b736ff336 100644 --- a/patches/server/0163-use-CB-BlockState-implementations-for-captured-block.patch +++ b/patches/server/0170-use-CB-BlockState-implementations-for-captured-block.patch @@ -18,7 +18,7 @@ the blockstate that will be valid for restoration, as opposed to dropping information on restoration when the event is cancelled. diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java -index 40752609f9582fc5d0783535d024ed36290510a1..a34be79fa1767c581fbfbff5deb747729bd9e5da 100644 +index 79e23503b7082633de654f04575b8323ea887e44..b282f32c95b23b69156970d76171772218b87bc7 100644 --- a/src/main/java/net/minecraft/world/level/Level.java +++ b/src/main/java/net/minecraft/world/level/Level.java @@ -154,7 +154,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { @@ -49,7 +49,7 @@ index 40752609f9582fc5d0783535d024ed36290510a1..a34be79fa1767c581fbfbff5deb74772 this.capturedBlockStates.put(pos.immutable(), blockstate); captured = true; } -@@ -606,7 +607,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { +@@ -611,7 +612,7 @@ public abstract class Level implements LevelAccessor, AutoCloseable { public BlockState getBlockState(BlockPos pos) { // CraftBukkit start - tree generation if (this.captureTreeGeneration) { diff --git a/patches/server/0164-API-to-get-a-BlockState-without-a-snapshot.patch b/patches/server/0171-API-to-get-a-BlockState-without-a-snapshot.patch similarity index 100% rename from patches/server/0164-API-to-get-a-BlockState-without-a-snapshot.patch rename to patches/server/0171-API-to-get-a-BlockState-without-a-snapshot.patch diff --git a/patches/server/0165-AsyncTabCompleteEvent.patch b/patches/server/0172-AsyncTabCompleteEvent.patch similarity index 96% rename from patches/server/0165-AsyncTabCompleteEvent.patch rename to patches/server/0172-AsyncTabCompleteEvent.patch index fbc2ced4ed..a33073330a 100644 --- a/patches/server/0165-AsyncTabCompleteEvent.patch +++ b/patches/server/0172-AsyncTabCompleteEvent.patch @@ -16,10 +16,10 @@ Also adds isCommand and getLocation to the sync TabCompleteEvent Co-authored-by: Aikar diff --git a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java 
b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -index a0777e65e0dcc2511c13ee32bfde9e2cfd6aafaa..528ef2cb78ece52e389578e5ed36287ac395b824 100644 +index 57ed1d99846f147258b33c6bad2ef7386981462e..c43f50f884b6c9c0eb6eb8e11ac892c8986529b8 100644 --- a/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java +++ b/src/main/java/net/minecraft/server/network/ServerGamePacketListenerImpl.java -@@ -788,27 +788,58 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic +@@ -788,12 +788,16 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic } @@ -37,6 +37,10 @@ index a0777e65e0dcc2511c13ee32bfde9e2cfd6aafaa..528ef2cb78ece52e389578e5ed36287a + server.scheduleOnMain(() -> this.disconnect(Component.translatable("disconnect.spam", new Object[0]))); // Paper return; } + // Paper start +@@ -804,18 +808,45 @@ public class ServerGamePacketListenerImpl implements ServerPlayerConnection, Tic + } + // Paper end // CraftBukkit end + // Paper start - async tab completion + TAB_COMPLETE_EXECUTOR.execute(() -> { @@ -87,7 +91,7 @@ index a0777e65e0dcc2511c13ee32bfde9e2cfd6aafaa..528ef2cb78ece52e389578e5ed36287a @Override diff --git a/src/main/java/org/bukkit/craftbukkit/CraftServer.java b/src/main/java/org/bukkit/craftbukkit/CraftServer.java -index 1a2ef9476bd25b9cde3b0b9c1bb32ce77c5a9b92..976e4814f9b395a011d5687a86ba187392ee52ab 100644 +index c29973de83e827a2f372590a43948b0ad8b5ec64..141bba5e92044437b199b5aa701ded049737c509 100644 --- a/src/main/java/org/bukkit/craftbukkit/CraftServer.java +++ b/src/main/java/org/bukkit/craftbukkit/CraftServer.java @@ -2116,7 +2116,7 @@ public final class CraftServer implements Server { diff --git a/patches/server/0166-PlayerPickupExperienceEvent.patch b/patches/server/0173-PlayerPickupExperienceEvent.patch similarity index 100% rename from patches/server/0166-PlayerPickupExperienceEvent.patch rename to patches/server/0173-PlayerPickupExperienceEvent.patch diff --git a/patches/server/0167-Ability-to-apply-mending-to-XP-API.patch b/patches/server/0174-Ability-to-apply-mending-to-XP-API.patch similarity index 94% rename from patches/server/0167-Ability-to-apply-mending-to-XP-API.patch rename to patches/server/0174-Ability-to-apply-mending-to-XP-API.patch index 9e2f2cf291..7dded97619 100644 --- a/patches/server/0167-Ability-to-apply-mending-to-XP-API.patch +++ b/patches/server/0174-Ability-to-apply-mending-to-XP-API.patch @@ -14,10 +14,10 @@ public net.minecraft.world.entity.ExperienceOrb durabilityToXp(I)I public net.minecraft.world.entity.ExperienceOrb xpToDurability(I)I diff --git a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -index 5af1f24c42e61ce159a3f6221411d01bdc4178c0..1ce5ec09f6fe562dace89575e2a73f727e1eb951 100644 +index 46b486fb069eeed8ca30d2c2b3a6dca6ad9e26c5..e87f6e4ecb23b2147af1419e6c40f96f8b38c0fc 100644 --- a/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java +++ b/src/main/java/org/bukkit/craftbukkit/entity/CraftPlayer.java -@@ -1467,7 +1467,37 @@ public class CraftPlayer extends CraftHumanEntity implements Player { +@@ -1509,7 +1509,37 @@ public class CraftPlayer extends CraftHumanEntity implements Player { } @Override diff --git a/patches/server/0168-PlayerNaturallySpawnCreaturesEvent.patch b/patches/server/0175-PlayerNaturallySpawnCreaturesEvent.patch similarity index 81% rename from patches/server/0168-PlayerNaturallySpawnCreaturesEvent.patch rename to 
patches/server/0175-PlayerNaturallySpawnCreaturesEvent.patch index 22f01e7b8d..4e4ed2afee 100644 --- a/patches/server/0168-PlayerNaturallySpawnCreaturesEvent.patch +++ b/patches/server/0175-PlayerNaturallySpawnCreaturesEvent.patch @@ -9,10 +9,10 @@ from triggering monster spawns on a server. Also a highly more effecient way to blanket block spawns in a world diff --git a/src/main/java/net/minecraft/server/level/ChunkMap.java b/src/main/java/net/minecraft/server/level/ChunkMap.java -index 19bd6f9aee3ccb1af1b010ee51a54aa2d0bf9c84..ab01080c3b00a3988f2dd48fd4ecf1488bfcce8b 100644 +index a502d293cedb2f507e6cf1792429b36685ed1910..e50af28f806593a0171ad7cee5805f74b25fec89 100644 --- a/src/main/java/net/minecraft/server/level/ChunkMap.java +++ b/src/main/java/net/minecraft/server/level/ChunkMap.java -@@ -1191,7 +1191,9 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider +@@ -708,7 +708,9 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider chunkRange = (chunkRange > level.spigotConfig.viewDistance) ? (byte) level.spigotConfig.viewDistance : chunkRange; chunkRange = (chunkRange > 8) ? 8 : chunkRange; @@ -23,7 +23,7 @@ index 19bd6f9aee3ccb1af1b010ee51a54aa2d0bf9c84..ab01080c3b00a3988f2dd48fd4ecf148 // Spigot end long i = chunkcoordintpair.toLong(); -@@ -1208,6 +1210,15 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider +@@ -725,6 +727,15 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider } entityplayer = (ServerPlayer) iterator.next(); @@ -40,10 +40,10 @@ index 19bd6f9aee3ccb1af1b010ee51a54aa2d0bf9c84..ab01080c3b00a3988f2dd48fd4ecf148 return true; diff --git a/src/main/java/net/minecraft/server/level/ServerChunkCache.java b/src/main/java/net/minecraft/server/level/ServerChunkCache.java -index fc1afda5a60ab0f3c275f849d8af08d308b36c3f..d0330d1baf89f949c05b2380ce875366802834e3 100644 +index caff28e2446177d622c999b84d8889fbf61d0b3d..d7022cda5f4654b1cbfeed3f096af028a9fbc564 100644 --- a/src/main/java/net/minecraft/server/level/ServerChunkCache.java +++ b/src/main/java/net/minecraft/server/level/ServerChunkCache.java -@@ -718,6 +718,15 @@ public class ServerChunkCache extends ChunkSource { +@@ -716,6 +716,15 @@ public class ServerChunkCache extends ChunkSource { boolean flag2 = this.level.getGameRules().getBoolean(GameRules.RULE_DOMOBSPAWNING) && !this.level.players().isEmpty(); // CraftBukkit Collections.shuffle(list); @@ -60,7 +60,7 @@ index fc1afda5a60ab0f3c275f849d8af08d308b36c3f..d0330d1baf89f949c05b2380ce875366 while (iterator1.hasNext()) { diff --git a/src/main/java/net/minecraft/server/level/ServerPlayer.java b/src/main/java/net/minecraft/server/level/ServerPlayer.java -index 681e351b276798e2a70010e769e6aa0ed4fdbcd5..92b69b38394ff50ca7692fa8794cc2b50acb8b66 100644 +index 0cb86afa3732eaf04dd6369d7499bd628df7e12f..8b5b66c8f55660085ff3af824b8bf0c455be79bc 100644 --- a/src/main/java/net/minecraft/server/level/ServerPlayer.java +++ b/src/main/java/net/minecraft/server/level/ServerPlayer.java @@ -1,5 +1,6 @@ @@ -76,5 +76,5 @@ index 681e351b276798e2a70010e769e6aa0ed4fdbcd5..92b69b38394ff50ca7692fa8794cc2b5 public final com.destroystokyo.paper.util.misc.PooledLinkedHashSets.PooledObjectLinkedOpenHashSet cachedSingleHashSet; // Paper + public PlayerNaturallySpawnCreaturesEvent playerNaturallySpawnedEvent; // Paper - public ServerPlayer(MinecraftServer server, ServerLevel world, GameProfile profile) { - super(world, world.getSharedSpawnPos(), world.getSharedSpawnAngle(), profile); 
+ private final java.util.concurrent.atomic.AtomicReference viewDistances = new java.util.concurrent.atomic.AtomicReference<>(new io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.ViewDistances(-1, -1, -1)); + public io.papermc.paper.chunk.system.RegionizedPlayerChunkLoader.PlayerChunkLoaderData chunkLoader; diff --git a/patches/server/0169-Add-setPlayerProfile-API-for-Skulls.patch b/patches/server/0176-Add-setPlayerProfile-API-for-Skulls.patch similarity index 100% rename from patches/server/0169-Add-setPlayerProfile-API-for-Skulls.patch rename to patches/server/0176-Add-setPlayerProfile-API-for-Skulls.patch diff --git a/patches/server/0170-PreCreatureSpawnEvent.patch b/patches/server/0177-PreCreatureSpawnEvent.patch similarity index 100% rename from patches/server/0170-PreCreatureSpawnEvent.patch rename to patches/server/0177-PreCreatureSpawnEvent.patch diff --git a/patches/server/0171-Fill-Profile-Property-Events.patch b/patches/server/0178-Fill-Profile-Property-Events.patch similarity index 100% rename from patches/server/0171-Fill-Profile-Property-Events.patch rename to patches/server/0178-Fill-Profile-Property-Events.patch diff --git a/patches/server/0172-PlayerAdvancementCriterionGrantEvent.patch b/patches/server/0179-PlayerAdvancementCriterionGrantEvent.patch similarity index 100% rename from patches/server/0172-PlayerAdvancementCriterionGrantEvent.patch rename to patches/server/0179-PlayerAdvancementCriterionGrantEvent.patch diff --git a/patches/server/0173-Add-ArmorStand-Item-Meta.patch b/patches/server/0180-Add-ArmorStand-Item-Meta.patch similarity index 100% rename from patches/server/0173-Add-ArmorStand-Item-Meta.patch rename to patches/server/0180-Add-ArmorStand-Item-Meta.patch diff --git a/patches/server/0174-Extend-Player-Interact-cancellation.patch b/patches/server/0181-Extend-Player-Interact-cancellation.patch similarity index 100% rename from patches/server/0174-Extend-Player-Interact-cancellation.patch rename to patches/server/0181-Extend-Player-Interact-cancellation.patch diff --git a/patches/server/0175-Tameable-getOwnerUniqueId-API.patch b/patches/server/0182-Tameable-getOwnerUniqueId-API.patch similarity index 100% rename from patches/server/0175-Tameable-getOwnerUniqueId-API.patch rename to patches/server/0182-Tameable-getOwnerUniqueId-API.patch diff --git a/patches/server/0176-Toggleable-player-crits-helps-mitigate-hacked-client.patch b/patches/server/0183-Toggleable-player-crits-helps-mitigate-hacked-client.patch similarity index 100% rename from patches/server/0176-Toggleable-player-crits-helps-mitigate-hacked-client.patch rename to patches/server/0183-Toggleable-player-crits-helps-mitigate-hacked-client.patch diff --git a/patches/server/0177-Disable-Explicit-Network-Manager-Flushing.patch b/patches/server/0184-Disable-Explicit-Network-Manager-Flushing.patch similarity index 85% rename from patches/server/0177-Disable-Explicit-Network-Manager-Flushing.patch rename to patches/server/0184-Disable-Explicit-Network-Manager-Flushing.patch index ba688f6ff8..b69e3f67f6 100644 --- a/patches/server/0177-Disable-Explicit-Network-Manager-Flushing.patch +++ b/patches/server/0184-Disable-Explicit-Network-Manager-Flushing.patch @@ -12,10 +12,10 @@ flushing on the netty event loop, so it won't do the flush on the main thread. 
Renable flushing by passing -Dpaper.explicit-flush=true diff --git a/src/main/java/net/minecraft/network/Connection.java b/src/main/java/net/minecraft/network/Connection.java -index ff1069f57227783f440e4ec9deb58fc709dfd0e7..0a4a875cd3d7f8f864d583d94b6f3869139db48e 100644 +index c8e34e4f1519163fcac77606fe20c2ec9a282901..39b1d17cfb7d5c1b0f0ee449ee1c24ef050b0c0e 100644 --- a/src/main/java/net/minecraft/network/Connection.java +++ b/src/main/java/net/minecraft/network/Connection.java -@@ -95,6 +95,7 @@ public class Connection extends SimpleChannelInboundHandler> { +@@ -117,6 +117,7 @@ public class Connection extends SimpleChannelInboundHandler> { // Paper start - NetworkClient implementation public int protocolVersion; public java.net.InetSocketAddress virtualHost; @@ -23,7 +23,7 @@ index ff1069f57227783f440e4ec9deb58fc709dfd0e7..0a4a875cd3d7f8f864d583d94b6f3869 // Paper end public Connection(PacketFlow side) { -@@ -288,7 +289,7 @@ public class Connection extends SimpleChannelInboundHandler> { +@@ -317,7 +318,7 @@ public class Connection extends SimpleChannelInboundHandler> { } if (this.channel != null) { diff --git a/patches/server/0178-Implement-extended-PaperServerListPingEvent.patch b/patches/server/0185-Implement-extended-PaperServerListPingEvent.patch similarity index 98% rename from patches/server/0178-Implement-extended-PaperServerListPingEvent.patch rename to patches/server/0185-Implement-extended-PaperServerListPingEvent.patch index 0a100f499d..335ada6c02 100644 --- a/patches/server/0178-Implement-extended-PaperServerListPingEvent.patch +++ b/patches/server/0185-Implement-extended-PaperServerListPingEvent.patch @@ -181,7 +181,7 @@ index 0000000000000000000000000000000000000000..6b0bdc266109cdfb874f08bf74323603 + +} diff --git a/src/main/java/net/minecraft/server/MinecraftServer.java b/src/main/java/net/minecraft/server/MinecraftServer.java -index 9fad7c117033694bc5850b9f9585e439c44ef0dd..646d6d3833ab7e40165502f29b0522562d6b8a71 100644 +index bc96a5c546c01b092f513f202e833b7b5f320e72..a003144fb74c9e7314b24216bc0e506e3d2bde0f 100644 --- a/src/main/java/net/minecraft/server/MinecraftServer.java +++ b/src/main/java/net/minecraft/server/MinecraftServer.java @@ -3,6 +3,9 @@ package net.minecraft.server; @@ -194,7 +194,7 @@ index 9fad7c117033694bc5850b9f9585e439c44ef0dd..646d6d3833ab7e40165502f29b052256 import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; -@@ -1353,7 +1356,7 @@ public abstract class MinecraftServer extends ReentrantBlockableEventLoop