Paper/patches/server/0488-Improve-Chunk-Status-Transition-Speed.patch

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Aikar <aikar@aikar.co>
Date: Fri, 29 May 2020 23:32:14 -0400
Subject: [PATCH] Improve Chunk Status Transition Speed

When a chunk that has already been generated is loaded from disk,
the server still has to promote the chunk through the status system
to reach its current desired status level.

This results in every single status transition hopping from the main
thread to the world gen threads, only to discover there is no work it
actually needs to do... and then it returns to main.

This back and forth costs a lot of time and can really delay chunk
loads when the server is under high TPS, due to there being a lot of
time in between chunk load times, and it also ties up the chunk
threads, keeping them from doing actual generation and light work.

Additionally, the whole task system uses a lot of CPU on the server
threads anyway.

So by optimizing status transitions for statuses that are already
complete, we can run them to the desired level while on the main
thread (where they have to happen anyway) instead of ever jumping to
a world gen thread (a simplified sketch of the check follows the
path below).

This reduces chunk loading to the following scenario / path:

1) MAIN: Chunk requested, load request sent to ChunkTaskManager / IO queue
2) IO: Once its position in the queue comes up, read the chunk data and schedule it to a chunk task thread
3) CHUNK: Once the IO data is loaded and its position in the queue comes up, deserialize the chunk data, process conversions, submit to the main queue
4) MAIN: Next chunk task pass (mid tick or end of tick), load chunk data into the world (POI, main thread tasks)
5) MAIN: Process status transitions all the way to LIGHT; the light stage schedules a threaded task
6) SERVER: Light task registers light enablement for the chunk and performs any lighting that needs to be done
7) MAIN: Task returns to main, finish processing to FULL/TICKING status

Previously this would have hopped to SERVER around 12+ extra times.
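
The core of the change is the cheap main-thread check added in the diff
below: if the chunk data loaded from disk already reaches (or passes) the
next status being requested, the transition is a no-op and never needs a
worldgen thread. The following is a minimal, self-contained sketch of that
idea; the names used here (Status, LoadedChunk, STATUS_ORDER,
chooseExecutor) are hypothetical stand-ins for illustration, not the
actual server classes.

import java.util.List;
import java.util.concurrent.Executor;

// Hypothetical sketch of the status-advance check; not the real server classes.
public final class StatusAdvanceSketch {

    // Ordered status ladder, standing in for ChunkHolder.CHUNK_STATUSES.
    enum Status { EMPTY, STRUCTURE, NOISE, SURFACE, FEATURES, LIGHT, FULL }

    static final List<Status> STATUS_ORDER = List.of(Status.values());

    // Chunk data as read from disk, carrying the status it was saved at.
    record LoadedChunk(Status savedStatus) {}

    // FULL is terminal; everything else advances to the next entry in the ladder.
    static Status nextStatus(Status status) {
        return status == Status.FULL ? status : STATUS_ORDER.get(status.ordinal() + 1);
    }

    // Mirrors canAdvanceStatus(): the transition is a no-op when chunk data is
    // already loaded and its saved status reaches at least the next requested one.
    static boolean canAdvanceStatus(LoadedChunk chunk, Status currentHolderStatus) {
        return chunk != null
                && (currentHolderStatus == null
                    || chunk.savedStatus().ordinal() >= nextStatus(currentHolderStatus).ordinal());
    }

    // Mirrors the executor wrapper in ChunkMap: no-op transitions stay on the
    // main-invoking executor instead of bouncing through the worldgen pool.
    static Executor chooseExecutor(boolean canAdvance, Executor mainInvoking, Executor worldgenPool) {
        return canAdvance ? mainInvoking : worldgenPool;
    }

    public static void main(String[] args) {
        LoadedChunk fullyGenerated = new LoadedChunk(Status.FULL);
        System.out.println(canAdvanceStatus(fullyGenerated, Status.FEATURES));              // true  -> run on main
        System.out.println(canAdvanceStatus(new LoadedChunk(Status.NOISE), Status.NOISE));  // false -> worldgen pool
    }
}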
diff --git a/src/main/java/net/minecraft/server/level/ChunkHolder.java b/src/main/java/net/minecraft/server/level/ChunkHolder.java
index 8245e07f6ecfd9dd997c8525b52c6eadd392e6f1..52ae149d22e8b0f3c2321f383351a4244c517b5e 100644
--- a/src/main/java/net/minecraft/server/level/ChunkHolder.java
+++ b/src/main/java/net/minecraft/server/level/ChunkHolder.java
@@ -113,6 +113,20 @@ public class ChunkHolder {
// Paper end - optimise isOutsideOfRange
long lastAutoSaveTime; // Paper - incremental autosave
long inactiveTimeStart; // Paper - incremental autosave
+ // Paper start - optimize chunk status progression without jumping through thread pool
+ public boolean canAdvanceStatus() {
+ ChunkStatus status = getChunkHolderStatus();
+ ChunkAccess chunk = getAvailableChunkNow();
+ return chunk != null && (status == null || chunk.getStatus().isOrAfter(getNextStatus(status)));
+ }
+ // Yanked from chunk prioritisation patch - remove?
+ public static ChunkStatus getNextStatus(ChunkStatus status) {
+ if (status == ChunkStatus.FULL) {
+ return status;
+ }
+ return CHUNK_STATUSES.get(status.getIndex() + 1);
+ }
+ // Paper end
public ChunkHolder(ChunkPos pos, int level, LevelHeightAccessor world, LevelLightEngine lightingProvider, ChunkHolder.LevelChangeListener levelUpdateListener, ChunkHolder.PlayerProvider playersWatchingChunkProvider) {
this.futures = new AtomicReferenceArray(ChunkHolder.CHUNK_STATUSES.size());
diff --git a/src/main/java/net/minecraft/server/level/ChunkMap.java b/src/main/java/net/minecraft/server/level/ChunkMap.java
index f518fc7a39a807bff2e141fd238ab1bfc34ce890..cd76029f2dc3ecfa52bae127ae261a028fa8839a 100644
--- a/src/main/java/net/minecraft/server/level/ChunkMap.java
+++ b/src/main/java/net/minecraft/server/level/ChunkMap.java
@@ -702,7 +702,7 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
return either.mapLeft((list) -> {
return (LevelChunk) list.get(list.size() / 2);
});
- }, this.mainThreadExecutor);
+ }, this.mainInvokingExecutor); // Paper
}
@Nullable
@@ -1133,6 +1133,12 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
return "chunkGenerate " + requiredStatus.getName();
});
Executor executor = (runnable) -> {
+ // Paper start - optimize chunk status progression without jumping through thread pool
+ if (holder.canAdvanceStatus()) {
+ this.mainInvokingExecutor.execute(runnable);
+ return;
+ }
+ // Paper end
this.worldgenMailbox.tell(ChunkTaskPriorityQueueSorter.message(holder, runnable));
};
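
Note that the mainInvokingExecutor referenced in the hunks above is not
added by this patch; it is defined elsewhere in Paper's patch series.
Conceptually it is an executor that runs a task inline when the caller is
already on the main thread and otherwise hands the task to the regular
main-thread queue. A hypothetical sketch of that pattern (not Paper's
actual implementation) could look like:

import java.util.concurrent.Executor;

// Hypothetical sketch of a "main invoking" executor; not Paper's actual field.
final class MainInvokingExecutorSketch implements Executor {

    private final Thread mainThread;        // the server's main thread
    private final Executor mainThreadQueue; // the normal main-thread task queue

    MainInvokingExecutorSketch(Thread mainThread, Executor mainThreadQueue) {
        this.mainThread = mainThread;
        this.mainThreadQueue = mainThreadQueue;
    }

    @Override
    public void execute(Runnable task) {
        if (Thread.currentThread() == this.mainThread) {
            task.run(); // already on main: run inline instead of queueing
        } else {
            this.mainThreadQueue.execute(task); // off-main: defer to the regular queue
        }
    }
}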