Paper/patches/server/0711-Allow-controlled-flushing-for-network-manager.patch
Spottedleaf 01a13871de
Rewrite chunk system (#8177)
Patch documentation to come

Issues with the old system that are fixed now:
- World generation does not scale effectively with CPU cores.
- Relies on the main thread for scheduling and maintaining chunk state, which drops chunk load/generate rates at lower TPS.
- Unreliable prioritisation of chunk gen/load calls that block the main thread.
- Shutdown logic is utterly unreliable, as it has to wait for all chunks to unload - is it even guaranteed that the chunk system is in a state on shutdown where it can reliably do this? Watchdog shutdown also typically failed due to thread checks, which is now resolved.
- Saving of data is not unified (i.e. chunk data can be saved without saving entity data), which poses desync problems if the shutdown is abnormal.
- Entities are not loaded with chunks. This caused quite a bit of headache for the Chunk#getEntities API (see the sketch below this list), but now the new chunk system loads entities with chunks so that they are ready whenever the chunk loads in. This effectively brings the behavior back to the 1.16 era, while still storing entities in their own separate regionfiles.

The above list is not complete. The patch documentation will complete it.
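A minimal, hedged plugin sketch of what the Chunk#getEntities point above means in practice (the listener class and logging are illustrative, not part of this commit), assuming entities are available by the time ChunkLoadEvent fires, per the description above:

import org.bukkit.Bukkit;
import org.bukkit.entity.Entity;
import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.world.ChunkLoadEvent;

public final class ChunkEntitySketch implements Listener {

    @EventHandler
    public void onChunkLoad(ChunkLoadEvent event) {
        // With entities stored and loaded alongside the chunk, this array should be
        // populated as soon as the chunk loads, rather than filling in at a later tick.
        Entity[] entities = event.getChunk().getEntities();
        Bukkit.getLogger().info("Chunk " + event.getChunk().getX() + ","
                + event.getChunk().getZ() + " loaded with " + entities.length + " entities");
    }
}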

The new chunk system hard-depends on Starlight and DataConverter, and most importantly on the new concurrent utilities in ConcurrentUtil.

Some of the old async chunk I/O interface (i.e. the old file IO thread reroutes _some_ calls to the new file IO thread) is kept for plugin compatibility reasons. It will be removed in the next major version of Minecraft.

The old legacy chunk system patches have been moved to the removed folder in case we need them again.

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
Date: Sat, 4 Apr 2020 15:27:44 -0700
Subject: [PATCH] Allow controlled flushing for network manager
Only make one flush call when emptying the packet queue too
This patch will be used to optimise out flush calls in later
patches.
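
A hedged usage sketch, not part of this patch: the two methods added to Connection below (disableAutomaticFlush/enableAutomaticFlush) are meant to let later patches batch a group of packet sends behind a single channel flush, roughly like this:

import java.util.List;
import net.minecraft.network.Connection;
import net.minecraft.network.protocol.Packet;

final class BatchedSendSketch {
    // Suppress per-packet flushing around a burst of sends, then flush once.
    static void sendBatched(Connection connection, List<Packet<?>> packets) {
        connection.disableAutomaticFlush(); // writes after this point stay buffered
        try {
            for (Packet<?> packet : packets) {
                connection.send(packet); // queued on the channel, no flush per packet
            }
        } finally {
            // Re-enables flushing and issues a single flush, but only if any packets
            // were written while automatic flushing was disabled.
            connection.enableAutomaticFlush();
        }
    }
}
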
diff --git a/src/main/java/net/minecraft/network/Connection.java b/src/main/java/net/minecraft/network/Connection.java
index 2f38d04b369b345c89f85cee32081df8baf4239f..13ab14b1fb3acfd245fbab35f84f5c30c97ed855 100644
--- a/src/main/java/net/minecraft/network/Connection.java
+++ b/src/main/java/net/minecraft/network/Connection.java
@@ -121,6 +121,39 @@ public class Connection extends SimpleChannelInboundHandler<Packet<?>> {
public ConnectionProtocol protocol;
// Paper end
+ // Paper start - allow controlled flushing
+ volatile boolean canFlush = true;
+ private final java.util.concurrent.atomic.AtomicInteger packetWrites = new java.util.concurrent.atomic.AtomicInteger();
+ private int flushPacketsStart;
+ private final Object flushLock = new Object();
+
+ public void disableAutomaticFlush() {
+ synchronized (this.flushLock) {
+ this.flushPacketsStart = this.packetWrites.get(); // must be volatile and before canFlush = false
+ this.canFlush = false;
+ }
+ }
+
+ public void enableAutomaticFlush() {
+ synchronized (this.flushLock) {
+ this.canFlush = true;
+ if (this.packetWrites.get() != this.flushPacketsStart) { // must be after canFlush = true
+ this.flush(); // only make the flush call if we need to
+ }
+ }
+ }
+
+ private final void flush() {
+ if (this.channel.eventLoop().inEventLoop()) {
+ this.channel.flush();
+ } else {
+ this.channel.eventLoop().execute(() -> {
+ this.channel.flush();
+ });
+ }
+ }
+ // Paper end - allow controlled flushing
+
public Connection(PacketFlow side) {
this.receiving = side;
}
@@ -286,7 +319,7 @@ public class Connection extends SimpleChannelInboundHandler<Packet<?>> {
net.minecraft.server.MCUtil.isMainThread() && packet.isReady() && this.queue.isEmpty() &&
(packet.getExtraPackets() == null || packet.getExtraPackets().isEmpty())
))) {
- this.sendPacket(packet, callbacks);
+ this.sendPacket(packet, callbacks, null); // Paper
return;
}
// write the packets to the queue, then flush - antixray hooks there already
@@ -310,6 +343,14 @@ public class Connection extends SimpleChannelInboundHandler<Packet<?>> {
}
private void sendPacket(Packet<?> packet, @Nullable PacketSendListener callbacks) {
+ // Paper start - add flush parameter
+ this.sendPacket(packet, callbacks, Boolean.TRUE);
+ }
+ private void sendPacket(Packet<?> packet, @Nullable PacketSendListener callbacks, Boolean flushConditional) {
+ this.packetWrites.getAndIncrement(); // must be before using canFlush
+ boolean effectiveFlush = flushConditional == null ? this.canFlush : flushConditional.booleanValue();
+ final boolean flush = effectiveFlush || packet instanceof net.minecraft.network.protocol.game.ClientboundKeepAlivePacket || packet instanceof ClientboundDisconnectPacket; // no delay for certain packets
+ // Paper end - add flush parameter
ConnectionProtocol enumprotocol = ConnectionProtocol.getProtocolForPacket(packet);
ConnectionProtocol enumprotocol1 = this.getCurrentProtocol();
@@ -320,16 +361,21 @@ public class Connection extends SimpleChannelInboundHandler<Packet<?>> {
}
if (this.channel.eventLoop().inEventLoop()) {
- this.doSendPacket(packet, callbacks, enumprotocol, enumprotocol1);
+ this.doSendPacket(packet, callbacks, enumprotocol, enumprotocol1, flush); // Paper
} else {
this.channel.eventLoop().execute(() -> {
- this.doSendPacket(packet, callbacks, enumprotocol, enumprotocol1);
+ this.doSendPacket(packet, callbacks, enumprotocol, enumprotocol1, flush); // Paper
});
}
}
private void doSendPacket(Packet<?> packet, @Nullable PacketSendListener callbacks, ConnectionProtocol packetState, ConnectionProtocol currentState) {
+ // Paper start - add flush parameter
+ this.doSendPacket(packet, callbacks, packetState, currentState, true);
+ }
+ private void doSendPacket(Packet<?> packet, @Nullable PacketSendListener callbacks, ConnectionProtocol packetState, ConnectionProtocol currentState, boolean flush) {
+ // Paper end - add flush parameter
if (packetState != currentState) {
this.setProtocol(packetState);
}
@@ -343,7 +389,7 @@ public class Connection extends SimpleChannelInboundHandler<Packet<?>> {
try {
// Paper end
- ChannelFuture channelfuture = this.channel.writeAndFlush(packet);
+ ChannelFuture channelfuture = flush ? this.channel.writeAndFlush(packet) : this.channel.write(packet); // Paper - add flush parameter
if (callbacks != null) {
channelfuture.addListener((future) -> {
@@ -399,6 +445,10 @@ public class Connection extends SimpleChannelInboundHandler<Packet<?>> {
private boolean processQueue() {
try { // Paper - add pending task queue
if (this.queue.isEmpty()) return true;
+ // Paper start - make only one flush call per sendPacketQueue() call
+ final boolean needsFlush = this.canFlush;
+ boolean hasWrotePacket = false;
+ // Paper end - make only one flush call per sendPacketQueue() call
// If we are on main, we are safe here in that nothing else should be processing queue off main anymore
// But if we are not on main due to login/status, the parent is synchronized on packetQueue
java.util.Iterator<PacketHolder> iterator = this.queue.iterator();
@@ -406,16 +456,22 @@ public class Connection extends SimpleChannelInboundHandler<Packet<?>> {
PacketHolder queued = iterator.next(); // poll -> peek
// Fix NPE (Spigot bug caused by handleDisconnection())
- if (queued == null) {
+ if (false && queued == null) { // Paper - diff on change, this logic is redundant: iterator guarantees ret of an element - on change, hook the flush logic here
return true;
}
Packet<?> packet = queued.packet;
if (!packet.isReady()) {
+ // Paper start - make only one flush call per sendPacketQueue() call
+ if (hasWrotePacket && (needsFlush || this.canFlush)) {
+ this.flush();
+ }
+ // Paper end - make only one flush call per sendPacketQueue() call
return false;
} else {
iterator.remove();
- this.sendPacket(packet, queued.listener);
+ this.sendPacket(packet, queued.listener, (!iterator.hasNext() && (needsFlush || this.canFlush)) ? Boolean.TRUE : Boolean.FALSE); // Paper - make only one flush call per sendPacketQueue() call
+ hasWrotePacket = true; // Paper - make only one flush call per sendPacketQueue() call
}
}
return true;
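
For readability, a minimal standalone sketch (hypothetical Packet type; the canFlush bookkeeping is omitted) of the drain-and-flush-once pattern the processQueue() hunk above implements with Netty:

import io.netty.channel.Channel;
import java.util.Iterator;
import java.util.Queue;

final class QueueDrainSketch {
    interface Packet { boolean isReady(); }

    // Drain the queue, writing every packet but flushing the channel at most once.
    static boolean drain(Channel channel, Queue<Packet> queue) {
        boolean wrotePacket = false;
        for (Iterator<Packet> iterator = queue.iterator(); iterator.hasNext();) {
            Packet packet = iterator.next();
            if (!packet.isReady()) {
                // Bail out early, but flush whatever was already written in this pass
                // (mirrors the hasWrotePacket check above).
                if (wrotePacket) {
                    channel.flush();
                }
                return false;
            }
            iterator.remove();
            if (iterator.hasNext()) {
                channel.write(packet);         // intermediate packet: buffer only
            } else {
                channel.writeAndFlush(packet); // last packet carries the single flush
            }
            wrotePacket = true;
        }
        return true;
    }
}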