From 4fded9828f082263ee68c2ef17f74e56b6fc25b7 Mon Sep 17 00:00:00 2001
From: Outfluencer
Date: Wed, 29 Jan 2025 07:52:09 +1100
Subject: [PATCH] #3775: Allow decompressed packets to grow to max capacity

Do not use size as max capacity, as it's possible that the entity rewriter
increases the size afterwards. This would result in a kick (it happens
rarely, as the entity ids' size must differ).
---
 .../java/net/md_5/bungee/compress/PacketDecompressor.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/proxy/src/main/java/net/md_5/bungee/compress/PacketDecompressor.java b/proxy/src/main/java/net/md_5/bungee/compress/PacketDecompressor.java
index 8e89d4b651..1612ffcf9a 100644
--- a/proxy/src/main/java/net/md_5/bungee/compress/PacketDecompressor.java
+++ b/proxy/src/main/java/net/md_5/bungee/compress/PacketDecompressor.java
@@ -41,7 +41,9 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
                 throw new OverflowPacketException( "Packet may not be larger than " + MAX_DECOMPRESSED_LEN + " bytes" );
             }
 
-            ByteBuf decompressed = ctx.alloc().directBuffer( size, size );
+            // Do not use size as max capacity, as it's possible that the entity rewriter increases the size afterwards.
+            // This would result in a kick (it happens rarely, as the entity ids' size must differ).
+            ByteBuf decompressed = ctx.alloc().directBuffer( size, MAX_DECOMPRESSED_LEN );
             try
             {
                 zlib.process( in, decompressed );
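
For context, a minimal sketch of the Netty behaviour the patch relies on: a ByteBuf whose maxCapacity equals its initial size cannot grow, so any later write past that size (for example, the entity rewriter re-encoding an entity id with a longer varint) fails, whereas a larger maxCapacity lets the buffer expand on demand. The class name MaxCapacityDemo and the 1 << 23 stand-in for MAX_DECOMPRESSED_LEN are illustrative assumptions, not part of the patch.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;

public class MaxCapacityDemo
{

    public static void main(String[] args)
    {
        ByteBufAllocator alloc = ByteBufAllocator.DEFAULT;

        // Buffer capped at its initial size: any write that needs more room than
        // the initial 4 bytes fails, because the buffer may never exceed maxCapacity.
        ByteBuf capped = alloc.directBuffer( 4, 4 );
        try
        {
            capped.writeBytes( new byte[ 5 ] ); // exceeds maxCapacity of 4
        } catch ( IndexOutOfBoundsException expected )
        {
            System.out.println( "capped buffer cannot grow: " + expected.getMessage() );
        } finally
        {
            capped.release();
        }

        // Buffer with a larger max capacity starts at the same initial size
        // but is allowed to grow up to that maximum when more bytes are written.
        int maxDecompressedLen = 1 << 23; // assumed stand-in for MAX_DECOMPRESSED_LEN
        ByteBuf growable = alloc.directBuffer( 4, maxDecompressedLen );
        growable.writeBytes( new byte[ 5 ] ); // succeeds; capacity expands as needed
        System.out.println( "growable buffer now holds " + growable.readableBytes() + " bytes" );
        growable.release();
    }
}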