path: root/src/Protocol/ChunkDataSerializer.cpp
author     Pokechu22 <Pokechu22@users.noreply.github.com>    2016-05-14 21:12:42 +0200
committer  Alexander Harkness <me@bearbin.net>               2016-05-14 21:12:42 +0200
commit     a4f327118b06ced1cd4510b7d20d34da83aa78a3 (patch)
tree       d6dbd41cfbec99f2598fb51907b86d9cbe0e8a05 /src/Protocol/ChunkDataSerializer.cpp
parent     Fixed death on teleportation or leaving Minecart (#3181) (diff)
Diffstat (limited to 'src/Protocol/ChunkDataSerializer.cpp')
-rw-r--r--  src/Protocol/ChunkDataSerializer.cpp  298
1 file changed, 296 insertions(+), 2 deletions(-)
diff --git a/src/Protocol/ChunkDataSerializer.cpp b/src/Protocol/ChunkDataSerializer.cpp
index 848a6f597..31f7e8d9c 100644
--- a/src/Protocol/ChunkDataSerializer.cpp
+++ b/src/Protocol/ChunkDataSerializer.cpp
@@ -10,6 +10,7 @@
#include "zlib/zlib.h"
#include "ByteBuffer.h"
#include "Protocol18x.h"
+#include "Protocol19x.h"
@@ -19,13 +20,15 @@ cChunkDataSerializer::cChunkDataSerializer(
const cChunkDef::BlockNibbles & a_BlockMetas,
const cChunkDef::BlockNibbles & a_BlockLight,
const cChunkDef::BlockNibbles & a_BlockSkyLight,
- const unsigned char * a_BiomeData
+ const unsigned char * a_BiomeData,
+ const eDimension a_Dimension
) :
m_BlockTypes(a_BlockTypes),
m_BlockMetas(a_BlockMetas),
m_BlockLight(a_BlockLight),
m_BlockSkyLight(a_BlockSkyLight),
- m_BiomeData(a_BiomeData)
+ m_BiomeData(a_BiomeData),
+ m_Dimension(a_Dimension)
{
}
@@ -45,6 +48,8 @@ const AString & cChunkDataSerializer::Serialize(int a_Version, int a_ChunkX, int
{
case RELEASE_1_3_2: Serialize39(data); break;
case RELEASE_1_8_0: Serialize47(data, a_ChunkX, a_ChunkZ); break;
+ case RELEASE_1_9_0: Serialize107(data, a_ChunkX, a_ChunkZ); break;
+ case RELEASE_1_9_4: Serialize110(data, a_ChunkX, a_ChunkZ); break;
// TODO: Other protocol versions may serialize the data differently; implement here
default:
@@ -189,3 +194,292 @@ void cChunkDataSerializer::Serialize47(AString & a_Data, int a_ChunkX, int a_Chu
+
+void cChunkDataSerializer::Serialize107(AString & a_Data, int a_ChunkX, int a_ChunkZ)
+{
+ // This function returns the fully compressed packet (including packet size), not the raw packet!
+
+ // Create the packet:
+ cByteBuffer Packet(512 KiB);
+ Packet.WriteVarInt32(0x20); // Packet id (Chunk Data packet)
+ Packet.WriteBEInt32(a_ChunkX);
+ Packet.WriteBEInt32(a_ChunkZ);
+ Packet.WriteBool(true); // "Ground-up continuous", or rather, "biome data present" flag
+ Packet.WriteVarInt32(0x0000ffff); // We're always sending the full chunk with no additional data, so the bitmap is 0xffff
+ // Write the chunk size:
+ const size_t NumChunkSections = 16;
+ const size_t ChunkSectionBlocks = 16 * 16 * 16;
+ const size_t BitsPerEntry = 13;
+ const size_t Mask = (1 << BitsPerEntry) - 1; // Creates a mask that is 13 bits long, ie 0b1111111111111
+ const size_t ChunkSectionDataArraySize = (ChunkSectionBlocks * BitsPerEntry) / 8 / 8; // Convert from bit count to long count
+ size_t ChunkSectionSize = (
+ 1 + // Bits per block - set to 13, so the global palette is used and the palette has a length of 0
+ 1 + // Palette length
+ 2 + // Data array length VarInt - 2 bytes for the current value
+ ChunkSectionDataArraySize * 8 + // Actual block data - multiplied by 8 because first number is longs
+ sizeof(m_BlockLight) / NumChunkSections // Block light
+ );
+
+ if (m_Dimension == dimOverworld)
+ {
+ // Sky light is only sent in the overworld.
+ ChunkSectionSize += sizeof(m_BlockSkyLight) / NumChunkSections;
+ }
+
+ const size_t BiomeDataSize = cChunkDef::Width * cChunkDef::Width;
+ size_t ChunkSize = (
+ ChunkSectionSize * 16 +
+ BiomeDataSize
+ );
+ Packet.WriteVarInt32(static_cast<UInt32>(ChunkSize));
+
+ // Write each chunk section...
+ for (size_t SectionIndex = 0; SectionIndex < 16; SectionIndex++)
+ {
+ Packet.WriteBEUInt8(BitsPerEntry);
+ Packet.WriteVarInt32(0); // Palette length is 0
+ Packet.WriteVarInt32(static_cast<UInt32>(ChunkSectionDataArraySize));
+
+ size_t StartIndex = SectionIndex * ChunkSectionBlocks;
+
+ UInt64 TempLong = 0; // Temporary long into which the entries are packed before being written
+ UInt64 CurrentlyWrittenIndex = 0; // Index of the long that is currently being assembled
+
+ for (size_t Index = 0; Index < ChunkSectionBlocks; Index++)
+ {
+ UInt64 Value = static_cast<UInt64>(m_BlockTypes[StartIndex + Index] << 4);
+ if (Index % 2 == 0)
+ {
+ Value |= m_BlockMetas[(StartIndex + Index) / 2] & 0x0f;
+ }
+ else
+ {
+ Value |= m_BlockMetas[(StartIndex + Index) / 2] >> 4;
+ }
+ Value &= Mask; // It shouldn't go out of bounds, but it's still worth being careful
+
+ // Painful part where we write data into the long array. Based on the normal code.
+ size_t BitPosition = Index * BitsPerEntry;
+ size_t FirstIndex = BitPosition / 64;
+ size_t SecondIndex = ((Index + 1) * BitsPerEntry - 1) / 64;
+ size_t BitOffset = BitPosition % 64;
+
+ if (FirstIndex != CurrentlyWrittenIndex)
+ {
+ // Write the current data before modifying it.
+ Packet.WriteBEUInt64(TempLong);
+ TempLong = 0;
+ CurrentlyWrittenIndex = FirstIndex;
+ }
+
+ TempLong |= (Value << BitOffset);
+
+ if (FirstIndex != SecondIndex)
+ {
+ // Part of the data is now in the second long; write the first one first
+ Packet.WriteBEUInt64(TempLong);
+ CurrentlyWrittenIndex = SecondIndex;
+
+ TempLong = (Value >> (64 - BitOffset));
+ }
+ }
+ // The last long is generally still pending at this point; write it now
+ Packet.WriteBEUInt64(TempLong);
+
+ // Light - stored as a nibble, so we need half sizes
+ // As far as I know, there isn't a method to only write a range of the array
+ for (size_t Index = 0; Index < ChunkSectionBlocks / 2; Index++)
+ {
+ Packet.WriteBEUInt8(m_BlockLight[(StartIndex / 2) + Index]);
+ }
+ if (m_Dimension == dimOverworld)
+ {
+ // Skylight is only sent in the overworld; the nether and end do not use it
+ for (size_t Index = 0; Index < ChunkSectionBlocks / 2; Index++)
+ {
+ Packet.WriteBEUInt8(m_BlockSkyLight[(StartIndex / 2) + Index]);
+ }
+ }
+ }
+
+ // Write the biome data
+ Packet.WriteBuf(m_BiomeData, BiomeDataSize);
+
+ AString PacketData;
+ Packet.ReadAll(PacketData);
+ Packet.CommitRead();
+
+ cByteBuffer Buffer(20);
+ if (PacketData.size() >= 256)
+ {
+ if (!cProtocol190::CompressPacket(PacketData, a_Data))
+ {
+ ASSERT(!"Packet compression failed.");
+ a_Data.clear();
+ return;
+ }
+ }
+ else
+ {
+ AString PostData;
+ Buffer.WriteVarInt32(static_cast<UInt32>(Packet.GetUsedSpace() + 1));
+ Buffer.WriteVarInt32(0);
+ Buffer.ReadAll(PostData);
+ Buffer.CommitRead();
+
+ a_Data.clear();
+ a_Data.reserve(PostData.size() + PacketData.size());
+ a_Data.append(PostData.data(), PostData.size());
+ a_Data.append(PacketData.data(), PacketData.size());
+ }
+}
+
+
+
+
+
+void cChunkDataSerializer::Serialize110(AString & a_Data, int a_ChunkX, int a_ChunkZ)
+{
+ // This function returns the fully compressed packet (including packet size), not the raw packet!
+
+ // Create the packet:
+ cByteBuffer Packet(512 KiB);
+ Packet.WriteVarInt32(0x20); // Packet id (Chunk Data packet)
+ Packet.WriteBEInt32(a_ChunkX);
+ Packet.WriteBEInt32(a_ChunkZ);
+ Packet.WriteBool(true); // "Ground-up continuous", or rather, "biome data present" flag
+ Packet.WriteVarInt32(0x0000ffff); // We're always sending the full chunk with no additional data, so the bitmap is 0xffff
+ // Write the chunk size:
+ const size_t NumChunkSections = 16;
+ const size_t ChunkSectionBlocks = 16 * 16 * 16;
+ const size_t BitsPerEntry = 13;
+ const size_t Mask = (1 << BitsPerEntry) - 1; // Creates a mask that is 13 bits long, ie 0b1111111111111
+ const size_t ChunkSectionDataArraySize = (ChunkSectionBlocks * BitsPerEntry) / 8 / 8; // Convert from bit count to long count
+ size_t ChunkSectionSize = (
+ 1 + // Bits per block - set to 13, so the global palette is used and the palette has a length of 0
+ 1 + // Palette length
+ 2 + // Data array length VarInt - 2 bytes for the current value
+ ChunkSectionDataArraySize * 8 + // Actual block data - multiplied by 8 because first number is longs
+ sizeof(m_BlockLight) / NumChunkSections // Block light
+ );
+
+ if (m_Dimension == dimOverworld)
+ {
+ // Sky light is only sent in the overworld.
+ ChunkSectionSize += sizeof(m_BlockSkyLight) / NumChunkSections;
+ }
+
+ const size_t BiomeDataSize = cChunkDef::Width * cChunkDef::Width;
+ size_t ChunkSize = (
+ ChunkSectionSize * 16 +
+ BiomeDataSize
+ );
+ Packet.WriteVarInt32(static_cast<UInt32>(ChunkSize));
+
+ // Write each chunk section...
+ for (size_t SectionIndex = 0; SectionIndex < 16; SectionIndex++)
+ {
+ Packet.WriteBEUInt8(BitsPerEntry);
+ Packet.WriteVarInt32(0); // Palette length is 0
+ Packet.WriteVarInt32(static_cast<UInt32>(ChunkSectionDataArraySize));
+
+ size_t StartIndex = SectionIndex * ChunkSectionBlocks;
+
+ UInt64 TempLong = 0; // Temporary long into which the entries are packed before being written
+ UInt64 CurrentlyWrittenIndex = 0; // Index of the long that is currently being assembled
+
+ for (size_t Index = 0; Index < ChunkSectionBlocks; Index++)
+ {
+ UInt64 Value = static_cast<UInt64>(m_BlockTypes[StartIndex + Index] << 4);
+ if (Index % 2 == 0)
+ {
+ Value |= m_BlockMetas[(StartIndex + Index) / 2] & 0x0f;
+ }
+ else
+ {
+ Value |= m_BlockMetas[(StartIndex + Index) / 2] >> 4;
+ }
+ Value &= Mask; // It shouldn't go out of bounds, but it's still worth being careful
+
+ // Painful part where we write data into the long array. Based on the normal code.
+ size_t BitPosition = Index * BitsPerEntry;
+ size_t FirstIndex = BitPosition / 64;
+ size_t SecondIndex = ((Index + 1) * BitsPerEntry - 1) / 64;
+ size_t BitOffset = BitPosition % 64;
+
+ if (FirstIndex != CurrentlyWrittenIndex)
+ {
+ // Write the current data before modifying it.
+ Packet.WriteBEUInt64(TempLong);
+ TempLong = 0;
+ CurrentlyWrittenIndex = FirstIndex;
+ }
+
+ TempLong |= (Value << BitOffset);
+
+ if (FirstIndex != SecondIndex)
+ {
+ // Part of the data is now in the second long; write the first one first
+ Packet.WriteBEUInt64(TempLong);
+ CurrentlyWrittenIndex = SecondIndex;
+
+ TempLong = (Value >> (64 - BitOffset));
+ }
+ }
+ // The last long is generally still pending at this point; write it now
+ Packet.WriteBEUInt64(TempLong);
+
+ // Light - stored as a nibble, so we need half sizes
+ // As far as I know, there isn't a method to only write a range of the array
+ for (size_t Index = 0; Index < ChunkSectionBlocks / 2; Index++)
+ {
+ Packet.WriteBEUInt8(m_BlockLight[(StartIndex / 2) + Index]);
+ }
+ if (m_Dimension == dimOverworld)
+ {
+ // Skylight is only sent in the overworld; the nether and end do not use it
+ for (size_t Index = 0; Index < ChunkSectionBlocks / 2; Index++)
+ {
+ Packet.WriteBEUInt8(m_BlockSkyLight[(StartIndex / 2) + Index]);
+ }
+ }
+ }
+
+ // Write the biome data
+ Packet.WriteBuf(m_BiomeData, BiomeDataSize);
+
+ // 1.9.4 appends a list of tile entities; write it as empty (count 0)
+ Packet.WriteBEUInt8(0);
+
+ AString PacketData;
+ Packet.ReadAll(PacketData);
+ Packet.CommitRead();
+
+ cByteBuffer Buffer(20);
+ if (PacketData.size() >= 256)
+ {
+ if (!cProtocol190::CompressPacket(PacketData, a_Data))
+ {
+ ASSERT(!"Packet compression failed.");
+ a_Data.clear();
+ return;
+ }
+ }
+ else
+ {
+ AString PostData;
+ Buffer.WriteVarInt32(static_cast<UInt32>(Packet.GetUsedSpace() + 1));
+ Buffer.WriteVarInt32(0);
+ Buffer.ReadAll(PostData);
+ Buffer.CommitRead();
+
+ a_Data.clear();
+ a_Data.reserve(PostData.size() + PacketData.size());
+ a_Data.append(PostData.data(), PostData.size());
+ a_Data.append(PacketData.data(), PacketData.size());
+ }
+}
+
+
+
+
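For reference, the bit-packing performed inside Serialize107 and Serialize110 can be illustrated in isolation. The sketch below is not part of this commit: PackEntries and its std::vector output are hypothetical stand-ins, whereas the commit streams each finished long straight into the cByteBuffer via WriteBEUInt64. It packs entries of at most 13 bits into 64-bit longs, starting at the least-significant bits of each long and letting an entry straddle the boundary into the next long, which is what the FirstIndex / SecondIndex / BitOffset arithmetic above does.

#include <cstddef>
#include <cstdint>
#include <vector>

// Pack a_Entries, each at most a_BitsPerEntry bits wide, into 64-bit longs.
std::vector<uint64_t> PackEntries(const std::vector<uint16_t> & a_Entries, unsigned a_BitsPerEntry)
{
	const uint64_t Mask = (1ULL << a_BitsPerEntry) - 1;
	std::vector<uint64_t> Longs((a_Entries.size() * a_BitsPerEntry + 63) / 64, 0);
	for (std::size_t Index = 0; Index < a_Entries.size(); Index++)
	{
		uint64_t Value = a_Entries[Index] & Mask;
		std::size_t BitPosition = Index * a_BitsPerEntry;                  // First bit occupied by this entry
		std::size_t FirstIndex  = BitPosition / 64;                        // Long holding the first bit
		std::size_t SecondIndex = (BitPosition + a_BitsPerEntry - 1) / 64; // Long holding the last bit
		std::size_t BitOffset   = BitPosition % 64;
		Longs[FirstIndex] |= (Value << BitOffset);
		if (FirstIndex != SecondIndex)
		{
			// The entry straddles a long boundary; the remaining high bits spill into the next long
			Longs[SecondIndex] |= (Value >> (64 - BitOffset));
		}
	}
	return Longs;
}

With BitsPerEntry = 13 and 4096 blocks per section, this produces 4096 * 13 / 64 = 832 longs, which matches the ChunkSectionDataArraySize = (ChunkSectionBlocks * BitsPerEntry) / 8 / 8 written into the packet above.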