Diffstat (limited to 'src/Protocol/ChunkDataSerializer.cpp')
-rw-r--r--	src/Protocol/ChunkDataSerializer.cpp	83
1 files changed, 63 insertions, 20 deletions
diff --git a/src/Protocol/ChunkDataSerializer.cpp b/src/Protocol/ChunkDataSerializer.cpp
index 29e32ce32..a2e0c9ef9 100644
--- a/src/Protocol/ChunkDataSerializer.cpp
+++ b/src/Protocol/ChunkDataSerializer.cpp
@@ -8,6 +8,7 @@
#include "Globals.h"
#include "ChunkDataSerializer.h"
#include "zlib/zlib.h"
+#include "ByteBuffer.h"
@@ -30,7 +31,7 @@ cChunkDataSerializer::cChunkDataSerializer(
-const AString & cChunkDataSerializer::Serialize(int a_Version)
+const AString & cChunkDataSerializer::Serialize(int a_Version, int a_ChunkX, int a_ChunkZ)
{
Serializations::const_iterator itr = m_Serializations.find(a_Version);
if (itr != m_Serializations.end())
@@ -43,7 +44,7 @@ const AString & cChunkDataSerializer::Serialize(int a_Version)
{
case RELEASE_1_2_5: Serialize29(data); break;
case RELEASE_1_3_2: Serialize39(data); break;
- case RELEASE_1_8_0: Serialize80(data); break;
+ case RELEASE_1_8_0: Serialize80(data, a_ChunkX, a_ChunkZ); break;
// TODO: Other protocol versions may serialize the data differently; implement here
default:
@@ -176,45 +177,87 @@ void cChunkDataSerializer::Serialize39(AString & a_Data)
-void cChunkDataSerializer::Serialize80(AString & a_Data)
+void cChunkDataSerializer::Serialize80(AString & a_Data, int a_ChunkX, int a_ChunkZ)
{
// TODO: Do not copy data and then compress it; rather, compress partial blocks of data (zlib *can* stream)
// Blocktypes converter (1.8 included the meta into the blocktype):
- unsigned short Blocks[ARRAYCOUNT(m_BlockTypes)];
+ /*unsigned short Blocks[ARRAYCOUNT(m_BlockTypes)];
for (size_t Index = 0; Index < cChunkDef::NumBlocks; Index++)
{
BLOCKTYPE BlockType = m_BlockTypes[Index];
NIBBLETYPE BlockMeta = m_BlockMetas[Index / 2] >> ((Index & 1) * 4) & 0x0f;
Blocks[Index] = ((unsigned short)BlockType << 4) | ((unsigned short)BlockMeta);
- }
+ }*/
const int BiomeDataSize = cChunkDef::Width * cChunkDef::Width;
- const int BlockLightOffset = sizeof(Blocks);
+ const int MetadataOffset = sizeof(m_BlockTypes);
+ const int BlockLightOffset = MetadataOffset + sizeof(m_BlockMetas);
const int SkyLightOffset = BlockLightOffset + sizeof(m_BlockLight);
const int BiomeOffset = SkyLightOffset + sizeof(m_BlockSkyLight);
const int DataSize = BiomeOffset + BiomeDataSize;
// Temporary buffer for the composed data:
char AllData [DataSize];
- memcpy(AllData, Blocks, sizeof(Blocks));
+ memcpy(AllData, m_BlockTypes, sizeof(m_BlockTypes));
+ memcpy(AllData + MetadataOffset, m_BlockMetas, sizeof(m_BlockMetas));
memcpy(AllData + BlockLightOffset, m_BlockLight, sizeof(m_BlockLight));
memcpy(AllData + SkyLightOffset, m_BlockSkyLight, sizeof(m_BlockSkyLight));
memcpy(AllData + BiomeOffset, m_BiomeData, BiomeDataSize);
- // Put all those data into a_Data:
- a_Data.push_back('\x01'); // "Ground-up continuous", or rather, "biome data present" flag
-
- // Two bitmaps; we're aways sending the full chunk with no additional data, so the bitmaps are 0xffff and 0, respectively
- // Also, no endian flipping is needed because of the const values
- unsigned short BitMap = 0xffff;
- a_Data.append((const char *)&BitMap, sizeof(unsigned short));
-
- // Write chunk size:
- UInt32 ChunkSize = htonl((UInt32)DataSize);
- a_Data.append((const char *)&ChunkSize, 4);
-
- a_Data.append(AllData, DataSize); // Chunk data
+ cByteBuffer Packet(512 KiB);
+ Packet.WriteVarInt(0x21); // Packet id (Chunk Data packet)
+ Packet.WriteBEInt(a_ChunkX);
+ Packet.WriteBEInt(a_ChunkZ);
+ Packet.WriteBool(true); // "Ground-up continuous", or rather, "biome data present" flag
+ Packet.WriteBEShort(0xffff); // We're always sending the full chunk with no additional data, so the bitmap is 0xffff
+ Packet.WriteVarInt(DataSize); // Chunk size
+ Packet.WriteBuf(AllData, DataSize); // Chunk data
+
+ AString PacketData;
+ Packet.ReadAll(PacketData);
+ Packet.CommitRead();
+
+ cByteBuffer NumberBuffer(20);
+ if (PacketData.size() >= 256)
+ {
+ AString PostData;
+ // Write the uncompressed size once, just to measure how many bytes its VarInt encoding occupies:
+ NumberBuffer.WriteVarInt(PacketData.size());
+ NumberBuffer.ReadAll(PostData);
+ NumberBuffer.CommitRead();
+
+ // Compress the data:
+ const uLongf CompressedMaxSize = 200000;
+ char CompressedData[CompressedMaxSize];
+
+ uLongf CompressedSize = compressBound(PacketData.size());
+ // Run-time check that our compile-time guess about CompressedMaxSize was enough:
+ ASSERT(CompressedSize <= CompressedMaxSize);
+ compress2((Bytef*)CompressedData, &CompressedSize, (const Bytef*)PacketData.data(), PacketData.size(), Z_DEFAULT_COMPRESSION);
+
+ NumberBuffer.WriteVarInt(CompressedSize + PostData.size()); // Packet Length: compressed data plus the VarInt holding the uncompressed size
+ NumberBuffer.WriteVarInt(PacketData.size()); // Data Length: the uncompressed size
+ NumberBuffer.ReadAll(PostData);
+ NumberBuffer.CommitRead();
+
+ a_Data.clear();
+ a_Data.reserve(PostData.size() + CompressedSize);
+ a_Data.append(PostData.data(), PostData.size());
+ a_Data.append(CompressedData, CompressedSize);
+ }
+ else
+ {
+ AString PostData;
+ NumberBuffer.WriteVarInt(PacketData.size() + 1); // Packet Length: the packet data plus the single-byte Data Length below
+ NumberBuffer.WriteVarInt(0); // Data Length of 0 marks an uncompressed packet
+ NumberBuffer.ReadAll(PostData);
+ NumberBuffer.CommitRead();
+
+ a_Data.clear();
+ a_Data.reserve(PostData.size() + PacketData.size());
+ a_Data.append(PostData.data(), PostData.size());
+ a_Data.append(PacketData.data(), PacketData.size());
+ }
}
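
Note on the framing built by Serialize80() above: with compression enabled, the 1.8 protocol frames every packet as [Packet Length VarInt][Data Length VarInt][payload], where Data Length is 0 for packets sent uncompressed and the uncompressed size for zlib-compressed ones; the two branches above compute exactly those two VarInts. Below is a rough standalone sketch of that arithmetic, using std::string and zlib's compress2() directly instead of cByteBuffer. The helper names (AppendVarInt, FramePacket) and the hard-coded 256-byte threshold are illustrative assumptions, not part of the codebase.

#include <string>
#include <vector>
#include <zlib.h>

// Appends a VarInt: 7 bits per byte, least significant group first, high bit set on all but the last byte.
static void AppendVarInt(std::string & a_Out, unsigned int a_Value)
{
	do
	{
		unsigned char Byte = a_Value & 0x7f;
		a_Value >>= 7;
		if (a_Value != 0)
		{
			Byte |= 0x80;
		}
		a_Out.push_back(static_cast<char>(Byte));
	} while (a_Value != 0);
}

// Wraps an already-serialized packet body (packet ID + fields) in the post-SetCompression framing:
//   below the threshold:     [VarInt: body size + 1] [VarInt: 0] [body]
//   at or above the threshold: [VarInt: size of the next VarInt + compressed size] [VarInt: uncompressed size] [zlib data]
static std::string FramePacket(const std::string & a_Body, size_t a_Threshold = 256)
{
	std::string Out;
	if (a_Body.size() < a_Threshold)
	{
		AppendVarInt(Out, static_cast<unsigned int>(a_Body.size()) + 1);  // The single 0x00 Data Length byte counts towards the packet length
		AppendVarInt(Out, 0);                                             // Data Length of 0 marks an uncompressed packet
		Out.append(a_Body);
		return Out;
	}

	// Compress the whole body with zlib:
	uLongf CompressedSize = compressBound(static_cast<uLong>(a_Body.size()));
	std::vector<Bytef> Compressed(CompressedSize);
	compress2(
		Compressed.data(), &CompressedSize,
		reinterpret_cast<const Bytef *>(a_Body.data()), static_cast<uLong>(a_Body.size()),
		Z_DEFAULT_COMPRESSION
	);

	// The Data Length VarInt (uncompressed size) counts towards the outer Packet Length, so measure it first:
	std::string DataLength;
	AppendVarInt(DataLength, static_cast<unsigned int>(a_Body.size()));

	AppendVarInt(Out, static_cast<unsigned int>(DataLength.size() + CompressedSize));  // Packet Length
	Out.append(DataLength);                                                            // Data Length
	Out.append(reinterpret_cast<const char *>(Compressed.data()), CompressedSize);     // Compressed body
	return Out;
}

Streaming the compression with deflate(), as the TODO at the top of Serialize80() suggests, would avoid both the AllData copy and the intermediate PacketData string.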