author     madmaxoft@gmail.com <madmaxoft@gmail.com@0a769ca7-a7f5-676a-18bf-c427514a06d6>  2012-02-13 22:47:03 +0100
committer  madmaxoft@gmail.com <madmaxoft@gmail.com@0a769ca7-a7f5-676a-18bf-c427514a06d6>  2012-02-13 22:47:03 +0100
commit     4f17362aeb80e5339c58a5d3b0fbaeb88d9e701c (patch)
tree       febea3ecd89c0d4aa83924e430bf11366d754733 /source/cChunkMap.cpp
parent     New makefile with automatic *.cpp sourcefile import, automatic header file dependencies and switchable debug / release configuration. gnumake-specific :( (diff)
Diffstat (limited to '')
-rw-r--r--  source/cChunkMap.cpp  568
1 file changed, 112 insertions, 456 deletions
diff --git a/source/cChunkMap.cpp b/source/cChunkMap.cpp
index 2d3aabd7b..b864b9792 100644
--- a/source/cChunkMap.cpp
+++ b/source/cChunkMap.cpp
@@ -6,7 +6,6 @@
#include "cWorld.h"
#include "cRoot.h"
#include "cMakeDir.h"
-#include <math.h> // floorf
#ifndef _WIN32
#include <cstdlib> // abs
@@ -18,27 +17,6 @@
#define USE_MEMCPY
-#define LAYER_SIZE (32)
-
-
-
-
-
-////////////////////////////////////////////////////////////////////////////////
-// cChunkMap::cChunkLayer:
-
-cChunkMap::cChunkData* cChunkMap::cChunkLayer::GetChunk( int a_X, int a_Z )
-{
- const int LocalX = a_X - m_X * LAYER_SIZE;
- const int LocalZ = a_Z - m_Z * LAYER_SIZE;
- //LOG("LocalX:%i LocalZ:%i", LocalX, LocalZ );
- if ((LocalX < LAYER_SIZE) && (LocalZ < LAYER_SIZE) && (LocalX > -1) && (LocalZ > -1))
- {
- return &m_Chunks[ LocalX + LocalZ * LAYER_SIZE ];
- }
- return 0;
-}
-
@@ -46,10 +24,8 @@ cChunkMap::cChunkData* cChunkMap::cChunkLayer::GetChunk( int a_X, int a_Z )
////////////////////////////////////////////////////////////////////////////////
// cChunkMap:
-cChunkMap::cChunkMap(cWorld* a_World )
- : m_Layers( 0 )
- , m_NumLayers( 0 )
- , m_World( a_World )
+cChunkMap::cChunkMap(cWorld * a_World )
+ : m_World( a_World )
{
}
@@ -59,173 +35,54 @@ cChunkMap::cChunkMap(cWorld* a_World )
cChunkMap::~cChunkMap()
{
- // TODO: delete layers
-}
-
-
-
-
-
-bool cChunkMap::RemoveLayer( cChunkLayer* a_Layer )
-{
- cChunkLayer* NewLayers = 0;
- if( m_NumLayers > 1 )
- NewLayers = new cChunkLayer[m_NumLayers-1];
-
- int idx = 0;
- bool bExcludedLayer = false;
- for( int i = 0; i < m_NumLayers; ++i )
- {
- if( &m_Layers[i] != a_Layer )
- {
- if( idx < m_NumLayers-1 )
- {
- NewLayers[ idx ] = m_Layers[i];
- idx++;
- }
- }
- else
- bExcludedLayer = true;
- }
-
- if( !bExcludedLayer )
- {
- LOGWARN("Could not remove layer, because layer was not found %i %i", a_Layer->m_X, a_Layer->m_Z);
- delete [] NewLayers;
- return false;
- }
-
- if( m_Layers ) delete [] m_Layers;
- m_Layers = NewLayers;
- m_NumLayers--;
- return true;
-}
-
-
-
-
-
-cChunkMap::cChunkLayer* cChunkMap::AddLayer( const cChunkLayer & a_Layer )
-{
- cChunkLayer* TempLayers = new cChunkLayer[m_NumLayers+1];
- if( m_NumLayers > 0 )
+ cCSLock Lock(m_CSLayers);
+ for (cChunkLayerList::iterator itr = m_Layers.begin(); itr != m_Layers.end(); ++itr)
{
- memcpy( TempLayers, m_Layers, sizeof( cChunkLayer ) * m_NumLayers );
- delete [] m_Layers;
- }
- m_Layers = TempLayers;
-
- m_Layers[m_NumLayers] = a_Layer;
- cChunkLayer* NewLayer = &m_Layers[m_NumLayers];
- m_NumLayers++;
-
- return NewLayer;
+ delete *itr;
+ } // for itr - m_Layers[]
}
-void cChunkMap::AddChunk( cChunk* a_Chunk )
+void cChunkMap::RemoveLayer( cChunkLayer* a_Layer )
{
- const int LayerX = (int)(floorf((float)a_Chunk->GetPosX() / (float)(LAYER_SIZE)));
- const int LayerZ = (int)(floorf((float)a_Chunk->GetPosZ() / (float)(LAYER_SIZE)));
- cChunkLayer* FoundLayer = GetLayer( LayerX, LayerZ );
- if( !FoundLayer )
- {
- cChunkLayer NewLayer( LAYER_SIZE*LAYER_SIZE );
- NewLayer.m_X = LayerX;
- NewLayer.m_Z = LayerZ;
- FoundLayer = AddLayer( NewLayer );
- LOG("Created new layer [%i %i] (total layers %i)", LayerX, LayerZ, m_NumLayers );
- }
-
- //Get local coordinates in layer
- const int LocalX = a_Chunk->GetPosX() - LayerX * LAYER_SIZE;
- const int LocalZ = a_Chunk->GetPosZ() - LayerZ * LAYER_SIZE;
- if( FoundLayer->m_Chunks[ LocalX + LocalZ * LAYER_SIZE ].m_LiveChunk )
- {
- LOGWARN("WARNING: Added chunk to layer while it was already loaded!");
- }
- if( FoundLayer->m_Chunks[ LocalX + LocalZ * LAYER_SIZE ].m_Compressed )
- {
- LOGWARN("WARNING: Added chunk to layer while a compressed version exists!");
- }
- FoundLayer->m_Chunks[ LocalX + LocalZ * LAYER_SIZE ].m_LiveChunk = a_Chunk;
- FoundLayer->m_NumChunksLoaded++;
+ cCSLock Lock(m_CSLayers);
+ m_Layers.remove(a_Layer);
}
-void cChunkMap::RemoveChunk( cChunk* a_Chunk )
+cChunkMap::cChunkLayer * cChunkMap::GetLayer(int a_LayerX, int a_LayerZ)
{
- cChunkLayer* Layer = GetLayerForChunk( a_Chunk->GetPosX(), a_Chunk->GetPosZ() );
- if( Layer )
+ cCSLock Lock(m_CSLayers);
+ for (cChunkLayerList::const_iterator itr = m_Layers.begin(); itr != m_Layers.end(); ++itr)
{
- cChunkData* Data = Layer->GetChunk( a_Chunk->GetPosX(), a_Chunk->GetPosZ() );
- if( Data->m_LiveChunk )
+ if (((*itr)->GetX() == a_LayerX) && ((*itr)->GetZ() == a_LayerZ))
{
- CompressChunk( Data );
- Data->m_LiveChunk = 0; // Set live chunk to 0
+ return *itr;
}
- Layer->m_NumChunksLoaded--;
}
-}
-
-
-
-
-
-void cChunkMap::CompressChunk( cChunkData* a_ChunkData )
-{
- if( a_ChunkData->m_LiveChunk )
+
+ // Not found, create new:
+ cChunkLayer * Layer = new cChunkLayer(a_LayerX, a_LayerZ, this);
+ if (Layer == NULL)
{
- // Delete already present compressed data
- if( a_ChunkData->m_Compressed ) delete [] a_ChunkData->m_Compressed;
-
- // Get Json data
- Json::Value root;
- std::string JsonData = "";
- a_ChunkData->m_LiveChunk->SaveToJson( root );
- if( !root.empty() )
- {
- Json::StyledWriter writer; // TODO FIXME: change to FastWriter ? :D
- JsonData = writer.write( root );
- }
-
- unsigned int TotalSize = cChunk::c_BlockDataSize + JsonData.size();
- uLongf CompressedSize = compressBound( TotalSize );
- a_ChunkData->m_Compressed = new char[CompressedSize];
- char* DataSource = a_ChunkData->m_LiveChunk->pGetBlockData();
- if( JsonData.size() > 0 )
- {
- // Move stuff around, so data is aligned in memory
- DataSource = new char[TotalSize];
- memcpy( DataSource, a_ChunkData->m_LiveChunk->pGetBlockData(), cChunk::c_BlockDataSize );
- memcpy( DataSource + cChunk::c_BlockDataSize, JsonData.c_str(), JsonData.size() );
- }
-
- int errorcode = compress2( (Bytef*)a_ChunkData->m_Compressed, &CompressedSize, (const Bytef*)DataSource, TotalSize, Z_DEFAULT_COMPRESSION);
- if( errorcode != Z_OK )
- {
- LOGERROR("Error compressing data (%i)", errorcode );
- }
-
- a_ChunkData->m_CompressedSize = CompressedSize;
- a_ChunkData->m_UncompressedSize = TotalSize;
-
- if( DataSource != a_ChunkData->m_LiveChunk->pGetBlockData() )
- delete [] DataSource;
+ LOGERROR("cChunkMap: Cannot create new layer, server out of memory?");
+ return NULL;
}
+ m_Layers.push_back(Layer);
+ return Layer;
}
-cChunkMap::cChunkLayer* cChunkMap::GetLayerForChunk( int a_ChunkX, int a_ChunkZ )
+cChunkMap::cChunkLayer * cChunkMap::GetLayerForChunk( int a_ChunkX, int a_ChunkZ )
{
const int LayerX = (int)(floorf((float)a_ChunkX / (float)(LAYER_SIZE)));
const int LayerZ = (int)(floorf((float)a_ChunkZ / (float)(LAYER_SIZE)));
@@ -236,108 +93,43 @@ cChunkMap::cChunkLayer* cChunkMap::GetLayerForChunk( int a_ChunkX, int a_ChunkZ
-cChunkMap::cChunkLayer* cChunkMap::GetLayer( int a_LayerX, int a_LayerZ )
+cChunkPtr cChunkMap::GetChunk( int a_ChunkX, int a_ChunkY, int a_ChunkZ )
{
- // Find layer in memory
- for( int i = 0; i < m_NumLayers; ++i )
+ cCSLock Lock(m_CSLayers);
+ cChunkLayer * Layer = GetLayerForChunk( a_ChunkX, a_ChunkZ );
+ if (Layer == NULL)
{
- if( m_Layers[i].m_X == a_LayerX && m_Layers[i].m_Z == a_LayerZ )
- {
- return &m_Layers[i];
- }
+ // An error must have occurred, since layers are automatically created if they don't exist
+ return cChunkPtr();
}
-
- // Find layer on disk
- cChunkLayer* Layer = LoadLayer( a_LayerX, a_LayerZ );
- if( !Layer ) return 0;
- cChunkLayer* NewLayer = AddLayer( *Layer );
- delete Layer;
- return NewLayer;
+ cChunkPtr Chunk = Layer->GetChunk(a_ChunkX, a_ChunkZ);
+ if (!(Chunk->IsValid()))
+ {
+ m_World->GetStorage().QueueLoadChunk(Chunk);
+ }
+ return Chunk;
}
-cChunk* cChunkMap::GetChunk( int a_X, int a_Y, int a_Z )
+cChunkPtr cChunkMap::GetChunkNoGen( int a_ChunkX, int a_ChunkY, int a_ChunkZ )
{
- cChunkLayer* Layer = GetLayerForChunk( a_X, a_Z );
+ cCSLock Lock(m_CSLayers);
+ cChunkLayer * Layer = GetLayerForChunk( a_ChunkX, a_ChunkZ );
if (Layer == NULL)
{
- return NULL;
+ // An error must have occurred, since layers are automatically created if they don't exist
+ return cChunkPtr();
}
- cChunkData* Data = Layer->GetChunk( a_X, a_Z );
- if (Data->m_LiveChunk != NULL)
- {
- // Already loaded and alive
- return Data->m_LiveChunk;
- }
-
- // Do we at least have the compressed chunk?
- if (Data->m_Compressed == NULL)
- {
- return NULL;
- }
-
- // The chunk has been cached (loaded from file, but not decompressed):
- uLongf DestSize = Data->m_UncompressedSize;
- char* BlockData = new char[ DestSize ];
- int errorcode = uncompress( (Bytef*)BlockData, &DestSize, (Bytef*)Data->m_Compressed, Data->m_CompressedSize );
- if( Data->m_UncompressedSize != DestSize )
- {
- LOGWARN("Lulwtf, expected uncompressed size differs!");
- delete [] BlockData;
- }
- else if( errorcode != Z_OK )
- {
- LOGERROR("ERROR: Decompressing chunk data! %i", errorcode );
- switch( errorcode )
- {
- case Z_MEM_ERROR:
- LOGERROR("Not enough memory");
- break;
- case Z_BUF_ERROR:
- LOGERROR("Not enough room in output buffer");
- break;
- case Z_DATA_ERROR:
- LOGERROR("Input data corrupted or incomplete");
- break;
- default:
- break;
- };
-
- delete [] BlockData;
- }
- else
- {
- cChunk* Chunk = new cChunk(a_X, a_Y, a_Z, m_World);
- memcpy( Chunk->m_BlockData, BlockData, cChunk::c_BlockDataSize );
- Chunk->CalculateHeightmap();
- Data->m_LiveChunk = Chunk;
- Layer->m_NumChunksLoaded++;
-
- if( DestSize > cChunk::c_BlockDataSize ) // We gots some extra data :D
- {
- LOGINFO("Parsing trailing JSON");
- Json::Value root; // will contains the root value after parsing.
- Json::Reader reader;
- if( !reader.parse( BlockData + cChunk::c_BlockDataSize, root, false ) )
- {
- LOGERROR("Failed to parse trailing JSON!");
- }
- else
- {
- Chunk->LoadFromJson( root );
- }
- }
-
- delete [] BlockData;
- delete [] Data->m_Compressed; Data->m_Compressed = 0; Data->m_CompressedSize = 0;
- return Chunk;
- }
- return NULL;
+ cChunkPtr Chunk = Layer->GetChunk(a_ChunkX, a_ChunkZ);
+
+ // TODO: Load, but do not generate, if not valid
+
+ return Chunk;
}
@@ -346,17 +138,11 @@ cChunk* cChunkMap::GetChunk( int a_X, int a_Y, int a_Z )
void cChunkMap::Tick( float a_Dt, MTRand & a_TickRandom )
{
- for( int lay = 0; lay < m_NumLayers; ++lay )
+ cCSLock Lock(m_CSLayers);
+ for (cChunkLayerList::iterator itr = m_Layers.begin(); itr != m_Layers.end(); ++itr)
{
- for( int i = 0; i < LAYER_SIZE*LAYER_SIZE; ++i )
- {
- cChunk* Chunk = m_Layers[lay].m_Chunks[i].m_LiveChunk;
- if ( Chunk != NULL)
- {
- Chunk->Tick( a_Dt, a_TickRandom );
- }
- }
- } // for lay - m_Layers[]
+ (*itr)->Tick(a_Dt, a_TickRandom);
+ } // for itr - m_Layers
}
@@ -365,255 +151,125 @@ void cChunkMap::Tick( float a_Dt, MTRand & a_TickRandom )
void cChunkMap::UnloadUnusedChunks()
{
- cWorld* World = m_World;
- for( int l = 0; l < m_NumLayers; ++l )
+ cCSLock Lock(m_CSLayers);
+ for (cChunkLayerList::iterator itr = m_Layers.begin(); itr != m_Layers.end(); ++itr)
{
- cChunkLayer & Layer = m_Layers[l];
- for( int i = 0; i < LAYER_SIZE*LAYER_SIZE; ++i )
- {
- cChunk* Chunk = Layer.m_Chunks[i].m_LiveChunk;
- if( Chunk && Chunk->GetClients().size() == 0 && Chunk->GetReferenceCount() <= 0 )
- {
- //Chunk->SaveToDisk();
- World->RemoveSpread( ptr_cChunk( Chunk ) );
- RemoveChunk( Chunk );
- delete Chunk;
- }
- }
-
- // Unload layers
- if( Layer.m_NumChunksLoaded == 0 )
- {
- SaveLayer( &Layer );
- for( int i = 0; i < LAYER_SIZE*LAYER_SIZE; ++i ) // Free all chunk data for layer
- {
- delete [] Layer.m_Chunks[i].m_Compressed;
- delete Layer.m_Chunks[i].m_LiveChunk;
- }
- if( RemoveLayer( &Layer ) ) l--;
- }
- else if( Layer.m_NumChunksLoaded < 0 )
- {
- LOGERROR("WTF! Chunks loaded in layer is %i !!", Layer.m_NumChunksLoaded );
- }
- }
+ (*itr)->UnloadUnusedChunks();
+ } // for itr - m_Layers
}
-bool cChunkMap::RemoveEntityFromChunk( cEntity & a_Entity, cChunk* a_CalledFrom /* = 0 */ )
+void cChunkMap::SaveAllChunks(void)
{
- for( int i = 0; i < m_NumLayers; ++i )
+ cCSLock Lock(m_CSLayers);
+ for (cChunkLayerList::iterator itr = m_Layers.begin(); itr != m_Layers.end(); ++itr)
{
- cChunkLayer & Layer = m_Layers[i];
- for( int i = 0; i < LAYER_SIZE*LAYER_SIZE; ++i )
- {
- cChunk* Chunk = Layer.m_Chunks[i].m_LiveChunk;
- if( Chunk != a_CalledFrom )
- {
- if( Chunk && Chunk->RemoveEntity( a_Entity, a_CalledFrom ) )
- return true;
- }
- }
- }
-
- LOG("WARNING: Entity was not found in any chunk!");
- return false;
+ (*itr)->Save();
+ } // for itr - m_Layers[]
}
-void cChunkMap::SaveAllChunks()
+////////////////////////////////////////////////////////////////////////////////
+// cChunkMap::cChunkLayer:
+
+cChunkMap::cChunkLayer::cChunkLayer(int a_LayerX, int a_LayerZ, cChunkMap * a_Parent)
+ : m_LayerX( a_LayerX )
+ , m_LayerZ( a_LayerZ )
+ , m_Parent( a_Parent )
+ , m_NumChunksLoaded( 0 )
{
- for( int i = 0; i < m_NumLayers; ++i )
- {
- SaveLayer( &m_Layers[i] );
- }
}
-/********************************
- * Saving and loading
- **/
-
-void cChunkMap::SaveLayer( cChunkLayer* a_Layer )
+cChunkPtr cChunkMap::cChunkLayer::GetChunk( int a_ChunkX, int a_ChunkZ )
{
- std::string WorldName = m_World->GetName();
- cMakeDir::MakeDir( WorldName.c_str() );
-
- AString SourceFile;
- Printf(SourceFile, "%s/X%i_Z%i.pak", WorldName.c_str(), a_Layer->m_X, a_Layer->m_Z );
-
- cFile f;
- if (!f.Open(SourceFile, cFile::fmWrite))
+ // Always returns an assigned chunkptr, but the chunk needn't be valid (loaded / generated) - callers must check
+
+ const int LocalX = a_ChunkX - m_LayerX * LAYER_SIZE;
+ const int LocalZ = a_ChunkZ - m_LayerZ * LAYER_SIZE;
+
+
+ if (!((LocalX < LAYER_SIZE) && (LocalZ < LAYER_SIZE) && (LocalX > -1) && (LocalZ > -1)))
{
- LOGERROR("ERROR: Could not write to file %s", SourceFile.c_str());
- return;
+ assert(!"Asking a cChunkLayer for a chunk that doesn't belong to it!");
+ return cChunkPtr();
}
-
- //---------------
- // Header
- char PakVersion = 1;
- char ChunkVersion = 1;
- f.Write(&PakVersion, sizeof(PakVersion)); // pak version
- f.Write(&ChunkVersion, sizeof(ChunkVersion)); // chunk version
-
- // Count number of chunks in layer
- short NumChunks = 0;
- for( int i = 0; i < LAYER_SIZE*LAYER_SIZE; ++i )
+
+ int Index = LocalX + LocalZ * LAYER_SIZE;
+ if (m_Chunks[Index].get() == NULL)
{
- if( a_Layer->m_Chunks[i].m_Compressed || a_Layer->m_Chunks[i].m_LiveChunk )
- {
- NumChunks++;
- }
+ m_Chunks[Index].reset(new cChunk(a_ChunkX, 0, a_ChunkZ, m_Parent->GetWorld()));
}
+ return m_Chunks[Index];
+}
- f.Write(&NumChunks, sizeof(NumChunks));
- LOG("Num Chunks in layer [%d, %d]: %i", a_Layer->m_X, a_Layer->m_Z, NumChunks);
- // Chunk headers
- for (int z = 0; z < LAYER_SIZE; ++z)
- {
- for (int x = 0; x < LAYER_SIZE; ++x)
- {
- cChunkData & Data = a_Layer->m_Chunks[x + z * LAYER_SIZE];
- CompressChunk(&Data);
- if (Data.m_Compressed != NULL)
- {
- int ChunkX = a_Layer->m_X * LAYER_SIZE + x;
- int ChunkZ = a_Layer->m_Z * LAYER_SIZE + z;
- unsigned int Size = Data.m_CompressedSize; // Needs to be size of compressed data
- unsigned int USize = Data.m_UncompressedSize; // Uncompressed size
- f.Write(&ChunkX, sizeof(ChunkX));
- f.Write(&ChunkZ, sizeof(ChunkZ));
- f.Write(&Size, sizeof(Size));
- f.Write(&USize, sizeof(USize));
- }
- } // for x - a_Layer->mChunks[x]
- } // for z - a_Layer->m_Chunks[z]
-
- // Chunk data
- for (int i = 0; i < LAYER_SIZE*LAYER_SIZE; ++i)
+
+
+
+void cChunkMap::cChunkLayer::Tick(float a_Dt, MTRand & a_TickRand)
+{
+ for (int i = 0; i < ARRAYCOUNT(m_Chunks); i++)
{
- char * Compressed = a_Layer->m_Chunks[i].m_Compressed;
- if (Compressed != NULL)
+ if ((m_Chunks[i] != NULL) && (m_Chunks[i]->IsValid()))
{
- f.Write(Compressed, a_Layer->m_Chunks[i].m_CompressedSize);
- if (a_Layer->m_Chunks[i].m_LiveChunk != NULL) // If there's a live chunk we have no need for compressed data
- {
- delete [] a_Layer->m_Chunks[i].m_Compressed;
- a_Layer->m_Chunks[i].m_Compressed = 0;
- a_Layer->m_Chunks[i].m_CompressedSize = 0;
- }
+ m_Chunks[i]->Tick(a_Dt, a_TickRand);
}
- } // for i - a_Layer->m_Chunks[]
+ } // for i - m_Chunks[]
}
-#define READ(File, Var) \
- if (File.Read(&Var, sizeof(Var)) != sizeof(Var)) \
- { \
- LOGERROR("ERROR READING %s FROM FILE %s (line %d)", #Var, SourceFile.c_str(), __LINE__); \
- return NULL; \
- }
-
-cChunkMap::cChunkLayer* cChunkMap::LoadLayer(int a_LayerX, int a_LayerZ )
+void cChunkMap::cChunkLayer::Save(void)
{
- std::string WorldName = m_World->GetName();
-
- AString SourceFile;
- Printf(SourceFile, "%s/X%i_Z%i.pak", WorldName.c_str(), a_LayerX, a_LayerZ);
-
- cFile f(SourceFile, cFile::fmRead);
- if (!f.IsOpen())
+ cWorld * World = m_Parent->GetWorld();
+ for (int i = 0; i < ARRAYCOUNT(m_Chunks); ++i)
{
- return NULL;
- }
-
- char PakVersion = 0;
- char ChunkVersion = 0;
+ if ((m_Chunks[i] != NULL) && m_Chunks[i]->IsValid())
+ {
+ World->GetStorage().QueueSaveChunk(m_Chunks[i]);
+ }
+ } // for i - m_Chunks[]
+}
+
- READ(f, PakVersion);
- if (PakVersion != 1)
- {
- LOGERROR("WRONG PAK VERSION in file \"%s\"!", SourceFile.c_str());
- return NULL;
- }
-
- READ(f, ChunkVersion);
- if (ChunkVersion != 1 )
- {
- LOGERROR("WRONG CHUNK VERSION in file \"%s\"!", SourceFile.c_str());
- return NULL;
- }
- short NumChunks = 0;
- READ(f, NumChunks);
-
- LOG("Num chunks in file \"%s\": %i", SourceFile.c_str(), NumChunks);
- std::auto_ptr<cChunkLayer> Layer(new cChunkLayer(LAYER_SIZE * LAYER_SIZE)); // The auto_ptr deletes the Layer if we exit with an error
- Layer->m_X = a_LayerX;
- Layer->m_Z = a_LayerZ;
-
- cChunkData * OrderedData[LAYER_SIZE * LAYER_SIZE]; // So we can loop over the chunks in the order they were loaded
-
- // Loop over all chunk headers
- for( short i = 0; i < NumChunks; ++i )
- {
- int ChunkX = 0;
- int ChunkZ = 0;
- READ(f, ChunkX);
- READ(f, ChunkZ);
- cChunkData* Data = Layer->GetChunk( ChunkX, ChunkZ );
-
- if (Data == NULL)
- {
- LOGERROR("Chunk with wrong coordinates [%i, %i] in pak file \"%s\"!", ChunkX, ChunkZ, SourceFile.c_str());
- return NULL;
- }
- else
- {
- READ(f, Data->m_CompressedSize);
- READ(f, Data->m_UncompressedSize);
- }
- OrderedData[i] = Data;
- }
- // Loop over chunks again, in the order they were loaded, and load their compressed data
- for( short i = 0; i < NumChunks; ++i )
+void cChunkMap::cChunkLayer::UnloadUnusedChunks(void)
+{
+ for (int i = 0; i < ARRAYCOUNT(m_Chunks); i++)
{
- cChunkData* Data = OrderedData[i];
- Data->m_Compressed = new char[ Data->m_CompressedSize ];
- if (f.Read(Data->m_Compressed, Data->m_CompressedSize) != Data->m_CompressedSize)
+ if ((m_Chunks[i] != NULL) && (m_Chunks[i]->CanUnload()))
{
- LOGERROR("ERROR reading compressed data for chunk #%i from file \"%s\"", i, SourceFile.c_str());
- return NULL;
+ // TODO: Save the chunk if it was changed
+ m_Chunks[i].reset();
}
- }
- return Layer.release();
+ } // for i - m_Chunks[]
}
-int cChunkMap::GetNumChunks()
+int cChunkMap::GetNumChunks(void)
{
+ cCSLock Lock(m_CSLayers);
int NumChunks = 0;
- for( int i = 0; i < m_NumLayers; ++i )
+ for (cChunkLayerList::iterator itr = m_Layers.begin(); itr != m_Layers.end(); ++itr)
{
- NumChunks += m_Layers[i].m_NumChunksLoaded;
+ NumChunks += (*itr)->GetNumChunksLoaded();
}
return NumChunks;
}
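
Below is a minimal usage sketch of the chunk API after this change, for illustration only. It assumes a header named cChunkMap.h, an accessor cWorld::GetChunkMap(), and that cChunkPtr behaves like a shared smart pointer (the diff shows it exposing get() and reset()); none of those assumptions are confirmed by the diff itself. The behaviour it illustrates is taken from the new code: GetChunk() always returns an assigned cChunkPtr and queues a load if needed, but the chunk need not be valid (loaded / generated) yet, so callers must check IsValid().

// Hypothetical caller; GetChunkMap() and the include paths are assumptions.
#include "cWorld.h"
#include "cChunkMap.h"

void TouchChunk(cWorld & a_World, int a_ChunkX, int a_ChunkZ)
{
	// GetChunk() hands back an assigned cChunkPtr and queues loading via
	// cWorldStorage, but the chunk may not be loaded / generated yet:
	cChunkPtr Chunk = a_World.GetChunkMap()->GetChunk(a_ChunkX, 0, a_ChunkZ);
	if ((Chunk.get() == NULL) || !Chunk->IsValid())
	{
		return;  // Not usable yet; retry after the storage thread has loaded it
	}
	// Safe to work with the chunk here (tick it, query blocks, etc.)
}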