summaryrefslogtreecommitdiffstats
path: root/source/cChunkGenerator.cpp
blob: f6a16342cbff9a127f6e9b9cec611de7b9073eaf (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
#include "MemoryLeak.h"
#include "cChunkGenerator.h"
#include "cChunkMap.h"
#include "cChunk.h"
#include "cWorld.h"

#include "cThread.h"
#include "cCriticalSection.h"
#include "cSemaphore.h"

#include "cMCLogger.h"

#include <list>

typedef std::pair<int, int> ChunkCoord;
typedef std::list< ChunkCoord > ChunkCoordList;

#define MAX_SEMAPHORES 1000

// All mutable state of the generator, shared between the owner and the worker thread.
struct cChunkGenerator::sChunkGeneratorState
{
	// Default-constructs an idle state: no pending work, resources not created yet.
	// FIX: initializer list now matches declaration order (the compiler initializes
	// in declaration order anyway; mismatched order triggers -Wreorder), and the
	// chunk pointer is initialized with 0 instead of `false` — `false` is not a
	// valid null pointer constant in C++11 and later.
	sChunkGeneratorState()
		: pCurrentlyGenerating( 0 )
		, bCurrentlyGenerating( false )
		, pCriticalSection( 0 )
		, pSemaphore( 0 )
		, pThread( 0 )
		, pChunkCriticalSection( 0 )
		, bStop( false )
	{}
	ChunkCoordList GenerateQueue;			// Protected by pCriticalSection
	ChunkCoord CurrentlyGeneratingCoords;	// Protected by pCriticalSection
	cChunk* pCurrentlyGenerating;			// Protected by pCriticalSection
	bool bCurrentlyGenerating;				// Protected by pCriticalSection

	cCriticalSection* pCriticalSection;		// Guards the queue and the Currently* members above
	cSemaphore* pSemaphore;					// Counts queued generation requests; worker Wait()s on it
	cThread* pThread;						// The worker thread running GenerateThread

	cCriticalSection* pChunkCriticalSection;// Specially for protecting the actual chunk that is currently being generated, and not just the variables in this struct

	bool bStop;								// Set by the destructor to make the worker exit
};

// Constructs the generator for the given chunk map and starts the worker thread.
// The synchronization primitives are created first and the thread last, so the
// worker can never observe a partially-constructed state.
cChunkGenerator::cChunkGenerator( cChunkMap* a_pChunkMap )
	: m_pState( new sChunkGeneratorState )
	, m_pChunkMap( a_pChunkMap )
{
	m_pState->pCriticalSection = new cCriticalSection();
	// Semaphore starts at 0 (no queued work); MAX_SEMAPHORES caps pending requests
	m_pState->pSemaphore = new cSemaphore( MAX_SEMAPHORES, 0 );

	m_pState->pChunkCriticalSection = new cCriticalSection();

	// NOTE(review): Start(true) presumably makes the thread joinable on delete —
	// confirm against cThread; the destructor relies on that for clean shutdown
	m_pState->pThread = new cThread( GenerateThread, this, "cChunkGenerator::GenerateThread" );
	m_pState->pThread->Start( true );
}

// Stops the worker thread and releases all owned resources, in shutdown order:
// raise the stop flag, wake the worker, wait for it (via thread deletion), then
// destroy the primitives it was using.
cChunkGenerator::~cChunkGenerator()
{
	m_pState->bStop = true;

	m_pState->pSemaphore->Signal(); // Signal so thread can continue and exit
	// NOTE(review): deleting the thread object presumably joins the thread first
	// (Start(true) in the ctor) — confirm in cThread; the deletes below are only
	// safe once the worker has actually exited
	delete m_pState->pThread;

	delete m_pState->pSemaphore;
	delete m_pState->pCriticalSection;
	delete m_pState->pChunkCriticalSection;
	delete m_pState; m_pState = 0;
}

// Queues chunk (a_X, a_Z) for asynchronous generation by the worker thread.
// Duplicate requests are coalesced: a request for the chunk currently being
// generated is ignored, and a request already in the queue is moved to the back
// without signaling the semaphore again.
void cChunkGenerator::GenerateChunk( int a_X, int a_Z )
{
	m_pState->pCriticalSection->Lock();

	if( m_pState->bCurrentlyGenerating )
	{
		if( m_pState->CurrentlyGeneratingCoords.first == a_X && m_pState->CurrentlyGeneratingCoords.second == a_Z )
		{
			m_pState->pCriticalSection->Unlock();
			return;	// Already generating this chunk, so ignore
		}
	}

	const int SizeBefore = (int)m_pState->GenerateQueue.size();

	// Drop any older request for the same coords so the chunk is queued at most once
	m_pState->GenerateQueue.remove( ChunkCoord(a_X, a_Z) );
	if( m_pState->GenerateQueue.size() >= MAX_SEMAPHORES )
	{
		// FIX: list::size() returns size_t; passing it straight to "%i" is
		// undefined behavior on 64-bit targets, so cast to int explicitly
		LOGWARN("WARNING: Can't add chunk (%i, %i) to generation queue: Queue is too big! (%i)", a_X, a_Z, (int)m_pState->GenerateQueue.size() );
		m_pState->pCriticalSection->Unlock();
		return;
	}
	m_pState->GenerateQueue.push_back( ChunkCoord(a_X, a_Z) );
	const int SizeAfter = (int)m_pState->GenerateQueue.size();

	m_pState->pCriticalSection->Unlock();

	// Only signal for a genuinely new entry; a coalesced duplicate already has
	// a matching semaphore count from its original request
	if( SizeBefore < SizeAfter )
		m_pState->pSemaphore->Signal();
}

// Worker thread entry point. Waits on the semaphore (one count per queued
// request), pops coordinates off the queue, generates each chunk and hands it
// to the chunk map. Exits when bStop is set and the semaphore is signaled.
void cChunkGenerator::GenerateThread( void* a_Params )
{
	// Cache some values for easy access (they are all references/pointers)
	cChunkGenerator* self = (cChunkGenerator*)a_Params;
	sChunkGeneratorState* m_pState = self->m_pState;
	ChunkCoordList& GenerateQueue = m_pState->GenerateQueue;
	cChunkMap& ChunkMap = *self->m_pChunkMap;
	cCriticalSection& CriticalSection = *m_pState->pCriticalSection;
	cSemaphore& Semaphore = *m_pState->pSemaphore;

	while( !m_pState->bStop )
	{
		Semaphore.Wait();
		if( m_pState->bStop ) break;

		CriticalSection.Lock();
		if( GenerateQueue.size() == 0 )
		{
			if( !m_pState->bStop ) LOGERROR("ERROR: Semaphore was signaled while GenerateQueue.size == 0");
			CriticalSection.Unlock();
			continue;
		}
		ChunkCoord coord = *GenerateQueue.begin();		// Get next coord from queue
		GenerateQueue.erase( GenerateQueue.begin() );	// Remove coordinate from queue
		m_pState->bCurrentlyGenerating = true;
		m_pState->CurrentlyGeneratingCoords = coord;
		CriticalSection.Unlock();			// Unlock ASAP

		ChunkMap.GetWorld()->LockChunks();
		if( ChunkMap.GetChunk( coord.first, 0, coord.second ) ) // Make sure it has not been loaded in the meantime. Don't want to generate the same chunk twice
		{														// This is possible when forcing the server to generate a chunk in the main thread
			ChunkMap.GetWorld()->UnlockChunks();
			// BUGFIX: clear the in-progress flag before skipping; otherwise
			// GenerateChunk() keeps dropping requests for these coords until
			// some other chunk finishes generating
			CriticalSection.Lock();
			m_pState->bCurrentlyGenerating = false;
			CriticalSection.Unlock();
			continue;
		}
		ChunkMap.GetWorld()->UnlockChunks();

		LOGINFO("cChunkGenerator generating chunk %i %i", coord.first, coord.second );
		cChunk* Chunk = new cChunk( coord.first, 0, coord.second, ChunkMap.GetWorld() );

		CriticalSection.Lock();
		m_pState->pCurrentlyGenerating = Chunk;
		CriticalSection.Unlock();

		self->Lock(); // Protect the actual chunk while Initialize() fills it in
		Chunk->Initialize(); // Generate the chunk
		self->Unlock();

		ChunkMap.GetWorld()->LockChunks();
		ChunkMap.AddChunk( Chunk );	// The map takes ownership of the chunk
		ChunkMap.GetWorld()->UnlockChunks();

		CriticalSection.Lock();
		m_pState->bCurrentlyGenerating = false;
		m_pState->pCurrentlyGenerating = 0;
		CriticalSection.Unlock();
	}
}

// Returns the chunk the worker is currently generating, or 0 when idle.
// NOTE(review): this read is unsynchronized — presumably callers bracket it
// with Lock()/Unlock(); confirm at the call sites.
cChunk* cChunkGenerator::GetCurrentlyGenerating()
{
	sChunkGeneratorState* const State = m_pState;
	return State->pCurrentlyGenerating;
}

void cChunkGenerator::Lock()
{
	m_pState->pChunkCriticalSection->Lock();
}

void cChunkGenerator::Unlock()
{
	m_pState->pChunkCriticalSection->Unlock();
}