15 #ifndef RAPIDJSON_ALLOCATORS_H_
16 #define RAPIDJSON_ALLOCATORS_H_
20 RAPIDJSON_NAMESPACE_BEGIN
63 #ifndef RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY
64 #define RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY (64 * 1024)
static const bool kNeedFree = true;    //!< Blocks from this allocator must be freed by the caller.

//! Allocate a block of memory via the C runtime.
/*! \param size Number of bytes requested.
    \return Pointer to the block, or NULL when \c size is 0 or allocation fails.
    \note \c malloc(0) is implementation-defined (may return NULL or a unique
          pointer); normalize it to NULL so behavior is identical on all platforms. */
void* Malloc(size_t size) {
    if (size)
        return std::malloc(size);
    else
        return NULL;    // zero-size request: deterministic NULL instead of malloc(0)
}
//! Resize a block previously returned by Malloc()/Realloc().
/*! \param originalPtr Block to resize (may be NULL, behaving like Malloc).
    \param originalSize Unused; the C runtime tracks block sizes itself.
    \param newSize Requested size; 0 frees the block and returns NULL.
    \return Pointer to the resized block, or NULL (freed / failure).
    \note The block must NOT be freed before calling std::realloc — freeing
          first and then reallocating the same pointer is a use-after-free.
          The free belongs only on the newSize == 0 path. */
void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
    (void)originalSize;
    if (newSize == 0) {
        std::free(originalPtr);    // explicit-free semantics for a zero-size request
        return NULL;
    }
    return std::realloc(originalPtr, newSize);
}
//! Release a block obtained from this allocator (NULL is a safe no-op).
static void Free(void *ptr) {
    std::free(ptr);
}
//! Chunk-based memory pool; individual allocations are never freed singly.
//! \tparam BaseAllocator allocator used to obtain whole chunks (CRT by default).
template <typename BaseAllocator = CrtAllocator>
// Pool memory is reclaimed in bulk, so per-allocation Free() is unnecessary.
static const bool kNeedFree = false;
// Default-construction initializer list: no chunks yet, no user buffer,
// base allocator optional (lazily created on first AddChunk if null).
// NOTE(review): the constructor signature is outside this view — confirm
// chunkSize/baseAllocator parameter names against the full header.
chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
//! Construct the pool on top of a user-supplied buffer.
/*! The first chunk lives inside \c buffer; further chunks (if needed) come
    from \c baseAllocator. The user buffer is never freed by this object.
    \param buffer User memory backing the first chunk; must outlive the pool
           and be larger than sizeof(ChunkHeader).
    \param size Size of \c buffer in bytes.
    \param chunkSize Capacity of each subsequently allocated chunk.
    \param baseAllocator Allocator for additional chunks (lazily created if null). */
MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
    chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
{
    // Embed the chunk header at the front of the user buffer; the payload
    // follows it, so usable capacity is size minus the header.
    chunkHead_ = reinterpret_cast<ChunkHeader*>(buffer);
    chunkHead_->capacity = size - sizeof(ChunkHeader);  // NOTE(review): header not RAPIDJSON_ALIGN'ed here, unlike AddChunk — confirm intended
    chunkHead_->size = 0;   // nothing carved out of this chunk yet
    chunkHead_->next = 0;   // sole chunk in the list so far
159 while (chunkHead_ && chunkHead_ != userBuffer_) {
160 ChunkHeader* next = chunkHead_->next;
161 baseAllocator_->Free(chunkHead_);
164 if (chunkHead_ && chunkHead_ == userBuffer_)
165 chunkHead_->size = 0;
// Capacity(): sum the payload capacity of every chunk in the list.
for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
    capacity += c->capacity;
// Size(): walks the same chunk list; the accumulation statement is
// outside this view — presumably sums c->size per chunk. TODO confirm.
for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
194 if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity)
195 if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size))
198 void *buffer = reinterpret_cast<char *>(chunkHead_) +
RAPIDJSON_ALIGN(
sizeof(ChunkHeader)) + chunkHead_->size;
199 chunkHead_->size += size;
204 void*
Realloc(
void* originalPtr,
size_t originalSize,
size_t newSize) {
205 if (originalPtr == 0)
206 return Malloc(newSize);
215 if (originalSize >= newSize)
219 if (originalPtr == reinterpret_cast<char *>(chunkHead_) +
RAPIDJSON_ALIGN(
sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
220 size_t increment = static_cast<size_t>(newSize - originalSize);
221 if (chunkHead_->size + increment <= chunkHead_->capacity) {
222 chunkHead_->size += increment;
228 if (
void* newBuffer = Malloc(newSize)) {
230 std::memcpy(newBuffer, originalPtr, originalSize);
//! No-op: pool memory is reclaimed in bulk, never per allocation.
static void Free(void *ptr) {
    (void)ptr;  // deliberately unused — keeps the Allocator concept satisfied
}
250 bool AddChunk(
size_t capacity) {
252 ownBaseAllocator_ = baseAllocator_ =
RAPIDJSON_NEW(BaseAllocator)();
253 if (ChunkHeader* chunk = reinterpret_cast<ChunkHeader*>(baseAllocator_->Malloc(
RAPIDJSON_ALIGN(
sizeof(ChunkHeader)) + capacity))) {
254 chunk->capacity = capacity;
256 chunk->next = chunkHead_;
ChunkHeader *chunkHead_;            //!< Head of the chunk list; only the head serves new allocations.
size_t chunk_capacity_;             //!< Minimum payload capacity of each chunk added by AddChunk().
BaseAllocator* baseAllocator_;      //!< Allocator used to obtain chunk memory (may be user-provided).
BaseAllocator* ownBaseAllocator_;   //!< Base allocator created (and owned) by this object, if any.
282 RAPIDJSON_NAMESPACE_END
#endif // RAPIDJSON_ALLOCATORS_H_