This is my template-based implementation of the memory allocator from Yun Feng (云风)'s book. The lock is Windows-specific (CRITICAL_SECTION); swapping in a Linux lock is easy, and a pthread-based sketch follows the class. You can use CSmallObject_Allocator on its own as a memory allocator, or have your class CA inherit from SSmall_Alloc, in which case "new CA" goes through this allocator, which is very convenient (a usage sketch follows the code).
#include <windows.h>  // CRITICAL_SECTION
#include <cstdlib>    // malloc / free
#include <cstring>    // memset

// Assumption: MY_ASSERT comes from the rest of the project; fall back to the
// standard assert when it is not defined.
#ifndef MY_ASSERT
#include <cassert>
#define MY_ASSERT assert
#endif

inline size_t _min(size_t a, size_t b) { return a < b ? a : b; }
template<bool bThreadSafe = false>
class CSmallObject_Allocator {
public:
    enum {
        chunk_limit  = 16384,                    // upper bound on the bytes malloc'd per chunk
        max_number   = 64,                       // at most 64 nodes per chunk
        align_size   = 8,                        // size classes are multiples of 8 bytes
        chunk_number = chunk_limit / align_size, // number of size classes / free lists
    };
private:
    struct memory_list { memory_list* _next; };                    // node of a per-size free list
    struct chunk_list  { chunk_list* _next; memory_list* _data; }; // records every malloc'd chunk

    memory_list* _free_list[chunk_number]; // one free list per size class
    long _guard[chunk_number];
    chunk_list* _chunk_list;               // all chunks, released in the destructor
    long _chunk_guard;

    CRITICAL_SECTION allocator_lock; // multithread lock
#define __SObj_ALLOC_LOCK   EnterCriticalSection( &allocator_lock );
#define __SObj_ALLOC_UNLOCK LeaveCriticalSection( &allocator_lock );
#define __SObj_ALLOC_INIT   InitializeCriticalSection( &allocator_lock );
#define __SObj_ALLOC_UNINIT DeleteCriticalSection( &allocator_lock );

    static CSmallObject_Allocator* _instance;
    static long _singleton_guard;
    static bool _singleton_destroyed;

    static void create_instance() {
        //thread_guard guard(&_singleton_guard);
        if (_instance) return;
        MY_ASSERT(!_singleton_destroyed);
        static CSmallObject_Allocator obj;
        _instance = &obj;
    }

    // Map a request of `bytes` to a free-list index: 1..8 -> 0, 9..16 -> 1, ...
    static size_t chunk_index(size_t bytes) {
        size_t idx = (bytes - 1) / align_size;
        MY_ASSERT(idx < chunk_number); // idx is unsigned, so idx >= 0 always holds
        return idx;
    }
    CSmallObject_Allocator() {
        __SObj_ALLOC_INIT
        _chunk_list = 0;
        _chunk_guard = 0;
        ::memset(_free_list, 0, sizeof(_free_list));
        ::memset(_guard, 0, sizeof(_guard));
    }

    // Allocate one chunk from the system and thread its nodes into the free
    // list of size class `idx`.
    memory_list* alloc_chunk(size_t idx) {
        const size_t node_size  = (idx + 1) * align_size;
        const size_t chunk_size = _min(chunk_limit / node_size * node_size,
                                       node_size * max_number);
        //thread_guard guard(&_chunk_guard);
        if (bThreadSafe) __SObj_ALLOC_LOCK
        memory_list*& current_list = _free_list[idx];
        if (current_list) { // already refilled; release the lock before returning
            memory_list* head = current_list;
            if (bThreadSafe) __SObj_ALLOC_UNLOCK
            return head;
        }
        memory_list* ret = current_list =
            reinterpret_cast<memory_list*>(::malloc(chunk_size));
        // Chain node k to node k+1. The loop bound is written so that it
        // cannot underflow when the chunk holds a single node (large size
        // classes, where chunk_size == node_size).
        memory_list* iter = ret;
        for (size_t i = node_size * 2; i <= chunk_size; i += node_size) {
            iter = iter->_next = iter + node_size / sizeof(*iter);
        }
        iter->_next = 0;
        if (bThreadSafe) __SObj_ALLOC_UNLOCK
        return ret;
    }

public:
    ~CSmallObject_Allocator() {
        // Copy every chunk's base pointer out first: the chunk_list nodes
        // live inside the chunks themselves, so nothing may be freed while
        // the list is still being walked.
        int s = 0;
        chunk_list* temp = _chunk_list;
        while (temp) { ++s; temp = temp->_next; }
        void** chunk = reinterpret_cast<void**>(::malloc(s * sizeof(void*)));
        temp = _chunk_list;
        int i = 0;
        while (temp) { chunk[i] = temp->_data; ++i; temp = temp->_next; }
        for (i = 0; i < s; ++i) ::free(chunk[i]);
        ::free(chunk);
        _singleton_destroyed = true;
        _instance = 0;
        __SObj_ALLOC_UNINIT
    }

    static CSmallObject_Allocator& instance() {
        if (!_instance) create_instance();
        return *_instance;
    }

    void* allocate(size_t size) {
        size_t idx = chunk_index(size);
        MY_ASSERT(idx < chunk_number);
        //thread_guard guard(&_guard[idx]);
        if (bThreadSafe) __SObj_ALLOC_LOCK
        memory_list*& temp = _free_list[idx];
        if (!temp) {
            memory_list* new_chunk = alloc_chunk(idx);
            // Record the new chunk so the destructor can free it. If a
            // chunk_list node fits this very size class, carve it straight
            // off the freshly filled free list; otherwise allocate it
            // recursively (safe: CRITICAL_SECTION is a recursive lock).
            chunk_list* chunk_node;
            if (chunk_index(sizeof(chunk_list)) == idx) {
                chunk_node = reinterpret_cast<chunk_list*>(temp);
                temp = temp->_next;
            } else {
                chunk_node = reinterpret_cast<chunk_list*>(allocate(sizeof(chunk_list)));
            }
            //thread_guard guard(&_chunk_guard);
            chunk_node->_next = _chunk_list;
            chunk_node->_data = new_chunk;
            _chunk_list = chunk_node;
        }
        // Pop the head of the free list.
        void* ret = temp;
        temp = temp->_next;
        if (bThreadSafe) __SObj_ALLOC_UNLOCK
        return ret;
    }

    void deallocate(void* p, size_t size) {
        size_t idx = chunk_index(size);
        MY_ASSERT(idx < chunk_number);
        memory_list* free_block = reinterpret_cast<memory_list*>(p);
        //thread_guard guard(&_guard[idx]);
        if (bThreadSafe) __SObj_ALLOC_LOCK
        // Push the block back onto the head of its size class's free list.
        memory_list*& temp = _free_list[idx];
        free_block->_next = temp;
        temp = free_block;
        if (bThreadSafe) __SObj_ALLOC_UNLOCK
    }
};

// Out-of-class definitions of the static members (needed for the template to link).
template<bool bThreadSafe>
CSmallObject_Allocator<bThreadSafe>* CSmallObject_Allocator<bThreadSafe>::_instance = 0;
template<bool bThreadSafe>
long CSmallObject_Allocator<bThreadSafe>::_singleton_guard = 0;
template<bool bThreadSafe>
bool CSmallObject_Allocator<bThreadSafe>::_singleton_destroyed = false;
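Porting the lock to Linux really is just a matter of swapping the lock member and the four macros; nothing else in the class has to change. A minimal sketch, assuming pthreads (this replacement is mine, not from the book; the recursive mutex type mirrors the recursive behavior of CRITICAL_SECTION that allocate() relies on when it re-enters the allocator for the chunk_list node):

// Linux variant (sketch): inside the class, replace the CRITICAL_SECTION
// member with these two members and redefine the four macros accordingly.
#include <pthread.h>

pthread_mutex_t allocator_lock;
pthread_mutexattr_t allocator_lock_attr;

#define __SObj_ALLOC_INIT   pthread_mutexattr_init(&allocator_lock_attr); \
                            pthread_mutexattr_settype(&allocator_lock_attr, PTHREAD_MUTEX_RECURSIVE); \
                            pthread_mutex_init(&allocator_lock, &allocator_lock_attr);
#define __SObj_ALLOC_UNINIT pthread_mutex_destroy(&allocator_lock); \
                            pthread_mutexattr_destroy(&allocator_lock_attr);
#define __SObj_ALLOC_LOCK   pthread_mutex_lock(&allocator_lock);
#define __SObj_ALLOC_UNLOCK pthread_mutex_unlock(&allocator_lock);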
// Inherit from SSmall_Alloc and plain "new"/"delete" on your class will go
// through the small-object allocator.
struct SSmall_Alloc {
    virtual ~SSmall_Alloc() {}

    static void* operator new(size_t size) {
        return CSmallObject_Allocator<false>::instance().allocate(size);
    }

    // The sized form of operator delete, so the allocator is told which
    // size class the block belongs to.
    static void operator delete(void* p, size_t size) {
        CSmallObject_Allocator<false>::instance().deallocate(p, size);
    }
};
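A minimal usage sketch (CA and its members are hypothetical, just to show both ways of using the allocator):

// CA is a hypothetical example class; inheriting SSmall_Alloc redirects its
// new/delete to the small-object allocator.
class CA : public SSmall_Alloc {
public:
    int x, y;
};

int main() {
    CA* a = new CA;   // allocated by CSmallObject_Allocator
    delete a;         // returned to the matching free list

    // Using the allocator directly: deallocate() must receive the same size
    // that was passed to allocate(), since the size selects the free list.
    void* p = CSmallObject_Allocator<false>::instance().allocate(24);
    CSmallObject_Allocator<false>::instance().deallocate(p, 24);
    return 0;
}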