 * Copyright (c) 1996,1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Moscow Center for SPARC Technology
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
#include "stlport_prefix.h"

#if defined (__GNUC__) && (defined (__CYGWIN__) || defined (__MINGW32__))
//#  define _STLP_MALLOC_USABLE_SIZE(__buf) malloc_usable_size(__buf)
#endif

#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
#  include <pthread_alloc>
#endif

#include <stl/_threads.h>

#include "lock_free_slist.h"

#if defined (__WATCOMC__)
#  pragma warning 367 9
#  pragma warning 368 9
#endif
#if defined (_STLP_SGI_THREADS)
// We test whether threads are in use before locking.
// Perhaps this should be moved into stl_threads.h, but that
// probably makes it harder to avoid the procedure call when
// it isn't needed.
extern int __us_rsthread_malloc;
#endif
// Specialised debug form of malloc which does not report "false"
// memory leaks when run with debug CRT libraries.
#if defined (_STLP_MSVC) && (_STLP_MSVC >= 1020 && defined (_STLP_DEBUG_ALLOC)) && !defined (_STLP_WCE)
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_malloc_dbg(__bytes, _CRT_BLOCK, __FILE__, __LINE__)); }
inline void __stlp_chunk_free(void* __p) { _free_dbg(__p, _CRT_BLOCK); }
#else
#  ifdef _STLP_NODE_ALLOC_USE_MALLOC
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_STLP_VENDOR_CSTD::malloc(__bytes)); }
inline void __stlp_chunk_free(void* __p) { _STLP_VENDOR_CSTD::free(__p); }
#  else
inline void* __stlp_chunk_malloc(size_t __bytes) { return _STLP_STD::__stl_new(__bytes); }
inline void __stlp_chunk_free(void* __p) { _STLP_STD::__stl_delete(__p); }
#  endif
#endif
#define _S_FREELIST_INDEX(__bytes) ((__bytes - size_t(1)) >> (int)_ALIGN_SHIFT)
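// Worked example (illustrative; assumes the usual _ALIGN_SHIFT == 3,
// i.e. 8-byte alignment):
//   _S_FREELIST_INDEX(1)   == (1 - 1)   >> 3 == 0
//   _S_FREELIST_INDEX(8)   == (8 - 1)   >> 3 == 0
//   _S_FREELIST_INDEX(9)   == (9 - 1)   >> 3 == 1
//   _S_FREELIST_INDEX(128) == (128 - 1) >> 3 == 15
// The "- 1" makes exact multiples of the alignment fall into the lower
// bucket, so sizes 1..8 share free list 0, sizes 9..16 free list 1, etc.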
class __malloc_alloc_impl {
private:
  static void* _S_oom_malloc(size_t __n) {
    __oom_handler_type __my_malloc_handler;
    void* __result;

    for (;;) {
      __my_malloc_handler = __oom_handler;
      if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
      (*__my_malloc_handler)();
      __result = malloc(__n);
      if (__result) return __result;
    }
#if defined (_STLP_NEED_UNREACHABLE_RETURN)
    return 0;
#endif
  }
  static __oom_handler_type __oom_handler;
public:
  // this one is needed for proper simple_alloc wrapping
  typedef char value_type;

  static void* allocate(size_t& __n) {
    void* __result = malloc(__n);
    if (0 == __result) {
      __result = _S_oom_malloc(__n);
    }
#if defined (_STLP_MALLOC_USABLE_SIZE)
    else {
      size_t __new_n = _STLP_MALLOC_USABLE_SIZE(__result);
      if (__n != __new_n) {
        // %lu with explicit casts: size_t does not match %d on LP64 platforms.
        printf("requested size %lu, usable %lu\n",
               __STATIC_CAST(unsigned long, __n), __STATIC_CAST(unsigned long, __new_n));
      }
      __n = __new_n;
    }
#endif
    return __result;
  }

  static void deallocate(void* __p, size_t /* __n */) { free((char*)__p); }
  static __oom_handler_type set_malloc_handler(__oom_handler_type __f) {
    __oom_handler_type __old = __oom_handler;
    __oom_handler = __f;
    return __old;
  }
};

// malloc_alloc out-of-memory handling
__oom_handler_type __malloc_alloc_impl::__oom_handler = __STATIC_CAST(__oom_handler_type, 0);

void* _STLP_CALL __malloc_alloc::allocate(size_t& __n)
{ return __malloc_alloc_impl::allocate(__n); }
__oom_handler_type _STLP_CALL __malloc_alloc::set_malloc_handler(__oom_handler_type __f)
{ return __malloc_alloc_impl::set_malloc_handler(__f); }
// *******************************************************
// Default node allocator.
// With a reasonable compiler, this should be roughly as fast as the
// original STL class-specific allocators, but with less fragmentation.
//
// Important implementation properties:
// 1. If the client requests an object of size > _MAX_BYTES, the resulting
//    object will be obtained directly from malloc.
// 2. In all other cases, we allocate an object of size exactly
//    _S_round_up(requested_size). Thus the client has enough size
//    information that we can return the object to the proper free list
//    without permanently losing part of the object.
//    (A worked example follows below.)
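// Worked example (illustrative; assumes the usual STLport parameters
// _ALIGN == 8 and _MAX_BYTES == 128):
//   allocate(100) -> size is rounded up to 104 and served from the
//                    free list at _S_FREELIST_INDEX(104) == 12;
//   allocate(200) -> 200 > _MAX_BYTES, so the request goes straight
//                    to malloc and bypasses the free lists entirely.
// Because deallocate() receives the same (rounded) size, a node can
// always be returned to the free list it originally came from.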
#define _STLP_NFREELISTS 16

#if defined (_STLP_LEAKS_PEDANTIC) && defined (_STLP_USE_DYNAMIC_LIB)
/*
 * We can only clean up the node allocator memory pool if we are sure that
 * the STLport library is used as a shared one, as that guarantees the
 * uniqueness of the node allocator instance. Without that guarantee, node
 * allocator instances might exchange memory blocks, making the
 * implementation of a cleanup process much more complicated.
 */
#  define _STLP_DO_CLEAN_NODE_ALLOC
#endif
/* When STLport is used without multi-threaded safety we use the node
 * allocator implementation with locks, as the locks then become no-ops.
 * The lock-free implementation always uses system-specific atomic
 * operations, which are slower than 'normal' ones.
 */
#if defined (_STLP_THREADS) && \
    defined (_STLP_HAS_ATOMIC_FREELIST) && defined (_STLP_ATOMIC_ADD)
/*
 * We have an implementation of the atomic freelist (_STLP_atomic_freelist)
 * for this architecture and compiler. That means we can use the non-blocking
 * implementation of the node-allocation engine. */
#  define _STLP_USE_LOCK_FREE_IMPLEMENTATION
#endif
#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
#  if defined (_STLP_THREADS)

class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() {
#    if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
#    endif
      _S_lock._M_acquire_lock();
  }

  ~_Node_Alloc_Lock() {
#    if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
#    endif
      _S_lock._M_release_lock();
  }

  static _STLP_STATIC_MUTEX _S_lock;
};

_STLP_STATIC_MUTEX _Node_Alloc_Lock::_S_lock _STLP_MUTEX_INITIALIZER;

#  else

class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() { }
  ~_Node_Alloc_Lock() { }
};

#  endif
struct _Node_alloc_obj {
  _Node_alloc_obj * _M_next;
};
class __node_alloc_impl {
private:
  static inline size_t _STLP_CALL _S_round_up(size_t __bytes)
  { return (((__bytes) + (size_t)_ALIGN - 1) & ~((size_t)_ALIGN - 1)); }
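  // Example (illustrative; assumes _ALIGN == 8): _S_round_up(1) == 8,
  // _S_round_up(8) == 8, _S_round_up(13) == 16. Adding _ALIGN - 1 and
  // masking off the low bits rounds any request up to the next multiple
  // of the alignment.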
#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_atomic_freelist::item   _Obj;
  typedef _STLP_atomic_freelist         _Freelist;
  typedef _STLP_atomic_freelist         _ChunkList;

  // Header of blocks of memory that have been allocated as part of
  // a larger chunk but have not yet been chopped up into nodes.
  struct _FreeBlockHeader : public _STLP_atomic_freelist::item {
    char* _M_end; // pointer to end of free memory
  };
#else
  typedef _Node_alloc_obj       _Obj;
  typedef _Obj* _STLP_VOLATILE  _Freelist;
  typedef _Obj*                 _ChunkList;
#endif

  // Returns an object of size __n, and optionally adds to the size-__n free list.
  static _Obj* _S_refill(size_t __n);
  // Allocates a chunk for __nobjs objects of size __p_size. __nobjs may be
  // reduced if it is inconvenient to allocate the requested number.
  static char* _S_chunk_alloc(size_t __p_size, int& __nobjs);
  // Chunk allocation state.
  static _Freelist _S_free_list[_STLP_NFREELISTS];
  // Amount of total allocated memory
#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  static _STLP_VOLATILE __stl_atomic_t _S_heap_size;
#else
  static size_t _S_heap_size;
#endif
#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  // List of blocks of free memory
  static _STLP_atomic_freelist _S_free_mem_blocks;
#else
  // Start of the current free memory buffer
  static char* _S_start_free;
  // End of the current free memory buffer
  static char* _S_end_free;
#endif

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
public:
  // Methods to report alloc/dealloc calls to the counter system.
#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_VOLATILE __stl_atomic_t _AllocCounter;
#  else
  typedef __stl_atomic_t _AllocCounter;
#  endif
  static _AllocCounter& _STLP_CALL _S_alloc_counter();
  static void _S_alloc_call();
  static void _S_dealloc_call();

private:
  // Free all the allocated chunks of memory
  static void _S_chunk_dealloc();
  // Beginning of the linked list of allocated chunks of memory
  static _ChunkList _S_chunks;
#endif /* _STLP_DO_CLEAN_NODE_ALLOC */

public:
  /* __n must be > 0 */
  static void* _M_allocate(size_t& __n);
  /* __p may not be 0 */
  static void _M_deallocate(void *__p, size_t __n);
};
#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj *__r;

  // Acquire the lock here with a constructor call.
  // This ensures that it is released on exit or during stack unwinding.
  _Node_Alloc_Lock __lock_instance;

  if ( (__r = *__my_free_list) != 0 ) {
    *__my_free_list = __r->_M_next;
  }
  else {
    __r = _S_refill(__n);
  }
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
#  endif
  // lock is released here
  return __r;
}

void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj * __pobj = __STATIC_CAST(_Obj*, __p);

  // acquire lock
  _Node_Alloc_Lock __lock_instance;
  __pobj->_M_next = *__my_free_list;
  *__my_free_list = __pobj;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
#  endif
  // lock is released here
}
/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much. */
/* We assume that size is properly aligned. */
/* We hold the allocation lock. */
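// Illustrative arithmetic for the growth heuristic used below (the
// numbers are hypothetical): a refill for 20 objects of 16 bytes with
// 4096 bytes already allocated overall asks the system for
//   __bytes_to_get = 2 * (16 * 20) + _S_round_up(4096 >> 4)
//                  = 640 + 256 = 896 bytes,
// so chunks grow in proportion to the total heap usage so far.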
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
  char* __result;
  size_t __total_bytes = _p_size * __nobjs;
  size_t __bytes_left = _S_end_free - _S_start_free;

  if (__bytes_left > 0) {
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    if (__bytes_left >= _p_size) {
      __nobjs = (int)(__bytes_left / _p_size);
      __total_bytes = _p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    // Try to make use of the left-over piece.
    _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__bytes_left);
    __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = *__my_free_list;
    *__my_free_list = __REINTERPRET_CAST(_Obj*, _S_start_free);
  }
  size_t __bytes_to_get =
    2 * __total_bytes + _S_round_up(_S_heap_size >> 4)
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
    + sizeof(_Obj)
#  endif
    ;

  _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
  if (0 == _S_start_free) {
    _Obj* _STLP_VOLATILE* __my_free_list;
    _Obj* __p;
    // Try to make do with what we have. That can't hurt.
    // We do not try smaller requests, since that tends
    // to result in disaster on multi-process machines.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
      __p = *__my_free_list;
      if (0 != __p) {
        *__my_free_list = __p -> _M_next;
        _S_start_free = __REINTERPRET_CAST(char*, __p);
        _S_end_free = _S_start_free + __i;
        return _S_chunk_alloc(_p_size, __nobjs);
        // Any leftover piece will eventually make it to the
        // right free list.
      }
    }
    _S_end_free = 0; // In case of exception.
    _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
    // was: _S_start_free = (char*)malloc_alloc::allocate(__bytes_to_get);
    // This should either throw an
    // exception or remedy the situation. Thus we assume it
    // succeeded.
  }

  _S_heap_size += __bytes_to_get;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = _S_chunks;
  _S_chunks = __REINTERPRET_CAST(_Obj*, _S_start_free);
#  endif
  _S_end_free = _S_start_free + __bytes_to_get;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_start_free += sizeof(_Obj);
#  endif
  return _S_chunk_alloc(_p_size, __nobjs);
}
/* Returns an object of size __n, and optionally adds to the size-__n free list. */
/* We assume that __n is properly aligned. */
/* We hold the allocation lock. */
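// Layout sketch (illustrative, with __n == 16 and a chunk holding 5
// objects returned by _S_chunk_alloc):
//
//   chunk:  [ obj0 ][ obj1 ][ obj2 ][ obj3 ][ obj4 ]
//             ^        ^
//             |        +-- *__my_free_list: obj1 -> obj2 -> obj3 -> obj4 -> 0
//             +-- returned to the caller
//
// The first object is handed straight back; the rest are threaded in
// place into the free list for size 16.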
_Node_alloc_obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (1 == __nobjs) return __REINTERPRET_CAST(_Obj*, __chunk);

  _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj* __result;
  _Obj* __current_obj;
  _Obj* __next_obj;

  /* Build free list in chunk */
  __result = __REINTERPRET_CAST(_Obj*, __chunk);
  *__my_free_list = __next_obj = __REINTERPRET_CAST(_Obj*, __chunk + __n);
  // First decrement: one object goes straight to the caller. The loop
  // then links the remaining objects; the last one is terminated below.
  for (--__nobjs; --__nobjs; ) {
    __current_obj = __next_obj;
    __next_obj = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __next_obj) + __n);
    __current_obj->_M_next = __next_obj;
  }
  __next_obj->_M_next = 0;
  return __result;
}
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ ++_S_alloc_counter(); }

void __node_alloc_impl::_S_dealloc_call() {
  __stl_atomic_t &counter = _S_alloc_counter();
  if (--counter == 0)
  { _S_chunk_dealloc(); }
}

/* We deallocate all the memory chunks */
void __node_alloc_impl::_S_chunk_dealloc() {
  _Obj *__pcur = _S_chunks, *__pnext;
  while (__pcur != 0) {
    __pnext = __pcur->_M_next;
    __stlp_chunk_free(__pcur);
    __pcur = __pnext;
  }
  _S_chunks = 0;
  _S_start_free = _S_end_free = 0;
  _S_heap_size = 0;
  memset(__REINTERPRET_CAST(char*, &_S_free_list[0]), 0, _STLP_NFREELISTS * sizeof(_Obj*));
}
#  endif /* _STLP_DO_CLEAN_NODE_ALLOC */
#else /* !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION) */

void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj* __r = _S_free_list[_S_FREELIST_INDEX(__n)].pop();
  if (__r == 0)
  { __r = _S_refill(__n); }

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
#  endif
  return __r;
}

void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _S_free_list[_S_FREELIST_INDEX(__n)].push(__STATIC_CAST(_Obj*, __p));

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
#  endif
}
/* Returns an object of size __n, and optionally adds additional ones to */
/* the freelist of objects of size __n. */
/* We assume that __n is properly aligned. */
__node_alloc_impl::_Obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (__nobjs <= 1)
    return __REINTERPRET_CAST(_Obj*, __chunk);

  // Push all new nodes (minus first one) onto freelist
  _Obj* __result   = __REINTERPRET_CAST(_Obj*, __chunk);
  _Obj* __cur_item = __result;
  _Freelist* __my_freelist = _S_free_list + _S_FREELIST_INDEX(__n);
  for (--__nobjs; __nobjs != 0; --__nobjs) {
    __cur_item = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __cur_item) + __n);
    __my_freelist->push(__cur_item);
  }
  return __result;
}
/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much. */
/* We assume that size is properly aligned. */
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  // We are going to add a small memory block to keep track of the addresses
  // of all the allocated blocks; we need to do so respecting the memory
  // alignment. The following static assert checks that the reserved block
  // is big enough to store a pointer.
  _STLP_STATIC_ASSERT(sizeof(_Obj) <= _ALIGN)
#  endif
  char* __result = 0;
  __stl_atomic_t __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;

  _FreeBlockHeader* __block = __STATIC_CAST(_FreeBlockHeader*, _S_free_mem_blocks.pop());
  if (__block != 0) {
    // We checked a block out and can now mess with it with impunity.
    // We'll put the remainder back into the list if we're done with it below.
    char* __buf_start = __REINTERPRET_CAST(char*, __block);
    __stl_atomic_t __bytes_left = __block->_M_end - __buf_start;

    if ((__bytes_left < __total_bytes) && (__bytes_left >= __STATIC_CAST(__stl_atomic_t, _p_size))) {
      // There's enough left for at least one object, but not as much as we wanted
      __result = __buf_start;
      __nobjs = (int)(__bytes_left / _p_size);
      __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;
      __bytes_left -= __total_bytes;
      __buf_start  += __total_bytes;
    }
    else if (__bytes_left >= __total_bytes) {
      // The block has enough left to satisfy all that was asked for
      __result = __buf_start;
      __bytes_left -= __total_bytes;
      __buf_start  += __total_bytes;
    }

    if (__bytes_left != 0) {
      // There is still some memory left over in the block after we
      // satisfied our request.
      if ((__result != 0) && (__bytes_left >= sizeof(_FreeBlockHeader))) {
        // We were able to allocate at least one object and there is still
        // enough left to put the remainder back into the list.
        _FreeBlockHeader* __newblock = __REINTERPRET_CAST(_FreeBlockHeader*, __buf_start);
        __newblock->_M_end = __block->_M_end;
        _S_free_mem_blocks.push(__newblock);
      }
      else {
        // We were not able to allocate enough for at least one object.
        // Shove into freelist of nearest (rounded-down!) size.
        size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
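        // e.g. (assuming _ALIGN == 8): __bytes_left == 13 gives
        // _S_round_up(14) - 8 == 16 - 8 == 8, i.e. the leftover size is
        // rounded *down* to the alignment, so no free list entry ever
        // claims more memory than it actually owns.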
        if (__rounded_down > 0)
          _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push((_Obj*)__buf_start);
      }
    }

    if (__result != 0)
      return __result;
  }
  // We couldn't satisfy it from the list of free blocks, get new memory.
  __stl_atomic_t __bytes_to_get =
    2 * __total_bytes + __STATIC_CAST(__stl_atomic_t, _S_round_up(_S_heap_size >> 4))
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
    + _ALIGN
#  endif
    ;

  __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
  // Alignment check
  _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)

  if (0 == __result) {
    // Allocation failed; try to cannibalize from freelists of larger object sizes.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      _Obj* __p = _S_free_list[_S_FREELIST_INDEX(__i)].pop();
      if (0 != __p) {
        if (__i < sizeof(_FreeBlockHeader)) {
          // Not enough to put into the list of free blocks, divvy it up here.
          // Use as much as possible for this request and shove the remainder
          // into the freelist.
          __nobjs = (int)(__i / _p_size);
          __total_bytes = __nobjs * __STATIC_CAST(__stl_atomic_t, _p_size);
          size_t __bytes_left = __i - __total_bytes;
          size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
          if (__rounded_down > 0) {
            _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push(__REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __p) + __total_bytes));
          }
          return __REINTERPRET_CAST(char*, __p);
        }
        else {
          // Add the node to the list of available blocks and recursively
          // allocate from it.
          _FreeBlockHeader* __newblock = (_FreeBlockHeader*)__p;
          __newblock->_M_end = __REINTERPRET_CAST(char*, __p) + __i;
          _S_free_mem_blocks.push(__newblock);
          return _S_chunk_alloc(_p_size, __nobjs);
        }
      }
    }
    // We were not able to find anything in a freelist, try to allocate a smaller amount.
    __bytes_to_get = __total_bytes
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
      + _ALIGN
#  endif
      ;

    __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
    // Alignment check
    _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)

    // This should either throw an exception or remedy the situation.
    // Thus we assume it succeeded.
  }

  _STLP_ATOMIC_ADD(&_S_heap_size, __bytes_to_get);

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  // We have to track the allocated memory chunks for release on exit.
  _S_chunks.push(__REINTERPRET_CAST(_Obj*, __result));
  __result += _ALIGN;
  __bytes_to_get -= _ALIGN;
#  endif

  if (__bytes_to_get > __total_bytes) {
    // Push excess memory allocated in this chunk into the list of free memory blocks
    _FreeBlockHeader* __freeblock = __REINTERPRET_CAST(_FreeBlockHeader*, __result + __total_bytes);
    __freeblock->_M_end = __result + __bytes_to_get;
    _S_free_mem_blocks.push(__freeblock);
  }
  return __result;
}
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ _STLP_ATOMIC_INCREMENT(&_S_alloc_counter()); }

void __node_alloc_impl::_S_dealloc_call() {
  _STLP_VOLATILE __stl_atomic_t *pcounter = &_S_alloc_counter();
  if (_STLP_ATOMIC_DECREMENT(pcounter) == 0)
    _S_chunk_dealloc();
}
/* We deallocate all the memory chunks */
void __node_alloc_impl::_S_chunk_dealloc() {
  // Note: The _Node_alloc_helper class ensures that this function
  // will only be called when the (shared) library is unloaded or the
  // process is shut down. It's thus not possible that another thread
  // is currently trying to allocate a node (we're not thread-safe here).

  // Clear the free blocks and all the freelists. This makes sure that,
  // should more memory somehow be allocated again during shutdown, we
  // do not hand out pointers into the chunks we are about to release
  // (it'd be really nasty to leave references to deallocated memory around).
  _S_free_mem_blocks.clear();
  _S_heap_size = 0;

  for (size_t __i = 0; __i < _STLP_NFREELISTS; ++__i) {
    _S_free_list[__i].clear();
  }

  // Detach the list of chunks and free them all
  _Obj* __chunk = _S_chunks.clear();
  while (__chunk != 0) {
    _Obj* __next = __chunk->_M_next;
    __stlp_chunk_free(__chunk);
    __chunk = __next;
  }
}
#  endif /* _STLP_DO_CLEAN_NODE_ALLOC */

#endif /* !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION) */
#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
struct __node_alloc_cleaner {
  ~__node_alloc_cleaner()
  { __node_alloc_impl::_S_dealloc_call(); }
};

#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __stl_atomic_t& _STLP_CALL
#  else
__stl_atomic_t& _STLP_CALL
#  endif
__node_alloc_impl::_S_alloc_counter() {
  // The counter starts at 1; the __node_alloc_cleaner destructor performs
  // the final _S_dealloc_call() that can bring it to 0 at shutdown.
  static _AllocCounter _S_counter = 1;
  static __node_alloc_cleaner _S_node_alloc_cleaner;
  return _S_counter;
}
#endif
#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_Node_alloc_obj * _STLP_VOLATILE
__node_alloc_impl::_S_free_list[_STLP_NFREELISTS]
= {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// The 16 zeros are necessary to make version 4.1 of the SunPro
// compiler happy. Otherwise it appears to allocate too little
// space for the array.
#else
_STLP_atomic_freelist __node_alloc_impl::_S_free_list[_STLP_NFREELISTS];
_STLP_atomic_freelist __node_alloc_impl::_S_free_mem_blocks;
#endif

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
char *__node_alloc_impl::_S_start_free = 0;
char *__node_alloc_impl::_S_end_free = 0;
#endif

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __stl_atomic_t
#else
size_t
#endif
__node_alloc_impl::_S_heap_size = 0;

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_atomic_freelist __node_alloc_impl::_S_chunks;
#  else
_Node_alloc_obj* __node_alloc_impl::_S_chunks = 0;
#  endif
#endif
void * _STLP_CALL __node_alloc::_M_allocate(size_t& __n)
{ return __node_alloc_impl::_M_allocate(__n); }

void _STLP_CALL __node_alloc::_M_deallocate(void *__p, size_t __n)
{ __node_alloc_impl::_M_deallocate(__p, __n); }
#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)

#  define _STLP_DATA_ALIGNMENT 8

_STLP_MOVE_TO_PRIV_NAMESPACE

// *******************************************************
// __pthread_alloc implementation
union _Pthread_alloc_obj {
  union _Pthread_alloc_obj * __free_list_link;
  char __client_data[_STLP_DATA_ALIGNMENT]; /* The client sees this. */
};
// Pthread allocators don't appear to the client to have meaningful
// instances. We do in fact need to associate some state with each
// thread. That state is represented by _Pthread_alloc_per_thread_state.

struct _Pthread_alloc_per_thread_state {
  typedef _Pthread_alloc_obj __obj;
  enum { _S_NFREELISTS = _MAX_BYTES / _STLP_DATA_ALIGNMENT };

  // Free list link for the list of available per thread structures.
  // When one of these becomes available for reuse due to thread
  // termination, any objects in its free list remain associated
  // with it. The whole structure may then be used by a newly created
  // thread.
  _Pthread_alloc_per_thread_state() : __next(0)
  { memset((void *)__CONST_CAST(_Pthread_alloc_obj**, __free_list), 0, (size_t)_S_NFREELISTS * sizeof(__obj *)); }
  // Returns an object of size __n, and possibly adds to the size-__n free list.
  void *_M_refill(size_t __n);

  _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS];
  _Pthread_alloc_per_thread_state *__next;
  // This data member is only to be used by per_thread_allocator, which
  // returns memory to the originating thread.
  _STLP_STATIC_MUTEX _M_lock;
};
// Pthread-specific allocator.
class _Pthread_alloc_impl {
public: // but only for internal use:
  typedef _Pthread_alloc_per_thread_state __state_type;
  typedef char value_type;

  // Allocates a chunk for __nobjs objects of size __size. __nobjs may be
  // reduced if it is inconvenient to allocate the requested number.
  static char *_S_chunk_alloc(size_t __size, size_t &__nobjs, __state_type*);

  enum { _S_ALIGN = _STLP_DATA_ALIGNMENT };

  static size_t _S_round_up(size_t __bytes)
  { return (((__bytes) + (int)_S_ALIGN - 1) & ~((int)_S_ALIGN - 1)); }
  static size_t _S_freelist_index(size_t __bytes)
  { return (((__bytes) + (int)_S_ALIGN - 1) / (int)_S_ALIGN - 1); }
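  // Example (illustrative; _S_ALIGN == 8 via the #define above):
  //   _S_round_up(1)  == 8,  _S_freelist_index(1)  == 0
  //   _S_round_up(9)  == 16, _S_freelist_index(9)  == 1
  //   _S_round_up(16) == 16, _S_freelist_index(16) == 1
  // Each free list therefore serves one 8-byte-wide band of request sizes.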
private:
  // Chunk allocation state, and other shared state.
  // Protected by _S_chunk_allocator_lock.
  static _STLP_STATIC_MUTEX _S_chunk_allocator_lock;
  static char *_S_start_free;
  static char *_S_end_free;
  static size_t _S_heap_size;
  // Allocator instances that are currently unclaimed by any thread.
  static __state_type *_S_free_per_thread_states;
  // Pthread key under which per thread state is stored.
  static pthread_key_t _S_key;
  static bool _S_key_initialized;
  // Function to be called on thread exit to reclaim per thread
  // state.
  static void _S_destructor(void *instance);
  // Return a recycled or new per thread state.
  static __state_type *_S_new_per_thread_state();

public:
  // Return the per thread state, and ensure that the current thread
  // has an associated per thread state.
  static __state_type *_S_get_per_thread_state();

private:
  friend class _M_lock;
  class _M_lock {
  public:
    _M_lock () { _S_chunk_allocator_lock._M_acquire_lock(); }
    ~_M_lock () { _S_chunk_allocator_lock._M_release_lock(); }
  };

public:
  /* __n must be > 0 */
  static void * allocate(size_t& __n);

  /* __p may not be 0 */
  static void deallocate(void *__p, size_t __n);

  // boris : versions for per_thread_allocator
  /* __n must be > 0 */
  static void * allocate(size_t& __n, __state_type* __a);

  /* __p may not be 0 */
  static void deallocate(void *__p, size_t __n, __state_type* __a);

  static void * reallocate(void *__p, size_t __old_sz, size_t& __new_sz);
};
/* Returns an object of size __n, and optionally adds to the size-__n free list.*/
/* We assume that __n is properly aligned. */
/* We hold the allocation lock. */
void *_Pthread_alloc_per_thread_state::_M_refill(size_t __n) {
  typedef _Pthread_alloc_obj __obj;
  size_t __nobjs = 128;
  char * __chunk = _Pthread_alloc_impl::_S_chunk_alloc(__n, __nobjs, this);
  __obj * volatile * __my_free_list;
  __obj * __result;
  __obj * __current_obj, * __next_obj;
  size_t __i;

  if (1 == __nobjs) {
    return __chunk;
  }

  __my_free_list = __free_list + _Pthread_alloc_impl::_S_freelist_index(__n);

  /* Build free list in chunk */
  __result = (__obj *)__chunk;
  *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
  for (__i = 1; ; ++__i) {
    __current_obj = __next_obj;
    __next_obj = (__obj *)((char *)__next_obj + __n);
    if (__nobjs - 1 == __i) {
      __current_obj -> __free_list_link = 0;
      break;
    } else {
      __current_obj -> __free_list_link = __next_obj;
    }
  }
  return __result;
}
void _Pthread_alloc_impl::_S_destructor(void *__instance) {
  _M_lock __lock_instance; // Need to acquire lock here.
  _Pthread_alloc_per_thread_state* __s = (_Pthread_alloc_per_thread_state*)__instance;
  __s -> __next = _S_free_per_thread_states;
  _S_free_per_thread_states = __s;
}
_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_new_per_thread_state() {
  /* lock already held here. */
  if (0 != _S_free_per_thread_states) {
    _Pthread_alloc_per_thread_state *__result = _S_free_per_thread_states;
    _S_free_per_thread_states = _S_free_per_thread_states -> __next;
    return __result;
  }
  else {
    return _STLP_NEW _Pthread_alloc_per_thread_state;
  }
}
_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_get_per_thread_state() {
  int __ret_code;
  __state_type* __result;

  if (_S_key_initialized && (__result = (__state_type*) pthread_getspecific(_S_key)))
    return __result;

  /*REFERENCED*/
  _M_lock __lock_instance; // Need to acquire lock here.
  if (!_S_key_initialized) {
    if (pthread_key_create(&_S_key, _S_destructor)) {
      __THROW_BAD_ALLOC; // failed
    }
    _S_key_initialized = true;
  }

  __result = _S_new_per_thread_state();
  __ret_code = pthread_setspecific(_S_key, __result);
  if (__ret_code) {
    if (__ret_code == ENOMEM) {
      __THROW_BAD_ALLOC;
    }
  }
  return __result;
}
/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much. */
/* We assume that size is properly aligned. */
char *_Pthread_alloc_impl::_S_chunk_alloc(size_t __p_size, size_t &__nobjs, _Pthread_alloc_per_thread_state *__a) {
  typedef _Pthread_alloc_obj __obj;
  {
    char * __result;
    size_t __total_bytes;
    size_t __bytes_left;
    /*REFERENCED*/
    _M_lock __lock_instance; // Acquire lock for this routine

    __total_bytes = __p_size * __nobjs;
    __bytes_left = _S_end_free - _S_start_free;
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    } else if (__bytes_left >= __p_size) {
      __nobjs = __bytes_left / __p_size;
      __total_bytes = __p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    } else {
      size_t __bytes_to_get = 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
      // Try to make use of the left-over piece.
      if (__bytes_left > 0) {
        __obj * volatile * __my_free_list = __a->__free_list + _S_freelist_index(__bytes_left);
        ((__obj *)_S_start_free) -> __free_list_link = *__my_free_list;
        *__my_free_list = (__obj *)_S_start_free;
      }
#  ifdef _SGI_SOURCE
      // Try to get memory that's aligned on something like a
      // cache line boundary, so as to avoid parceling out
      // parts of the same line to different threads and thus
      // possibly different processors.
      {
        const int __cache_line_size = 128; // probable upper bound
        __bytes_to_get &= ~(__cache_line_size - 1);
        _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get);
        if (0 == _S_start_free) {
          _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
        }
      }
#  else /* !SGI_SOURCE */
      _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
#  endif
      _S_heap_size += __bytes_to_get;
      _S_end_free = _S_start_free + __bytes_to_get;
    }
  }
  // lock is released here
  return _S_chunk_alloc(__p_size, __nobjs, __a);
}
void *_Pthread_alloc_impl::allocate(size_t& __n) {
  typedef _Pthread_alloc_obj __obj;
  __obj * volatile * __my_free_list;
  __obj * __result;
  __state_type* __a;

  if (__n > _MAX_BYTES) {
    return __malloc_alloc::allocate(__n);
  }

  __n = _S_round_up(__n);
  __a = _S_get_per_thread_state();

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __result = *__my_free_list;
  if (__result == 0) {
    void *__r = __a->_M_refill(__n);
    return __r;
  }
  *__my_free_list = __result->__free_list_link;
  return __result;
}
void _Pthread_alloc_impl::deallocate(void *__p, size_t __n) {
  typedef _Pthread_alloc_obj __obj;
  __obj *__q = (__obj *)__p;
  __obj * volatile * __my_free_list;
  __state_type* __a;

  if (__n > _MAX_BYTES) {
    __malloc_alloc::deallocate(__p, __n);
    return;
  }

  __a = _S_get_per_thread_state();

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __q -> __free_list_link = *__my_free_list;
  *__my_free_list = __q;
}
// boris : versions for per_thread_allocator

void *_Pthread_alloc_impl::allocate(size_t& __n, __state_type* __a) {
  typedef _Pthread_alloc_obj __obj;
  __obj * volatile * __my_free_list;
  __obj * __result;

  if (__n > _MAX_BYTES) {
    return __malloc_alloc::allocate(__n);
  }
  __n = _S_round_up(__n);

  // boris : here we have to lock the per thread state, as we may be
  // getting memory from a different thread's pool.
  _STLP_auto_lock __lock(__a->_M_lock);

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __result = *__my_free_list;
  if (__result == 0) {
    void *__r = __a->_M_refill(__n);
    return __r;
  }
  *__my_free_list = __result->__free_list_link;
  return __result;
}
/* __p may not be 0 */
void _Pthread_alloc_impl::deallocate(void *__p, size_t __n, __state_type* __a) {
  typedef _Pthread_alloc_obj __obj;
  __obj *__q = (__obj *)__p;
  __obj * volatile * __my_free_list;

  if (__n > _MAX_BYTES) {
    __malloc_alloc::deallocate(__p, __n);
    return;
  }

  // boris : here we have to lock the per thread state, as we may be
  // returning memory to a different thread's pool.
  _STLP_auto_lock __lock(__a->_M_lock);

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __q -> __free_list_link = *__my_free_list;
  *__my_free_list = __q;
}
void *_Pthread_alloc_impl::reallocate(void *__p, size_t __old_sz, size_t& __new_sz) {
  void * __result;
  size_t __copy_sz;

  if (__old_sz > _MAX_BYTES && __new_sz > _MAX_BYTES) {
    return realloc(__p, __new_sz);
  }

  if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return __p;
  __result = allocate(__new_sz);
  __copy_sz = __new_sz > __old_sz ? __old_sz : __new_sz;
  memcpy(__result, __p, __copy_sz);
  deallocate(__p, __old_sz);
  return __result;
}
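// Illustrative usage sketch (hypothetical values, not part of the build):
//   size_t __sz = 40;                               // rounds up to 40
//   void* __p = _Pthread_alloc_impl::allocate(__sz);
//   size_t __nsz = 48;
//   __p = _Pthread_alloc_impl::reallocate(__p, __sz, __nsz);
// Growing from 40 to 48 bytes crosses a free-list band, so the data is
// copied into a fresh node; growing 41 -> 48 would round up to the same
// 48-byte band and return the original pointer unchanged.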
_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_free_per_thread_states = 0;
pthread_key_t _Pthread_alloc_impl::_S_key = 0;
_STLP_STATIC_MUTEX _Pthread_alloc_impl::_S_chunk_allocator_lock _STLP_MUTEX_INITIALIZER;
bool _Pthread_alloc_impl::_S_key_initialized = false;
char *_Pthread_alloc_impl::_S_start_free = 0;
char *_Pthread_alloc_impl::_S_end_free = 0;
size_t _Pthread_alloc_impl::_S_heap_size = 0;
void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n)
{ return _Pthread_alloc_impl::allocate(__n); }
void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n)
{ _Pthread_alloc_impl::deallocate(__p, __n); }
void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n, __state_type* __a)
{ return _Pthread_alloc_impl::allocate(__n, __a); }
void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n, __state_type* __a)
{ _Pthread_alloc_impl::deallocate(__p, __n, __a); }
void * _STLP_CALL _Pthread_alloc::reallocate(void *__p, size_t __old_sz, size_t& __new_sz)
{ return _Pthread_alloc_impl::reallocate(__p, __old_sz, __new_sz); }
_Pthread_alloc_per_thread_state* _STLP_CALL _Pthread_alloc::_S_get_per_thread_state()
{ return _Pthread_alloc_impl::_S_get_per_thread_state(); }
_STLP_MOVE_TO_STD_NAMESPACE

#endif /* _STLP_PTHREADS */

#undef _S_FREELIST_INDEX