/home/ntakagi/work/STLport-5.1.5/src/allocators.cpp

00001 /*
00002  *
00003  * Copyright (c) 1996,1997
00004  * Silicon Graphics Computer Systems, Inc.
00005  *
00006  * Copyright (c) 1997
00007  * Moscow Center for SPARC Technology
00008  *
00009  * Copyright (c) 1999
00010  * Boris Fomitchev
00011  *
00012  * This material is provided "as is", with absolutely no warranty expressed
00013  * or implied. Any use is at your own risk.
00014  *
00015  * Permission to use or copy this software for any purpose is hereby granted
00016  * without fee, provided the above notices are retained on all copies.
00017  * Permission to modify the code and to distribute modified code is granted,
00018  * provided the above notices are retained, and a notice that the code was
00019  * modified is included with the above copyright notice.
00020  *
00021  */
00022 
00023 #include "stlport_prefix.h"
00024 
00025 #include <memory>
00026 
00027 #if defined (__GNUC__) && (defined (__CYGWIN__) || defined (__MINGW32__))
00028 #  include <malloc.h>
00029 //#  define _STLP_MALLOC_USABLE_SIZE(__buf) malloc_usable_size(__buf)
00030 #endif
00031 
00032 #if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
00033 #  include <pthread_alloc>
00034 #  include <cerrno>
00035 #endif
00036 
00037 #include <stl/_threads.h>
00038 
00039 #include "lock_free_slist.h"
00040 
00041 #if defined (__WATCOMC__)
00042 #  pragma warning 13 9
00043 #  pragma warning 367 9
00044 #  pragma warning 368 9
00045 #endif
00046 
00047 #if defined (_STLP_SGI_THREADS)
00048   // We test whether threads are in use before locking.
00049   // Perhaps this should be moved into stl_threads.h, but that
00050   // probably makes it harder to avoid the procedure call when
00051   // it isn't needed.
00052 extern "C" {
00053   extern int __us_rsthread_malloc;
00054 }
00055 #endif
00056 
00057 // Specialised debug form of malloc which avoids "false" memory-leak
00058 // reports when run with the debug CRT libraries.
00059 #if defined (_STLP_MSVC) && (_STLP_MSVC >= 1020 && defined (_STLP_DEBUG_ALLOC)) && !defined (_STLP_WCE)
00060 #  include <crtdbg.h>
00061 inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_malloc_dbg(__bytes, _CRT_BLOCK, __FILE__, __LINE__)); }
00062 inline void __stlp_chunck_free(void* __p) { _free_dbg(__p, _CRT_BLOCK); }
00063 #else  // !(_STLP_MSVC && _STLP_DEBUG_ALLOC)
00064 #  ifdef _STLP_NODE_ALLOC_USE_MALLOC
00065 #    include <cstdlib>
00066 inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_STLP_VENDOR_CSTD::malloc(__bytes)); }
00067 inline void __stlp_chunck_free(void* __p) { _STLP_VENDOR_CSTD::free(__p); }
00068 #  else
00069 inline void* __stlp_chunk_malloc(size_t __bytes) { return _STLP_STD::__stl_new(__bytes); }
00070 inline void __stlp_chunck_free(void* __p) { _STLP_STD::__stl_delete(__p); }
00071 #  endif
00072 #endif  // !(_STLP_MSVC && _STLP_DEBUG_ALLOC)
00073 
00074 #define _S_FREELIST_INDEX(__bytes) ((__bytes - size_t(1)) >> (int)_ALIGN_SHIFT)
00075 
00076 _STLP_BEGIN_NAMESPACE
00077 
00078 class __malloc_alloc_impl {
00079 private:
00080   static void* _S_oom_malloc(size_t __n) {
00081     __oom_handler_type __my_malloc_handler;
00082     void * __result;
00083 
00084     for (;;) {
00085       __my_malloc_handler = __oom_handler;
00086       if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
00087       (*__my_malloc_handler)();
00088       __result = malloc(__n);
00089       if (__result) return(__result);
00090     }
00091 #if defined (_STLP_NEED_UNREACHABLE_RETURN)
00092     return 0;
00093 #endif
00094   }
00095   static __oom_handler_type __oom_handler;
00096 public:
00097   // this one is needed for proper simple_alloc wrapping
00098   typedef char value_type;
00099   static void* allocate(size_t& __n) {
00100     void* __result = malloc(__n);
00101     if (0 == __result) {
00102       __result = _S_oom_malloc(__n);
00103     }
00104 #if defined (_STLP_MALLOC_USABLE_SIZE)
00105     else {
00106       size_t __new_n = _STLP_MALLOC_USABLE_SIZE(__result);
00107       /*
00108       if (__n != __new_n) {
00109         printf("requested size %d, usable %d\n", __n, __new_n);
00110       }
00111       */
00112       __n = __new_n;
00113     }
00114 #endif
00115     return __result;
00116   }
00117   static void deallocate(void* __p, size_t /* __n */) { free((char*)__p); }
00118   static __oom_handler_type set_malloc_handler(__oom_handler_type __f) {
00119     __oom_handler_type __old = __oom_handler;
00120     __oom_handler = __f;
00121     return __old;
00122   }
00123 };
00124 
00125 // malloc_alloc out-of-memory handling
00126 __oom_handler_type __malloc_alloc_impl::__oom_handler = __STATIC_CAST(__oom_handler_type, 0);
00127 
00128 void* _STLP_CALL __malloc_alloc::allocate(size_t& __n)
00129 { return __malloc_alloc_impl::allocate(__n); }
00130 __oom_handler_type _STLP_CALL __malloc_alloc::set_malloc_handler(__oom_handler_type __f)
00131 { return __malloc_alloc_impl::set_malloc_handler(__f); }
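
// Usage sketch (not part of the library): client code can install an
// out-of-memory callback in the spirit of set_new_handler.  _S_oom_malloc()
// keeps calling the handler and retrying malloc() until the request succeeds
// or the handler throws.  The names my_oom_handler and release_reserve_memory
// below are hypothetical.
//
//   static void my_oom_handler() {
//     // Either free some reserve memory so the retried malloc() can succeed,
//     // or give up and throw.
//     if (!release_reserve_memory()) { __THROW_BAD_ALLOC; }
//   }
//
//   __oom_handler_type __previous =
//       __malloc_alloc::set_malloc_handler(&my_oom_handler);
//   // ... allocations that may exhaust memory ...
//   __malloc_alloc::set_malloc_handler(__previous);   // restore previous handler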
00132 
00133 // *******************************************************
00134 // Default node allocator.
00135 // With a reasonable compiler, this should be roughly as fast as the
00136 // original STL class-specific allocators, but with less fragmentation.
00137 //
00138 // Important implementation properties:
00139 // 1. If the client requests an object of size > _MAX_BYTES, the resulting
00140 //    object will be obtained directly from malloc.
00141 // 2. In all other cases, we allocate an object of size exactly
00142 //    _S_round_up(requested_size).  Thus the client has enough size
00143 //    information that we can return the object to the proper free list
00144 //    without permanently losing part of the object.
00145 //
00146 
00147 #define _STLP_NFREELISTS 16
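
// Worked example of the size-class mapping used throughout this file
// (assuming the usual STLport settings _ALIGN == 8, _ALIGN_SHIFT == 3 and
// _MAX_BYTES == 128, so that the 16 free lists serve the node sizes
// 8, 16, ..., 128):
//
//   requested __bytes | _S_round_up(__bytes) | _S_FREELIST_INDEX(__bytes)
//   ------------------+----------------------+---------------------------
//           1         |          8           |             0
//           9         |         16           |             1
//         100         |        104           |            12
//         128         |        128           |            15
//
// Requests larger than _MAX_BYTES never reach the free lists; as noted above,
// they are obtained directly from malloc via __malloc_alloc.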
00148 
00149 #if defined (_STLP_LEAKS_PEDANTIC) && defined (_STLP_USE_DYNAMIC_LIB)
00150 /*
00151  * We can only do cleanup of the node allocator memory pool if we are
00152  * sure that the STLport library is used as a shared one, as this guarantees
00153  * that the node allocator instance is unique. Without that guarantee, node
00154  * allocator instances might exchange memory blocks, making the implementation
00155  * of a cleanup process much more complicated.
00156  */
00157 #  define _STLP_DO_CLEAN_NODE_ALLOC
00158 #endif
00159 
00160 /* When STLport is used without multi-threaded safety we use the node allocator
00161  * implementation with locks, as the locks become no-ops. The lock-free
00162  * implementation always uses system-specific atomic operations, which are
00163  * slower than 'normal' ones.
00164  */
00165 #if defined (_STLP_THREADS) && \
00166     defined (_STLP_HAS_ATOMIC_FREELIST) && defined (_STLP_ATOMIC_ADD)
00167 /*
00168  * We have an implementation of the atomic freelist (_STLP_atomic_freelist)
00169  * for this architecture and compiler.  That means we can use the non-blocking
00170  * implementation of the node-allocation engine.*/
00171 #  define _STLP_USE_LOCK_FREE_IMPLEMENTATION
00172 #endif
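
// For reference, the core idea behind _STLP_atomic_freelist is a LIFO list
// whose head is updated with compare-and-swap.  A minimal sketch in C++11
// terms (the real implementation in lock_free_slist.h is platform specific
// and also protects against the ABA problem, which this sketch ignores):
//
//   #include <atomic>
//
//   struct item { item* _M_next; };
//
//   class atomic_freelist_sketch {
//     std::atomic<item*> _M_head;
//   public:
//     atomic_freelist_sketch() : _M_head(0) {}
//     void push(item* __p) {
//       item* __old = _M_head.load();
//       do { __p->_M_next = __old; }                        // link to current head
//       while (!_M_head.compare_exchange_weak(__old, __p)); // retry on contention
//     }
//     item* pop() {
//       item* __old = _M_head.load();
//       while (__old != 0 &&
//              !_M_head.compare_exchange_weak(__old, __old->_M_next)) {}
//       return __old;                                       // 0 when list is empty
//     }
//   };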
00173 
00174 #if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
00175 #  if defined (_STLP_THREADS)
00176 
00177 class _Node_Alloc_Lock {
00178 public:
00179   _Node_Alloc_Lock() {
00180 #  if defined (_STLP_SGI_THREADS)
00181     if (__us_rsthread_malloc)
00182 #  endif
00183       _S_lock._M_acquire_lock();
00184   }
00185 
00186   ~_Node_Alloc_Lock() {
00187 #  if defined (_STLP_SGI_THREADS)
00188     if (__us_rsthread_malloc)
00189 #  endif
00190         _S_lock._M_release_lock();
00191   }
00192 
00193   static _STLP_STATIC_MUTEX _S_lock;
00194 };
00195 
00196 _STLP_STATIC_MUTEX _Node_Alloc_Lock::_S_lock _STLP_MUTEX_INITIALIZER;
00197 #  else
00198 
00199 class _Node_Alloc_Lock {
00200 public:
00201   _Node_Alloc_Lock() { }
00202   ~_Node_Alloc_Lock() { }
00203 };
00204 
00205 #  endif
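
// Note: in the threaded build a _Node_Alloc_Lock is a scope-bound guard: the
// constructor acquires _S_lock and the destructor releases it, even if
// _S_refill() or __stlp_chunk_malloc() throws while the lock is held.  In the
// non-threaded build the class is empty, so the code below can create lock
// instances unconditionally at no cost:
//
//   {
//     _Node_Alloc_Lock __lock_instance;   // acquire (no-op without threads)
//     // ... manipulate the shared free lists ...
//   }                                     // released on scope exit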
00206 
00207 struct _Node_alloc_obj {
00208   _Node_alloc_obj * _M_next;
00209 };
00210 #endif
00211 
00212 class __node_alloc_impl {
00213 _STLP_PRIVATE:
00214   static inline size_t _STLP_CALL _S_round_up(size_t __bytes)
00215   { return (((__bytes) + (size_t)_ALIGN-1) & ~((size_t)_ALIGN - 1)); }
00216 
00217 #if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
00218   typedef _STLP_atomic_freelist::item   _Obj;
00219   typedef _STLP_atomic_freelist         _Freelist;
00220   typedef _STLP_atomic_freelist         _ChunkList;
00221 
00222   // Header of blocks of memory that have been allocated as part of
00223   // a larger chunk but have not yet been chopped up into nodes.
00224   struct _FreeBlockHeader : public _STLP_atomic_freelist::item {
00225     char* _M_end;     // pointer to end of free memory
00226   };
00227 #else
00228   typedef _Node_alloc_obj       _Obj;
00229   typedef _Obj* _STLP_VOLATILE  _Freelist;
00230   typedef _Obj*                 _ChunkList;
00231 #endif
00232 
00233 private:
00234   // Returns an object of size __n, and optionally adds to size __n free list.
00235   static _Obj* _S_refill(size_t __n);
00236   // Allocates a chunk for nobjs of size __p_size.  nobjs may be reduced
00237   // if it is inconvenient to allocate the requested number.
00238   static char* _S_chunk_alloc(size_t __p_size, int& __nobjs);
00239   // Chunk allocation state.
00240   static _Freelist _S_free_list[_STLP_NFREELISTS];
00241   // Amount of total allocated memory
00242 #if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
00243   static _STLP_VOLATILE __stl_atomic_t _S_heap_size;
00244 #else
00245   static size_t _S_heap_size;
00246 #endif
00247 
00248 #if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
00249   // List of blocks of free memory
00250   static _STLP_atomic_freelist  _S_free_mem_blocks;
00251 #else
00252   // Start of the current free memory buffer
00253   static char* _S_start_free;
00254   // End of the current free memory buffer
00255   static char* _S_end_free;
00256 #endif
00257 
00258 #if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00259 public:
00260   // Methods to report alloc/dealloc calls to the counter system.
00261 #  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
00262   typedef _STLP_VOLATILE __stl_atomic_t _AllocCounter;
00263 #  else
00264   typedef __stl_atomic_t _AllocCounter;
00265 #  endif
00266   static _AllocCounter& _STLP_CALL _S_alloc_counter();
00267   static void _S_alloc_call();
00268   static void _S_dealloc_call();
00269 
00270 private:
00271   // Free all the allocated chunks of memory
00272   static void _S_chunk_dealloc();
00273   // Beginning of the linked list of allocated chunks of memory
00274   static _ChunkList _S_chunks;
00275 #endif /* _STLP_DO_CLEAN_NODE_ALLOC */
00276 
00277 public:
00278   /* __n must be > 0      */
00279   static void* _M_allocate(size_t& __n);
00280   /* __p may not be 0 */
00281   static void _M_deallocate(void *__p, size_t __n);
00282 };
00283 
00284 #if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
00285 void* __node_alloc_impl::_M_allocate(size_t& __n) {
00286   __n = _S_round_up(__n);
00287   _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
00288   _Obj *__r;
00289 
00290   // Acquire the lock here with a constructor call.
00291   // This ensures that it is released in exit or during stack
00292   // unwinding.
00293   _Node_Alloc_Lock __lock_instance;
00294 
00295   if ( (__r  = *__my_free_list) != 0 ) {
00296     *__my_free_list = __r->_M_next;
00297   } else {
00298     __r = _S_refill(__n);
00299   }
00300 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00301   _S_alloc_call();
00302 #  endif
00303   // lock is released here
00304   return __r;
00305 }
00306 
00307 void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
00308   _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
00309   _Obj * __pobj = __STATIC_CAST(_Obj*, __p);
00310 
00311   // acquire lock
00312   _Node_Alloc_Lock __lock_instance;
00313   __pobj->_M_next = *__my_free_list;
00314   *__my_free_list = __pobj;
00315 
00316 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00317   _S_dealloc_call();
00318 #  endif
00319   // lock is released here
00320 }
00321 
00322 /* We allocate memory in large chunks in order to avoid fragmenting     */
00323 /* the malloc heap too much.                                            */
00324 /* We assume that size is properly aligned.                             */
00325 /* We hold the allocation lock.                                         */
00326 char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
00327   char* __result;
00328   size_t __total_bytes = _p_size * __nobjs;
00329   size_t __bytes_left = _S_end_free - _S_start_free;
00330 
00331   if (__bytes_left > 0) {
00332     if (__bytes_left >= __total_bytes) {
00333       __result = _S_start_free;
00334       _S_start_free += __total_bytes;
00335       return __result;
00336     }
00337 
00338     if (__bytes_left >= _p_size) {
00339       __nobjs = (int)(__bytes_left / _p_size);
00340       __total_bytes = _p_size * __nobjs;
00341       __result = _S_start_free;
00342       _S_start_free += __total_bytes;
00343       return __result;
00344     }
00345 
00346     // Try to make use of the left-over piece.
00347     _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__bytes_left);
00348     __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = *__my_free_list;
00349     *__my_free_list = __REINTERPRET_CAST(_Obj*, _S_start_free);
00350   }
00351 
00352   size_t __bytes_to_get =
00353     2 * __total_bytes + _S_round_up(_S_heap_size >> 4)
00354 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00355     + sizeof(_Obj)
00356 #  endif
00357     ;
00358 
00359   _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
00360   if (0 == _S_start_free) {
00361     _Obj* _STLP_VOLATILE* __my_free_list;
00362     _Obj* __p;
00363     // Try to do with what we have.  That can't hurt.
00364     // We do not try smaller requests, since that tends
00365     // to result in disaster on multi-process machines.
00366     for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
00367       __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
00368       __p = *__my_free_list;
00369       if (0 != __p) {
00370         *__my_free_list = __p -> _M_next;
00371         _S_start_free = __REINTERPRET_CAST(char*, __p);
00372         _S_end_free = _S_start_free + __i;
00373         return _S_chunk_alloc(_p_size, __nobjs);
00374         // Any leftover piece will eventually make it to the
00375         // right free list.
00376       }
00377     }
00378     _S_end_free = 0;    // In case of exception.
00379     _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
00380     /*
00381     (char*)malloc_alloc::allocate(__bytes_to_get);
00382     */
00383 
00384     // This should either throw an
00385     // exception or remedy the situation.  Thus we assume it
00386     // succeeded.
00387   }
00388 
00389   _S_heap_size += __bytes_to_get;
00390 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00391   __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = _S_chunks;
00392   _S_chunks = __REINTERPRET_CAST(_Obj*, _S_start_free);
00393 #  endif
00394   _S_end_free = _S_start_free + __bytes_to_get;
00395 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00396   _S_start_free += sizeof(_Obj);
00397 #  endif
00398   return _S_chunk_alloc(_p_size, __nobjs);
00399 }
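
// Worked example of the growth heuristic above (ignoring the optional
// _STLP_DO_CLEAN_NODE_ALLOC header): the first 32-byte request makes
// _S_refill() ask for __nobjs == 20 objects, so __total_bytes == 640 and,
// with _S_heap_size still 0, __bytes_to_get == 2 * 640 + 0 == 1280.  The
// first 640 bytes are chopped into 20 nodes by _S_refill(); the remaining
// 640 bytes stay in [_S_start_free, _S_end_free) for later requests.  Once
// _S_heap_size has grown to 1280, the next refill asks for an extra
// _S_round_up(1280 >> 4) == 80 bytes on top of 2 * __total_bytes, so the
// pool requests progressively larger chunks as more memory is handed out.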
00400 
00401 /* Returns an object of size __n, and optionally adds to size __n free list.*/
00402 /* We assume that __n is properly aligned.                                  */
00403 /* We hold the allocation lock.                                             */
00404 _Node_alloc_obj* __node_alloc_impl::_S_refill(size_t __n) {
00405   int __nobjs = 20;
00406   char* __chunk = _S_chunk_alloc(__n, __nobjs);
00407 
00408   if (1 == __nobjs) return __REINTERPRET_CAST(_Obj*, __chunk);
00409 
00410   _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
00411   _Obj* __result;
00412   _Obj* __current_obj;
00413   _Obj* __next_obj;
00414 
00415   /* Build free list in chunk */
00416   __result = __REINTERPRET_CAST(_Obj*, __chunk);
00417   *__my_free_list = __next_obj = __REINTERPRET_CAST(_Obj*, __chunk + __n);
00418   for (--__nobjs; --__nobjs; ) {
00419     __current_obj = __next_obj;
00420     __next_obj = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __next_obj) + __n);
00421     __current_obj->_M_next = __next_obj;
00422   }
00423   __next_obj->_M_next = 0;
00424   return __result;
00425 }
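
// Example of what _S_refill() builds for __n == 32 when _S_chunk_alloc()
// delivers the full 20 objects: object 0 (the chunk start) is returned to the
// caller, the free-list head is pointed at object 1, the loop body runs 18
// times linking object i to object i + 1 for i = 1..18, and the final
// statement terminates the list at object 19:
//
//   returned pointer -> object 0                       (handed to the caller)
//   *__my_free_list  -> object 1 -> object 2 -> ... -> object 19 -> 0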
00426 
00427 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00428 void __node_alloc_impl::_S_alloc_call()
00429 { ++_S_alloc_counter(); }
00430 
00431 void __node_alloc_impl::_S_dealloc_call() {
00432   __stl_atomic_t &counter = _S_alloc_counter();
00433   if (--counter == 0)
00434   { _S_chunk_dealloc(); }
00435 }
00436 
00437 /* We deallocate all the memory chunks      */
00438 void __node_alloc_impl::_S_chunk_dealloc() {
00439   _Obj *__pcur = _S_chunks, *__pnext;
00440   while (__pcur != 0) {
00441     __pnext = __pcur->_M_next;
00442     __stlp_chunck_free(__pcur);
00443     __pcur = __pnext;
00444   }
00445   _S_chunks = 0;
00446   _S_start_free = _S_end_free = 0;
00447   _S_heap_size = 0;
00448   memset(__REINTERPRET_CAST(char*, __CONST_CAST(_Obj**, &_S_free_list[0])), 0, _STLP_NFREELISTS * sizeof(_Obj*));
00449 }
00450 #  endif /* _STLP_DO_CLEAN_NODE_ALLOC */
00451 
00452 #else /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */
00453 
00454 void* __node_alloc_impl::_M_allocate(size_t& __n) {
00455   __n = _S_round_up(__n);
00456   _Obj* __r = _S_free_list[_S_FREELIST_INDEX(__n)].pop();
00457   if (__r  == 0)
00458   { __r = _S_refill(__n); }
00459 
00460 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00461   _S_alloc_call();
00462 #  endif
00463   return __r;
00464 }
00465 
00466 void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
00467   _S_free_list[_S_FREELIST_INDEX(__n)].push(__STATIC_CAST(_Obj*, __p));
00468 
00469 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00470   _S_dealloc_call();
00471 #  endif
00472 }
00473 
00474 /* Returns an object of size __n, and optionally adds additional ones to    */
00475 /* freelist of objects of size __n.                                         */
00476 /* We assume that __n is properly aligned.                                  */
00477 __node_alloc_impl::_Obj* __node_alloc_impl::_S_refill(size_t __n) {
00478   int __nobjs = 20;
00479   char* __chunk = _S_chunk_alloc(__n, __nobjs);
00480 
00481   if (__nobjs <= 1)
00482     return __REINTERPRET_CAST(_Obj*, __chunk);
00483 
00484   // Push all new nodes (minus first one) onto freelist
00485   _Obj* __result   = __REINTERPRET_CAST(_Obj*, __chunk);
00486   _Obj* __cur_item = __result;
00487   _Freelist* __my_freelist = _S_free_list + _S_FREELIST_INDEX(__n);
00488   for (--__nobjs; __nobjs != 0; --__nobjs) {
00489     __cur_item  = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __cur_item) + __n);
00490     __my_freelist->push(__cur_item);
00491   }
00492   return __result;
00493 }
00494 
00495 /* We allocate memory in large chunks in order to avoid fragmenting     */
00496 /* the malloc heap too much.                                            */
00497 /* We assume that size is properly aligned.                             */
00498 char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
00499 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00500   // We are going to add a small memory block to keep track of all the allocated
00501   // blocks' addresses; we need to do so while respecting the memory alignment.
00502   // The following static assert checks that the reserved block can hold a pointer.
00503   _STLP_STATIC_ASSERT(sizeof(_Obj) <= _ALIGN)
00504 #  endif
00505   char*  __result       = 0;
00506   __stl_atomic_t __total_bytes  = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;
00507 
00508   _FreeBlockHeader* __block = __STATIC_CAST(_FreeBlockHeader*, _S_free_mem_blocks.pop());
00509   if (__block != 0) {
00510     // We checked a block out and can now mess with it with impunity.
00511     // We'll put the remainder back into the list if we're done with it below.
00512     char*  __buf_start  = __REINTERPRET_CAST(char*, __block);
00513     __stl_atomic_t __bytes_left = __block->_M_end - __buf_start;
00514 
00515     if ((__bytes_left < __total_bytes) && (__bytes_left >= __STATIC_CAST(__stl_atomic_t, _p_size))) {
00516       // There's enough left for at least one object, but not as much as we wanted
00517       __result      = __buf_start;
00518       __nobjs       = (int)(__bytes_left/_p_size);
00519       __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;
00520       __bytes_left -= __total_bytes;
00521       __buf_start  += __total_bytes;
00522     }
00523     else if (__bytes_left >= __total_bytes) {
00524       // The block has enough left to satisfy all that was asked for
00525       __result      = __buf_start;
00526       __bytes_left -= __total_bytes;
00527       __buf_start  += __total_bytes;
00528     }
00529 
00530     if (__bytes_left != 0) {
00531       // There is still some memory left over in block after we satisfied our request.
00532       if ((__result != 0) && (__bytes_left >= sizeof(_FreeBlockHeader))) {
00533         // We were able to allocate at least one object and there is still enough
00534         // left to put remainder back into list.
00535         _FreeBlockHeader* __newblock = __REINTERPRET_CAST(_FreeBlockHeader*, __buf_start);
00536         __newblock->_M_end  = __block->_M_end;
00537         _S_free_mem_blocks.push(__newblock);
00538       }
00539       else {
00540         // We were not able to allocate enough for at least one object.
00541         // Shove into freelist of nearest (rounded-down!) size.
00542         size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
00543         if (__rounded_down > 0)
00544           _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push((_Obj*)__buf_start);
00545       }
00546     }
00547     if (__result != 0)
00548       return __result;
00549   }
00550 
00551   // We couldn't satisfy the request from the list of free blocks, so get new memory.
00552   __stl_atomic_t __bytes_to_get = 2 * __total_bytes + __STATIC_CAST(__stl_atomic_t, _S_round_up(_S_heap_size >> 4))
00553 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00554     + _ALIGN
00555 #  endif
00556     ;
00557 
00558   __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
00559   // Alignment check
00560   _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)
00561 
00562   if (0 == __result) {
00563     // Allocation failed; try to cannibalize from a freelist of a larger object size.
00564     for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
00565       _Obj* __p  = _S_free_list[_S_FREELIST_INDEX(__i)].pop();
00566       if (0 != __p) {
00567         if (__i < sizeof(_FreeBlockHeader)) {
00568           // Not enough to put into list of free blocks, divvy it up here.
00569           // Use as much as possible for this request and shove remainder into freelist.
00570           __nobjs = (int)(__i/_p_size);
00571           __total_bytes = __nobjs * __STATIC_CAST(__stl_atomic_t, _p_size);
00572           size_t __bytes_left = __i - __total_bytes;
00573           size_t __rounded_down = _S_round_up(__bytes_left+1) - (size_t)_ALIGN;
00574           if (__rounded_down > 0) {
00575             _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push(__REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __p) + __total_bytes));
00576           }
00577           return __REINTERPRET_CAST(char*, __p);
00578         }
00579         else {
00580           // Add node to list of available blocks and recursively allocate from it.
00581           _FreeBlockHeader* __newblock = (_FreeBlockHeader*)__p;
00582           __newblock->_M_end  = __REINTERPRET_CAST(char*, __p) + __i;
00583           _S_free_mem_blocks.push(__newblock);
00584           return _S_chunk_alloc(_p_size, __nobjs);
00585         }
00586       }
00587     }
00588 
00589     // We were not able to find something in a freelist, try to allocate a smaller amount.
00590     __bytes_to_get  = __total_bytes
00591 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00592       + _ALIGN
00593 #  endif
00594       ;
00595     __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
00596     // Alignment check
00597     _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)
00598 
00599     // This should either throw an exception or remedy the situation.
00600     // Thus we assume it succeeded.
00601   }
00602 
00603   _STLP_ATOMIC_ADD(&_S_heap_size, __bytes_to_get);
00604 
00605 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00606   // We have to track the allocated memory chunks for release on exit.
00607   _S_chunks.push(__REINTERPRET_CAST(_Obj*, __result));
00608   __result       += _ALIGN;
00609   __bytes_to_get -= _ALIGN;
00610 #  endif
00611 
00612   if (__bytes_to_get > __total_bytes) {
00613     // Push excess memory allocated in this chunk into list of free memory blocks
00614     _FreeBlockHeader* __freeblock = __REINTERPRET_CAST(_FreeBlockHeader*, __result + __total_bytes);
00615     __freeblock->_M_end  = __result + __bytes_to_get;
00616     _S_free_mem_blocks.push(__freeblock);
00617   }
00618   return __result;
00619 }
00620 
00621 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00622 void __node_alloc_impl::_S_alloc_call()
00623 { _STLP_ATOMIC_INCREMENT(&_S_alloc_counter()); }
00624 
00625 void __node_alloc_impl::_S_dealloc_call() {
00626   _STLP_VOLATILE __stl_atomic_t *pcounter = &_S_alloc_counter();
00627   if (_STLP_ATOMIC_DECREMENT(pcounter) == 0)
00628     _S_chunk_dealloc();
00629 }
00630 
00631 /* We deallocate all the memory chunks      */
00632 void __node_alloc_impl::_S_chunk_dealloc() {
00633   // Note: The _Node_alloc_helper class ensures that this function
00634   // will only be called when the (shared) library is unloaded or the
00635   // process is shut down.  It's thus not possible that another thread
00636   // is currently trying to allocate a node (we're not thread-safe here).
00637   //
00638 
00639   // Clear the free blocks and all freelists.  This makes sure that any memory
00640   // allocated again during shutdown does not refer to the deallocated chunks
00641   // (it'd also be really nasty to leave references to deallocated memory).
00642   _S_free_mem_blocks.clear();
00643   _S_heap_size      = 0;
00644 
00645   for (size_t __i = 0; __i < _STLP_NFREELISTS; ++__i) {
00646     _S_free_list[__i].clear();
00647   }
00648 
00649   // Detach list of chunks and free them all
00650   _Obj* __chunk = _S_chunks.clear();
00651   while (__chunk != 0) {
00652     _Obj* __next = __chunk->_M_next;
00653     __stlp_chunck_free(__chunk);
00654     __chunk  = __next;
00655   }
00656 }
00657 #  endif /* _STLP_DO_CLEAN_NODE_ALLOC */
00658 
00659 #endif /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */
00660 
00661 #if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00662 struct __node_alloc_cleaner {
00663   ~__node_alloc_cleaner()
00664   { __node_alloc_impl::_S_dealloc_call(); }
00665 };
00666 
00667 #  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
00668 _STLP_VOLATILE __stl_atomic_t& _STLP_CALL
00669 #  else
00670 __stl_atomic_t& _STLP_CALL
00671 #  endif
00672 __node_alloc_impl::_S_alloc_counter() {
00673   static _AllocCounter _S_counter = 1;
00674   static __node_alloc_cleaner _S_node_alloc_cleaner;
00675   return _S_counter;
00676 }
00677 #endif
00678 
00679 #if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
00680 _Node_alloc_obj * _STLP_VOLATILE
00681 __node_alloc_impl::_S_free_list[_STLP_NFREELISTS]
00682 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
00683 // The 16 zeros are necessary to make version 4.1 of the SunPro
00684 // compiler happy.  Otherwise it appears to allocate too little
00685 // space for the array.
00686 #else
00687 _STLP_atomic_freelist __node_alloc_impl::_S_free_list[_STLP_NFREELISTS];
00688 _STLP_atomic_freelist __node_alloc_impl::_S_free_mem_blocks;
00689 #endif
00690 
00691 #if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
00692 char *__node_alloc_impl::_S_start_free = 0;
00693 char *__node_alloc_impl::_S_end_free = 0;
00694 #endif
00695 
00696 #if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
00697 _STLP_VOLATILE __stl_atomic_t
00698 #else
00699 size_t
00700 #endif
00701 __node_alloc_impl::_S_heap_size = 0;
00702 
00703 #if defined (_STLP_DO_CLEAN_NODE_ALLOC)
00704 #  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
00705 _STLP_atomic_freelist __node_alloc_impl::_S_chunks;
00706 #  else
00707 _Node_alloc_obj* __node_alloc_impl::_S_chunks  = 0;
00708 #  endif
00709 #endif
00710 
00711 void * _STLP_CALL __node_alloc::_M_allocate(size_t& __n)
00712 { return __node_alloc_impl::_M_allocate(__n); }
00713 
00714 void _STLP_CALL __node_alloc::_M_deallocate(void *__p, size_t __n)
00715 { __node_alloc_impl::_M_deallocate(__p, __n); }
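
// Usage sketch (not part of the library): these two wrappers are the
// interface the STLport node allocator classes build on.  _M_allocate() takes
// its size argument by reference and may round it up to the size class
// actually used; the caller must pass the possibly adjusted size back to
// _M_deallocate() so the block lands on the proper free list.  Assuming
// _ALIGN == 8:
//
//   size_t __sz = 20;                                // requested size
//   void*  __p  = __node_alloc::_M_allocate(__sz);   // __sz becomes 24
//   // ... use the __sz bytes at __p ...
//   __node_alloc::_M_deallocate(__p, __sz);          // return with the same __sz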
00716 
00717 #if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
00718 
00719 #  define _STLP_DATA_ALIGNMENT 8
00720 
00721 _STLP_MOVE_TO_PRIV_NAMESPACE
00722 
00723 // *******************************************************
00724 // __perthread_alloc implementation
00725 union _Pthread_alloc_obj {
00726   union _Pthread_alloc_obj * __free_list_link;
00727   char __client_data[_STLP_DATA_ALIGNMENT];    /* The client sees this.    */
00728 };
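
// Illustration: while a block sits on a free list its first bytes hold the
// __free_list_link pointer; once handed out, the very same bytes become the
// client's data.  This is why the smallest size class is _STLP_DATA_ALIGNMENT
// (8) bytes: every block must be large enough to hold the link.
//
//   _Pthread_alloc_obj* __node = *__my_free_list;    // node on the free list
//   *__my_free_list = __node->__free_list_link;      // unlink it
//   void* __client_ptr = (void*)__node;              // client reuses the bytes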
00729 
00730 // Pthread allocators don't appear to the client to have meaningful
00731 // instances.  We do in fact need to associate some state with each
00732 // thread.  That state is represented by _Pthread_alloc_per_thread_state.
00733 
00734 struct _Pthread_alloc_per_thread_state {
00735   typedef _Pthread_alloc_obj __obj;
00736   enum { _S_NFREELISTS = _MAX_BYTES / _STLP_DATA_ALIGNMENT };
00737 
00738   // Free list link for list of available per thread structures.
00739   // When one of these becomes available for reuse due to thread
00740   // termination, any objects in its free list remain associated
00741   // with it.  The whole structure may then be used by a newly
00742   // created thread.
00743   _Pthread_alloc_per_thread_state() : __next(0)
00744   { memset((void *)__CONST_CAST(_Pthread_alloc_obj**, __free_list), 0, (size_t)_S_NFREELISTS * sizeof(__obj *)); }
00745   // Returns an object of size __n, and possibly adds to size n free list.
00746   void *_M_refill(size_t __n);
00747 
00748   _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS];
00749   _Pthread_alloc_per_thread_state *__next;
00750   // this data member is only to be used by per_thread_allocator, which returns memory to the originating thread.
00751   _STLP_mutex _M_lock;
00752 };
00753 
00754 // Pthread-specific allocator.
00755 class _Pthread_alloc_impl {
00756 public: // but only for internal use:
00757   typedef _Pthread_alloc_per_thread_state __state_type;
00758   typedef char value_type;
00759 
00760   // Allocates a chunk for nobjs of size size.  nobjs may be reduced
00761   // if it is inconvenient to allocate the requested number.
00762   static char *_S_chunk_alloc(size_t __size, size_t &__nobjs, __state_type*);
00763 
00764   enum {_S_ALIGN = _STLP_DATA_ALIGNMENT};
00765 
00766   static size_t _S_round_up(size_t __bytes)
00767   { return (((__bytes) + (int)_S_ALIGN - 1) & ~((int)_S_ALIGN - 1)); }
00768   static size_t _S_freelist_index(size_t __bytes)
00769   { return (((__bytes) + (int)_S_ALIGN - 1) / (int)_S_ALIGN - 1); }
00770 
00771 private:
00772   // Chunk allocation state. And other shared state.
00773   // Protected by _S_chunk_allocator_lock.
00774   static _STLP_STATIC_MUTEX _S_chunk_allocator_lock;
00775   static char *_S_start_free;
00776   static char *_S_end_free;
00777   static size_t _S_heap_size;
00778   // Allocator instances that are currently unclaimed by any thread.
00779   static __state_type *_S_free_per_thread_states;
00780   // Pthread key under which per thread state is stored.
00781   static pthread_key_t _S_key;
00782   static bool _S_key_initialized;
00783   // Function to be called on thread exit to reclaim per thread
00784   // state.
00785   static void _S_destructor(void *instance);
00786   // Return a per thread state recycled from a terminated thread, or a new one.
00787   static __state_type *_S_new_per_thread_state();
00788 public:
00789   // Return a recycled or new per thread state; this ensures that the
00790   // current thread has an associated per thread state.
00791   static __state_type *_S_get_per_thread_state();
00792 private:
00793   class _M_lock;
00794   friend class _M_lock;
00795   class _M_lock {
00796   public:
00797     _M_lock () { _S_chunk_allocator_lock._M_acquire_lock(); }
00798     ~_M_lock () { _S_chunk_allocator_lock._M_release_lock(); }
00799   };
00800 
00801 public:
00802 
00803   /* n must be > 0      */
00804   static void * allocate(size_t& __n);
00805 
00806   /* p may not be 0 */
00807   static void deallocate(void *__p, size_t __n);
00808 
00809   // boris : versions for per_thread_allocator
00810   /* n must be > 0      */
00811   static void * allocate(size_t& __n, __state_type* __a);
00812 
00813   /* p may not be 0 */
00814   static void deallocate(void *__p, size_t __n, __state_type* __a);
00815 
00816   static void * reallocate(void *__p, size_t __old_sz, size_t& __new_sz);
00817 };
00818 
00819 /* Returns an object of size n, and optionally adds to size n free list.*/
00820 /* We assume that n is properly aligned.                                */
00821 /* We hold the allocation lock.                                         */
00822 void *_Pthread_alloc_per_thread_state::_M_refill(size_t __n) {
00823   typedef _Pthread_alloc_obj __obj;
00824   size_t __nobjs = 128;
00825   char * __chunk = _Pthread_alloc_impl::_S_chunk_alloc(__n, __nobjs, this);
00826   __obj * volatile * __my_free_list;
00827   __obj * __result;
00828   __obj * __current_obj, * __next_obj;
00829   size_t __i;
00830 
00831   if (1 == __nobjs)  {
00832     return __chunk;
00833   }
00834 
00835   __my_free_list = __free_list + _Pthread_alloc_impl::_S_freelist_index(__n);
00836 
00837   /* Build free list in chunk */
00838   __result = (__obj *)__chunk;
00839   *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
00840   for (__i = 1; ; ++__i) {
00841     __current_obj = __next_obj;
00842     __next_obj = (__obj *)((char *)__next_obj + __n);
00843     if (__nobjs - 1 == __i) {
00844       __current_obj -> __free_list_link = 0;
00845       break;
00846     } else {
00847       __current_obj -> __free_list_link = __next_obj;
00848     }
00849   }
00850   return __result;
00851 }
00852 
00853 void _Pthread_alloc_impl::_S_destructor(void *__instance) {
00854   _M_lock __lock_instance;  // Need to acquire lock here.
00855   _Pthread_alloc_per_thread_state* __s = (_Pthread_alloc_per_thread_state*)__instance;
00856   __s -> __next = _S_free_per_thread_states;
00857   _S_free_per_thread_states = __s;
00858 }
00859 
00860 _Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_new_per_thread_state() {
00861   /* lock already held here.  */
00862   if (0 != _S_free_per_thread_states) {
00863     _Pthread_alloc_per_thread_state *__result = _S_free_per_thread_states;
00864     _S_free_per_thread_states = _S_free_per_thread_states -> __next;
00865     return __result;
00866   }
00867   else {
00868     return _STLP_NEW _Pthread_alloc_per_thread_state;
00869   }
00870 }
00871 
00872 _Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_get_per_thread_state() {
00873   int __ret_code;
00874   __state_type* __result;
00875 
00876   if (_S_key_initialized && (__result = (__state_type*) pthread_getspecific(_S_key)))
00877     return __result;
00878 
00879   /*REFERENCED*/
00880   _M_lock __lock_instance;  // Need to acquire lock here.
00881   if (!_S_key_initialized) {
00882     if (pthread_key_create(&_S_key, _S_destructor)) {
00883       __THROW_BAD_ALLOC;  // failed
00884     }
00885     _S_key_initialized = true;
00886   }
00887 
00888   __result = _S_new_per_thread_state();
00889   __ret_code = pthread_setspecific(_S_key, __result);
00890   if (__ret_code) {
00891     if (__ret_code == ENOMEM) {
00892       __THROW_BAD_ALLOC;
00893     } else {
00894   // EINVAL
00895       _STLP_ABORT();
00896     }
00897   }
00898   return __result;
00899 }
00900 
00901 /* We allocate memory in large chunks in order to avoid fragmenting     */
00902 /* the malloc heap too much.                                            */
00903 /* We assume that size is properly aligned.                             */
00904 char *_Pthread_alloc_impl::_S_chunk_alloc(size_t __p_size, size_t &__nobjs, _Pthread_alloc_per_thread_state *__a) {
00905   typedef _Pthread_alloc_obj __obj;
00906   {
00907     char * __result;
00908     size_t __total_bytes;
00909     size_t __bytes_left;
00910     /*REFERENCED*/
00911     _M_lock __lock_instance;         // Acquire lock for this routine
00912 
00913     __total_bytes = __p_size * __nobjs;
00914     __bytes_left = _S_end_free - _S_start_free;
00915     if (__bytes_left >= __total_bytes) {
00916       __result = _S_start_free;
00917       _S_start_free += __total_bytes;
00918       return __result;
00919     } else if (__bytes_left >= __p_size) {
00920       __nobjs = __bytes_left/__p_size;
00921       __total_bytes = __p_size * __nobjs;
00922       __result = _S_start_free;
00923       _S_start_free += __total_bytes;
00924       return __result;
00925     } else {
00926       size_t __bytes_to_get = 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
00927       // Try to make use of the left-over piece.
00928       if (__bytes_left > 0) {
00929         __obj * volatile * __my_free_list = __a->__free_list + _S_freelist_index(__bytes_left);
00930         ((__obj *)_S_start_free) -> __free_list_link = *__my_free_list;
00931         *__my_free_list = (__obj *)_S_start_free;
00932       }
00933 #  ifdef _SGI_SOURCE
00934       // Try to get memory that's aligned on something like a
00935       // cache line boundary, so as to avoid parceling out
00936       // parts of the same line to different threads and thus
00937       // possibly different processors.
00938       {
00939         const int __cache_line_size = 128;  // probable upper bound
00940         __bytes_to_get &= ~(__cache_line_size-1);
00941         _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get);
00942         if (0 == _S_start_free) {
00943           _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
00944         }
00945       }
00946 #  else  /* !SGI_SOURCE */
00947       _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
00948 #  endif
00949       _S_heap_size += __bytes_to_get;
00950       _S_end_free = _S_start_free + __bytes_to_get;
00951     }
00952   }
00953   // lock is released here
00954   return _S_chunk_alloc(__p_size, __nobjs, __a);
00955 }
00956 
00957 
00958 /* n must be > 0      */
00959 void *_Pthread_alloc_impl::allocate(size_t& __n) {
00960   typedef _Pthread_alloc_obj __obj;
00961   __obj * volatile * __my_free_list;
00962   __obj * __result;
00963   __state_type* __a;
00964 
00965   if (__n > _MAX_BYTES) {
00966     return __malloc_alloc::allocate(__n);
00967   }
00968 
00969   __n = _S_round_up(__n);
00970   __a = _S_get_per_thread_state();
00971 
00972   __my_free_list = __a->__free_list + _S_freelist_index(__n);
00973   __result = *__my_free_list;
00974   if (__result == 0) {
00975     void *__r = __a->_M_refill(__n);
00976     return __r;
00977   }
00978   *__my_free_list = __result->__free_list_link;
00979   return __result;
00980 }
00981 
00982 /* p may not be 0 */
00983 void _Pthread_alloc_impl::deallocate(void *__p, size_t __n) {
00984   typedef _Pthread_alloc_obj __obj;
00985   __obj *__q = (__obj *)__p;
00986   __obj * volatile * __my_free_list;
00987   __state_type* __a;
00988 
00989   if (__n > _MAX_BYTES) {
00990       __malloc_alloc::deallocate(__p, __n);
00991       return;
00992   }
00993 
00994   __a = _S_get_per_thread_state();
00995 
00996   __my_free_list = __a->__free_list + _S_freelist_index(__n);
00997   __q -> __free_list_link = *__my_free_list;
00998   *__my_free_list = __q;
00999 }
01000 
01001 // boris : versions for per_thread_allocator
01002 /* n must be > 0      */
01003 void *_Pthread_alloc_impl::allocate(size_t& __n, __state_type* __a) {
01004   typedef _Pthread_alloc_obj __obj;
01005   __obj * volatile * __my_free_list;
01006   __obj * __result;
01007 
01008   if (__n > _MAX_BYTES) {
01009     return __malloc_alloc::allocate(__n);
01010   }
01011   __n = _S_round_up(__n);
01012 
01013   // boris : here, we have to lock the per thread state, as we may be getting
01014   // memory from a different thread's pool.
01015   _STLP_auto_lock __lock(__a->_M_lock);
01016 
01017   __my_free_list = __a->__free_list + _S_freelist_index(__n);
01018   __result = *__my_free_list;
01019   if (__result == 0) {
01020     void *__r = __a->_M_refill(__n);
01021     return __r;
01022   }
01023   *__my_free_list = __result->__free_list_link;
01024   return __result;
01025 }
01026 
01027 /* p may not be 0 */
01028 void _Pthread_alloc_impl::deallocate(void *__p, size_t __n, __state_type* __a) {
01029   typedef _Pthread_alloc_obj __obj;
01030   __obj *__q = (__obj *)__p;
01031   __obj * volatile * __my_free_list;
01032 
01033   if (__n > _MAX_BYTES) {
01034     __malloc_alloc::deallocate(__p, __n);
01035     return;
01036   }
01037 
01038   // boris : here, we have to lock the per thread state, as we may be returning
01039   // memory from a different thread.
01040   _STLP_auto_lock __lock(__a->_M_lock);
01041 
01042   __my_free_list = __a->__free_list + _S_freelist_index(__n);
01043   __q -> __free_list_link = *__my_free_list;
01044   *__my_free_list = __q;
01045 }
01046 
01047 void *_Pthread_alloc_impl::reallocate(void *__p, size_t __old_sz, size_t& __new_sz) {
01048   void * __result;
01049   size_t __copy_sz;
01050 
01051   if (__old_sz > _MAX_BYTES && __new_sz > _MAX_BYTES) {
01052     return realloc(__p, __new_sz);
01053   }
01054 
01055   if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return __p;
01056   __result = allocate(__new_sz);
01057   __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
01058   memcpy(__result, __p, __copy_sz);
01059   deallocate(__p, __old_sz);
01060   return __result;
01061 }
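
// Behaviour sketch of reallocate(): when both sizes round up to the same size
// class the original pointer is returned untouched; otherwise a new block is
// allocated, min(old, new) bytes are copied and the old block is released.
// Assuming _STLP_DATA_ALIGNMENT == 8:
//
//   size_t __new_sz = 23;
//   void* __q = reallocate(__p, 20, __new_sz);  // same class (24): __q == __p
//   __new_sz = 40;
//   __q = reallocate(__q, 20, __new_sz);        // new 40-byte block, 20 bytes
//                                               // copied, old block released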
01062 
01063 _Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_free_per_thread_states = 0;
01064 pthread_key_t _Pthread_alloc_impl::_S_key = 0;
01065 _STLP_STATIC_MUTEX _Pthread_alloc_impl::_S_chunk_allocator_lock _STLP_MUTEX_INITIALIZER;
01066 bool _Pthread_alloc_impl::_S_key_initialized = false;
01067 char *_Pthread_alloc_impl::_S_start_free = 0;
01068 char *_Pthread_alloc_impl::_S_end_free = 0;
01069 size_t _Pthread_alloc_impl::_S_heap_size = 0;
01070 
01071 void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n)
01072 { return _Pthread_alloc_impl::allocate(__n); }
01073 void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n)
01074 { _Pthread_alloc_impl::deallocate(__p, __n); }
01075 void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n, __state_type* __a)
01076 { return _Pthread_alloc_impl::allocate(__n, __a); }
01077 void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n, __state_type* __a)
01078 { _Pthread_alloc_impl::deallocate(__p, __n, __a); }
01079 void * _STLP_CALL _Pthread_alloc::reallocate(void *__p, size_t __old_sz, size_t& __new_sz)
01080 { return _Pthread_alloc_impl::reallocate(__p, __old_sz, __new_sz); }
01081 _Pthread_alloc_per_thread_state* _STLP_CALL _Pthread_alloc::_S_get_per_thread_state()
01082 { return _Pthread_alloc_impl::_S_get_per_thread_state(); }
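
// Usage sketch (not part of this file): client code normally reaches this
// engine through the allocator adapters declared in <pthread_alloc>
// (pthread_allocator<T>, plus per_thread_allocator<T> for the __state_type*
// overloads above).  Assuming those adapters, a node-based container can be
// bound to the per-thread pools like this:
//
//   #include <pthread_alloc>
//   #include <list>
//
//   // Nodes come from the calling thread's free lists, so concurrent
//   // allocations in different threads do not contend for a single lock.
//   typedef std::list<int, std::pthread_allocator<int> > per_thread_list;
//
//   per_thread_list __l;
//   __l.push_back(42);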
01083 
01084 _STLP_MOVE_TO_STD_NAMESPACE
01085 
01086 #endif
01087 
01088 _STLP_END_NAMESPACE
01089 
01090 #undef _S_FREELIST_INDEX


