/home/ntakagi/work/STLport-5.1.5/stlport/stl/_threads.h

00001 /*
00002  * Copyright (c) 1997-1999
00003  * Silicon Graphics Computer Systems, Inc.
00004  *
00005  * Copyright (c) 1999
00006  * Boris Fomitchev
00007  *
00008  * This material is provided "as is", with absolutely no warranty expressed
00009  * or implied. Any use is at your own risk.
00010  *
00011  * Permission to use or copy this software for any purpose is hereby granted
00012  * without fee, provided the above notices are retained on all copies.
00013  * Permission to modify the code and to distribute modified code is granted,
00014  * provided the above notices are retained, and a notice that the code was
00015  * modified is included with the above copyright notice.
00016  *
00017  */
00018 
00019 // WARNING: This is an internal header file, included by other C++
00020 // standard library headers.  You should not attempt to use this header
00021 // file directly.
00022 
00023 
00024 #ifndef _STLP_INTERNAL_THREADS_H
00025 #define _STLP_INTERNAL_THREADS_H
00026 
00027 // Supported threading models are native SGI, pthreads, uithreads
00028 // (similar to pthreads, but based on an earlier draft of the Posix
00029 // threads standard), and Win32 threads.  Uithread support by Jochen
00030 // Schlick, 1999; Solaris threads support was generalized to them.
00031 
00032 #ifndef _STLP_INTERNAL_CSTDDEF
00033 #  include <stl/_cstddef.h>
00034 #endif
00035 
00036 #ifndef _STLP_INTERNAL_CSTDLIB
00037 #  include <stl/_cstdlib.h>
00038 #endif
00039 
00040 // On SUN and Mac OS X gcc, zero-initialization works just fine...
00041 #if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))
00042 #  define _STLP_MUTEX_INITIALIZER
00043 #endif
00044 
00045 /* This header defines the following atomic operations, which platforms should
00046  * try to support as much as possible. The atomic operations are exposed as macros
00047  * so that their existence can easily be tested for. They are:
00048  * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) :
00049  * increments *__ptr by 1 and returns the new value
00050  * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) :
00051  * decrements *__ptr by 1 and returns the new value
00052  * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
00053  * assigns __val to *__target and returns the former *__target value
00054  * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) :
00055  * assigns __ptr to *__target and returns the former *__target value
00056  * __stl_atomic_t _STLP_ATOMIC_ADD(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
00057  * does *__target = *__target + __val and returns the old *__target value
00058  */
00059 
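/* Illustrative sketch, not part of the original header: the intended behavior of
 * the macros above on a configuration where they are all available (the variable
 * names are hypothetical):
 *
 *   volatile __stl_atomic_t __counter = 0;
 *   _STLP_ATOMIC_INCREMENT(&__counter);   // __counter == 1, returns the new value 1
 *   _STLP_ATOMIC_ADD(&__counter, 5);      // __counter == 6, returns the old value 1
 *   _STLP_ATOMIC_DECREMENT(&__counter);   // __counter == 5, returns the new value 5
 *   __stl_atomic_t __old =
 *       _STLP_ATOMIC_EXCHANGE(&__counter, 0);  // __counter == 0, __old == 5
 */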
00060 #if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
00061 typedef long __stl_atomic_t;
00062 #else
00063 /* Don't import whole namespace!!!! - ptr */
00064 // # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
00065 // // using _STLP_VENDOR_CSTD::size_t;
00066 // using namespace _STLP_VENDOR_CSTD;
00067 // # endif
00068 typedef size_t __stl_atomic_t;
00069 #endif
00070 
00071 #if defined (_STLP_THREADS)
00072 
00073 #  if defined (_STLP_SGI_THREADS)
00074 
00075 #    include <mutex.h>
00076 // Hack for SGI o32 compilers.
00077 #    if !defined(__add_and_fetch) && \
00078         (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
00079 #      define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
00080 #      define __test_and_set(__l,__v)  test_and_set(__l,__v)
00081 #    endif /* o32 */
00082 
00083 #    if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
00084 #      define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
00085 #    else
00086 #      define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
00087 #    endif
00088 
00089 #    define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
00090 #    define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
00091 
00092 #  elif defined (_STLP_PTHREADS)
00093 
00094 #    include <pthread.h>
00095 #    if !defined (_STLP_USE_PTHREAD_SPINLOCK)
00096 #      if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)
00097 #        define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
00098 #      endif
00099 // HP-UX variants have a non-standard "DCE" pthreads implementation (optional on some platforms).
00100 #      if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)
00101 #        define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
00102 #      else
00103 #        define _STLP_PTHREAD_ATTR_DEFAULT 0
00104 #      endif
00105 #    else // _STLP_USE_PTHREAD_SPINLOCK
00106 #      if defined (__OpenBSD__)
00107 #        include <spinlock.h>
00108 #      endif
00109 #    endif // _STLP_USE_PTHREAD_SPINLOCK
00110 
00111 #    if defined (__GNUC__) && defined (__i386__)
00112 
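// Note (added for clarity): "lock; xaddl %reg, mem" atomically adds the register
// to the memory operand and loads the *old* memory value into the register, so
// the wrappers below add the operand back in before returning in order to return
// the new value, matching the semantics documented at the top of this file.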
00113 #      if !defined (_STLP_ATOMIC_INCREMENT)
00114 inline long _STLP_atomic_increment_gcc_x86(long volatile* p) {
00115   long result;
00116   __asm__ __volatile__
00117     ("lock; xaddl  %1, %0;"
00118     :"=m" (*p), "=r" (result)
00119     :"m" (*p),  "1"  (1)
00120     :"cc");
00121   return result + 1;
00122 }
00123 #        define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))
00124 #      endif
00125 
00126 #      if !defined (_STLP_ATOMIC_DECREMENT)
00127 inline long _STLP_atomic_decrement_gcc_x86(long volatile* p) {
00128   long result;
00129   __asm__ __volatile__
00130     ("lock; xaddl  %1, %0;"
00131     :"=m" (*p), "=r" (result)
00132     :"m" (*p),  "1"  (-1)
00133     :"cc");
00134   return result - 1;
00135 }
00136 #        define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))
00137 #      endif
00138 
00139 #      if !defined (_STLP_ATOMIC_ADD)
00140 inline long _STLP_atomic_add_gcc_x86(long volatile* p, long addend) {
00141   long result;
00142   __asm__ __volatile__
00143     ("lock; xaddl %1, %0;"
00144     :"=m" (*p), "=r" (result)
00145     :"m"  (*p), "1"  (addend)
00146     :"cc");
00147  return result + addend;
00148 }
00149 #        define _STLP_ATOMIC_ADD(__dst, __val)  (_STLP_atomic_add_gcc_x86((long volatile*)__dst, (long)__val))
00150 #      endif
00151 
00152 #    endif /* if defined(__GNUC__) && defined(__i386__) */
00153 
00154 #  elif defined (_STLP_WIN32THREADS)
00155 
00156 #    if !defined (_STLP_ATOMIC_INCREMENT)
00157 #      if !defined (_STLP_NEW_PLATFORM_SDK)
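// Note (added for clarity): the older Platform SDK headers declare the
// InterlockedXxx functions as taking a plain "long*", so the volatile
// qualifier has to be cast away in this branch; the newer SDK declares them
// with "long volatile*" and no cast is needed (see the branch below).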
00158 #        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__CONST_CAST(long*, __x))
00159 #        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__CONST_CAST(long*, __x))
00160 #        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__CONST_CAST(long*, __x), __y)
00161 #      else
00162 #        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__x)
00163 #        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__x)
00164 #        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__x, __y)
00165 #      endif
00166 #      define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y)     STLPInterlockedExchangePointer(__x, __y)
00167 /*
00168  * The following functionality is only available since Windows 98. Those targeting earlier
00169  * OSes should define _WIN32_WINDOWS to a value lower than that of Windows 98; see the
00170  * Platform SDK documentation for more information:
00171  */
00172 #      if defined (_STLP_NEW_PLATFORM_SDK) && (!defined (_STLP_WIN32_VERSION) || (_STLP_WIN32_VERSION >= 0x0410))
00173 #        define _STLP_ATOMIC_ADD(__dst, __val) InterlockedExchangeAdd(__dst, __val)
00174 #      endif
00175 #    endif
00176 
00177 #  elif defined (__DECC) || defined (__DECCXX)
00178 
00179 #    include <machine/builtins.h>
00180 #    define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
00181 #    define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
00182 #    define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
00183 
00184 #  elif defined(_STLP_SPARC_SOLARIS_THREADS)
00185 
00186 #    include <stl/_sparc_atomic.h>
00187 
00188 #  elif defined (_STLP_UITHREADS)
00189 
00190 // This inclusion is a potential hazard, as it may bring in all sorts
00191 // of old-style headers. Let's assume the vendor already knows how
00192 // to deal with that.
00193 #    ifndef _STLP_INTERNAL_CTIME
00194 #      include <stl/_ctime.h>
00195 #    endif
00196 #    if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
00197 using _STLP_VENDOR_CSTD::time_t;
00198 #    endif
00199 #    include <synch.h>
00200 #    include <cstdio>
00201 #    include <cwchar>
00202 
00203 #  elif defined (_STLP_BETHREADS)
00204 
00205 #    include <OS.h>
00206 #    include <cassert>
00207 #    include <stdio.h>
00208 #    define _STLP_MUTEX_INITIALIZER = { 0 }
00209 
00210 #  elif defined (_STLP_NWTHREADS)
00211 
00212 #    include <nwthread.h>
00213 #    include <nwsemaph.h>
00214 
00215 #  elif defined(_STLP_OS2THREADS)
00216 
00217 #    if defined (__GNUC__)
00218 #      define INCL_DOSSEMAPHORES
00219 #      include <os2.h>
00220 #    else
00221 // This section serves to replace os2.h for VisualAge C++
00222   typedef unsigned long ULONG;
00223 #      if !defined (__HEV__)  /* INCL_SEMAPHORE may also define HEV */
00224 #        define __HEV__
00225   typedef ULONG HEV;
00226   typedef HEV*  PHEV;
00227 #      endif
00228   typedef ULONG APIRET;
00229   typedef ULONG HMTX;
00230   typedef HMTX*  PHMTX;
00231   typedef const char*  PCSZ;
00232   typedef ULONG BOOL32;
00233   APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);
00234   APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
00235   APIRET _System DosReleaseMutexSem(HMTX hmtx);
00236   APIRET _System DosCloseMutexSem(HMTX hmtx);
00237 #      define _STLP_MUTEX_INITIALIZER = { 0 }
00238 #    endif /* GNUC */
00239 
00240 #  endif
00241 
00242 #else
00243 /* no threads */
00244 #  define _STLP_ATOMIC_INCREMENT(__x) ++(*__x)
00245 #  define _STLP_ATOMIC_DECREMENT(__x) --(*__x)
00246 /* We do not provide the other atomic operations, as they are useless if STLport does
00247  * not have to be thread safe.
00248  */
00249 #endif
00250 
00251 #if !defined (_STLP_MUTEX_INITIALIZER)
00252 #  if defined(_STLP_ATOMIC_EXCHANGE)
00253 #    define _STLP_MUTEX_INITIALIZER = { 0 }
00254 #  elif defined(_STLP_UITHREADS)
00255 #    define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
00256 #  else
00257 #    define _STLP_MUTEX_INITIALIZER
00258 #  endif
00259 #endif
00260 
00261 _STLP_BEGIN_NAMESPACE
00262 
00263 #if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
00264 // Helper struct.  This is a workaround for various compilers that don't
00265 // handle static variables in inline functions properly.
00266 template <int __inst>
00267 struct _STLP_mutex_spin {
00268   enum { __low_max = 30, __high_max = 1000 };
00269   // Low if we suspect uniprocessor, high for multiprocessor.
00270   static unsigned __max;
00271   static unsigned __last;
00272   static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
00273   static void _STLP_CALL _S_nsec_sleep(int __log_nsec);
00274 };
00275 #endif // !_STLP_USE_PTHREAD_SPINLOCK
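// Note (added for clarity): _M_do_lock() is defined out of line (see
// <stl/_threads.c>, included at the bottom of this header unless
// _STLP_LINK_TIME_INSTANTIATION is set); it spins on the lock word and backs
// off via _S_nsec_sleep(), with __max/__last adapting the spin behavior between
// __low_max and __high_max depending on the contention observed.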
00276 
00277 // Locking class.  Note that this class *does not have a constructor*.
00278 // It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
00279 // or dynamically, by explicitly calling the _M_initialize member function.
00280 // (This is similar to the ways that a pthreads mutex can be initialized.)
00281 // There are explicit member functions for acquiring and releasing the lock.
00282 
00283 // There is no constructor because static initialization is essential for
00284 // some uses, and only a class aggregate (see section 8.5.1 of the C++
00285 // standard) can be initialized that way.  That means we must have no
00286 // constructors, no base classes, no virtual functions, and no private or
00287 // protected members.
00288 
00289 // For non-static cases, clients should use  _STLP_mutex.
00290 
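// Illustrative sketch, not part of the original header (the lock names are
// hypothetical; _STLP_STATIC_MUTEX is assumed to be configured elsewhere as
// this aggregate type):
//
//   static _STLP_STATIC_MUTEX __static_lock _STLP_MUTEX_INITIALIZER; // static init, no ctor
//
//   // ...or, for a dynamically initialized lock such as a class member:
//   __member_lock._M_initialize();    // before first use
//   __member_lock._M_acquire_lock();  // ... critical section ...
//   __member_lock._M_release_lock();
//   __member_lock._M_destroy();       // when no longer needed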
00291 struct _STLP_CLASS_DECLSPEC _STLP_mutex_base {
00292 #if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS)
00293   // It should be relatively easy to get this to work on any modern Unix.
00294   volatile __stl_atomic_t _M_lock;
00295 #endif
00296 
00297 #if defined (_STLP_THREADS)
00298 #  if defined (_STLP_ATOMIC_EXCHANGE)
00299   inline void _M_initialize() { _M_lock = 0; }
00300   inline void _M_destroy() {}
00301 
00302   void _M_acquire_lock() {
00303     _STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
00304   }
00305 
00306   inline void _M_release_lock() {
00307     volatile __stl_atomic_t* __lock = &_M_lock;
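    // Note (added for clarity): releasing the lock is just a store of 0, but on
    // architectures with weaker memory ordering a release barrier has to precede
    // that store so that writes performed inside the critical section become
    // visible before the lock appears free -- hence the "sync" / "membar" /
    // "stbar" variants below.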
00308 #    if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
00309     asm("sync");
00310     *__lock = 0;
00311 #    elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \
00312          (defined (_ABIN32) || defined(_ABI64))
00313     __lock_release(__lock);
00314 #    elif defined (_STLP_SPARC_SOLARIS_THREADS)
00315 #      if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
00316     asm("membar #StoreStore ; membar #LoadStore");
00317 #      else
00318     asm(" stbar ");
00319 #      endif
00320     *__lock = 0;
00321 #    else
00322     *__lock = 0;
00323     // This is not sufficient on many multiprocessors, since
00324     // writes to protected variables and the lock may be reordered.
00325 #    endif
00326   }
00327 #  elif defined (_STLP_PTHREADS)
00328 #    if defined (_STLP_USE_PTHREAD_SPINLOCK)
00329 #      if !defined (__OpenBSD__)
00330   pthread_spinlock_t _M_lock;
00331   inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
00332   inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }
00333 
00334   // Sorry, but there is no static initializer for pthread_spinlock_t;
00335   // this will not work for compilers that have problems calling the
00336   // constructor of a static object...
00337 
00338   // _STLP_mutex_base()
00339   //   { pthread_spin_init( &_M_lock, 0 ); }
00340 
00341   // ~_STLP_mutex_base()
00342   //   { pthread_spin_destroy( &_M_lock ); }
00343 
00344   inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock ); }
00345   inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
00346 #      else // __OpenBSD__
00347   spinlock_t _M_lock;
00348   inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock ); }
00349   inline void _M_destroy() { }
00350   inline void _M_acquire_lock() { _SPINLOCK( &_M_lock ); }
00351   inline void _M_release_lock() { _SPINUNLOCK( &_M_lock ); }
00352 #      endif // __OpenBSD__
00353 #    else // !_STLP_USE_PTHREAD_SPINLOCK
00354   pthread_mutex_t _M_lock;
00355   inline void _M_initialize()
00356   { pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); }
00357   inline void _M_destroy()
00358   { pthread_mutex_destroy(&_M_lock); }
00359   inline void _M_acquire_lock() {
00360 #      if defined ( __hpux ) && ! defined (PTHREAD_MUTEX_INITIALIZER)
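    // Note (added for clarity): with the HP-UX DCE pthreads there is no usable
    // static initializer, so a zero-initialized mutex is initialized lazily on
    // first acquisition; field1 == 0 is taken to mean "not yet initialized".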
00361     if (!_M_lock.field1)  _M_initialize();
00362 #      endif
00363     pthread_mutex_lock(&_M_lock);
00364   }
00365   inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
00366 #    endif // !_STLP_USE_PTHREAD_SPINLOCK
00367 
00368 #  elif defined (_STLP_UITHREADS)
00369   mutex_t _M_lock;
00370   inline void _M_initialize()
00371   { mutex_init(&_M_lock, 0, NULL); }
00372   inline void _M_destroy()
00373   { mutex_destroy(&_M_lock); }
00374   inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
00375   inline void _M_release_lock() { mutex_unlock(&_M_lock); }
00376 
00377 #  elif defined (_STLP_OS2THREADS)
00378   HMTX _M_lock;
00379   inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
00380   inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
00381   inline void _M_acquire_lock() {
00382     if (!_M_lock) _M_initialize();
00383     DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
00384   }
00385   inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
00386 #  elif defined (_STLP_BETHREADS)
00387   sem_id sem;
00388   inline void _M_initialize() {
00389     sem = create_sem(1, "STLPort");
00390     assert(sem > 0);
00391   }
00392   inline void _M_destroy() {
00393     int t = delete_sem(sem);
00394     assert(t == B_NO_ERROR);
00395   }
00396   inline void _M_acquire_lock();
00397   inline void _M_release_lock() {
00398     status_t t = release_sem(sem);
00399     assert(t == B_NO_ERROR);
00400   }
00401 #  elif defined (_STLP_NWTHREADS)
00402   LONG _M_lock;
00403   inline void _M_initialize()
00404   { _M_lock = OpenLocalSemaphore(1); }
00405   inline void _M_destroy()
00406   { CloseLocalSemaphore(_M_lock); }
00407   inline void _M_acquire_lock()
00408   { WaitOnLocalSemaphore(_M_lock); }
00409   inline void _M_release_lock() { SignalLocalSemaphore(_M_lock); }
00410 #  else      //*ty 11/24/2001 - added configuration check
00411 #    error "Unknown thread facility configuration"
00412 #  endif
00413 #else /* No threads */
00414   inline void _M_initialize() {}
00415   inline void _M_destroy() {}
00416   inline void _M_acquire_lock() {}
00417   inline void _M_release_lock() {}
00418 #endif // _STLP_PTHREADS
00419 };
00420 
00421 // Locking class.  The constructor initializes the lock, the destructor destroys it.
00422 // A well-behaved class: it does not need a static initializer.
00423 
00424 class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_base {
00425   public:
00426     inline _STLP_mutex () { _M_initialize(); }
00427     inline ~_STLP_mutex () { _M_destroy(); }
00428   private:
00429     _STLP_mutex(const _STLP_mutex&);
00430     void operator=(const _STLP_mutex&);
00431 };
00432 
00433 // A locking class that uses _STLP_STATIC_MUTEX.  The constructor takes
00434 // a reference to an _STLP_STATIC_MUTEX, and acquires a lock.  The destructor
00435 // releases the lock.
00436 // It's not clear that this is exactly the right functionality.
00437 // It will probably change in the future.
00438 
00439 struct _STLP_CLASS_DECLSPEC _STLP_auto_lock {
00440   _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
00441   { _M_lock._M_acquire_lock(); }
00442   ~_STLP_auto_lock()
00443   { _M_lock._M_release_lock(); }
00444 
00445 private:
00446   _STLP_STATIC_MUTEX& _M_lock;
00447   void operator=(const _STLP_auto_lock&);
00448   _STLP_auto_lock(const _STLP_auto_lock&);
00449 };
00450 
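// Illustrative usage sketch, not part of the original header (the names
// __example_lock, __example_count and __example_increment are hypothetical):
#if 0
static _STLP_STATIC_MUTEX __example_lock _STLP_MUTEX_INITIALIZER;
static long __example_count = 0;

void __example_increment() {
  _STLP_auto_lock __guard(__example_lock);  // acquires __example_lock here
  ++__example_count;                        // protected update
}                                           // lock released when __guard is destroyed
#endif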
00451 /*
00452  * Class _Refcount_Base provides a reference count of type __stl_atomic_t
00453  * (the data member _M_ref_count) and member functions _M_incr and _M_decr,
00454  * which perform atomic preincrement/predecrement.  The constructor
00455  * initializes _M_ref_count.
00456  */
00457 class _STLP_CLASS_DECLSPEC _Refcount_Base {
00458   // The data member _M_ref_count
00459 #if defined (__DMC__)
00460 public:
00461 #endif
00462   _STLP_VOLATILE __stl_atomic_t _M_ref_count;
00463 
00464 #if defined (_STLP_THREADS) && \
00465    (!defined (_STLP_ATOMIC_INCREMENT) || !defined (_STLP_ATOMIC_DECREMENT) || \
00466     (defined (_STLP_WIN32_VERSION) && (_STLP_WIN32_VERSION <= 0x0400)))
00467 #  define _STLP_USE_MUTEX
00468   _STLP_mutex _M_mutex;
00469 #endif
00470 
00471   public:
00472   // Constructor
00473   _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}
00474 
00475   // _M_incr and _M_decr
00476 #if defined (_STLP_THREADS)
00477 #  if !defined (_STLP_USE_MUTEX)
00478    __stl_atomic_t _M_incr() { return _STLP_ATOMIC_INCREMENT(&_M_ref_count); }
00479    __stl_atomic_t _M_decr() { return _STLP_ATOMIC_DECREMENT(&_M_ref_count); }
00480 #  else
00481 #    undef _STLP_USE_MUTEX
00482   __stl_atomic_t _M_incr() {
00483     _STLP_auto_lock l(_M_mutex);
00484     return ++_M_ref_count;
00485   }
00486   __stl_atomic_t _M_decr() {
00487     _STLP_auto_lock l(_M_mutex);
00488     return --_M_ref_count;
00489   }
00490 #  endif
00491 #else  /* No threads */
00492   __stl_atomic_t _M_incr() { return ++_M_ref_count; }
00493   __stl_atomic_t _M_decr() { return --_M_ref_count; }
00494 #endif
00495 };
00496 
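// Illustrative sketch, not part of the original header: the intended intrusive
// reference-counting pattern (_Example_shared and the helper functions are
// hypothetical):
#if 0
struct _Example_shared : public _Refcount_Base {
  _Example_shared() : _Refcount_Base(1) {}      // creator holds the first reference
};

inline void __example_add_ref(_Example_shared* __p)
{ __p->_M_incr(); }

inline void __example_release(_Example_shared* __p) {
  if (__p->_M_decr() == 0)                      // _M_decr atomically returns the new count
    delete __p;
}
#endif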
00497 /* Atomic swap on __stl_atomic_t.
00498  * This is guaranteed to behave as though it were atomic only if all
00499  * possibly concurrent updates use _Atomic_swap.
00500  * In some cases the operation is emulated with a lock.
00501  * The same applies to _Atomic_swap_ptr.
00502  */
00503 /* Helper struct to handle the following cases:
00504  * - on platforms where sizeof(__stl_atomic_t) == sizeof(void*), the atomic
00505  *   exchange can be done directly on pointers;
00506  * - on platforms without atomic operations, the swap is done in a critical
00507  *   section, which is portable but inefficient.
00508  */
00509 template <int __use_ptr_atomic_swap>
00510 class _Atomic_swap_struct {
00511 public:
00512 #if defined (_STLP_THREADS) && \
00513     !defined (_STLP_ATOMIC_EXCHANGE) && \
00514     (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
00515      defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
00516 #  define _STLP_USE_ATOMIC_SWAP_MUTEX
00517   static _STLP_STATIC_MUTEX _S_swap_lock;
00518 #endif
00519 
00520   static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
00521 #if defined (_STLP_THREADS)
00522 #  if defined (_STLP_ATOMIC_EXCHANGE)
00523   return _STLP_ATOMIC_EXCHANGE(__p, __q);
00524 #  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
00525   _S_swap_lock._M_acquire_lock();
00526   __stl_atomic_t __result = *__p;
00527   *__p = __q;
00528   _S_swap_lock._M_release_lock();
00529   return __result;
00530 #  else
00531 #    error Missing atomic swap implementation
00532 #  endif
00533 #else
00534   /* no threads */
00535   __stl_atomic_t __result = *__p;
00536   *__p = __q;
00537   return __result;
00538 #endif // _STLP_THREADS
00539   }
00540 
00541   static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
00542 #if defined (_STLP_THREADS)
00543 #  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
00544   return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
00545 #  elif defined (_STLP_ATOMIC_EXCHANGE)
00546   _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
00547   return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
00548                                                          __REINTERPRET_CAST(__stl_atomic_t, __q))
00549                             );
00550 #  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
00551   _S_swap_lock._M_acquire_lock();
00552   void *__result = *__p;
00553   *__p = __q;
00554   _S_swap_lock._M_release_lock();
00555   return __result;
00556 #  else
00557 #    error Missing pointer atomic swap implementation
00558 #  endif
00559 #else
00560   /* no thread */
00561   void *__result = *__p;
00562   *__p = __q;
00563   return __result;
00564 #endif
00565   }
00566 };
00567 
00568 _STLP_TEMPLATE_NULL
00569 class _Atomic_swap_struct<0> {
00570 public:
00571 #if defined (_STLP_THREADS) && \
00572     (!defined (_STLP_ATOMIC_EXCHANGE) || !defined (_STLP_ATOMIC_EXCHANGE_PTR)) && \
00573     (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
00574      defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
00575 #  define _STLP_USE_ATOMIC_SWAP_MUTEX
00576   static _STLP_STATIC_MUTEX _S_swap_lock;
00577 #endif
00578 
00579   static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
00580 #if defined (_STLP_THREADS)
00581 #  if defined (_STLP_ATOMIC_EXCHANGE)
00582   return _STLP_ATOMIC_EXCHANGE(__p, __q);
00583 #  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
00584   /* This should be portable, but performance is expected
00585    * to be quite awful.  This really needs platform specific
00586    * code.
00587    */
00588   _S_swap_lock._M_acquire_lock();
00589   __stl_atomic_t __result = *__p;
00590   *__p = __q;
00591   _S_swap_lock._M_release_lock();
00592   return __result;
00593 #  else
00594 #    error Missing atomic swap implementation
00595 #  endif
00596 #else
00597   /* no threads */
00598   __stl_atomic_t __result = *__p;
00599   *__p = __q;
00600   return __result;
00601 #endif // _STLP_THREADS
00602   }
00603 
00604   static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
00605 #if defined (_STLP_THREADS)
00606 #  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
00607   return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
00608 #  elif defined (_STLP_ATOMIC_EXCHANGE)
00609   _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
00610   return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
00611                                                          __REINTERPRET_CAST(__stl_atomic_t, __q))
00612                             );
00613 #  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
00614   _S_swap_lock._M_acquire_lock();
00615   void *__result = *__p;
00616   *__p = __q;
00617   _S_swap_lock._M_release_lock();
00618   return __result;
00619 #  else
00620 #    error Missing pointer atomic swap implementation
00621 #  endif
00622 #else
00623   /* no thread */
00624   void *__result = *__p;
00625   *__p = __q;
00626   return __result;
00627 #endif
00628   }
00629 };
00630 
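// Note (added for clarity): _Atomic_swap/_Atomic_swap_ptr below instantiate the
// primary template when sizeof(__stl_atomic_t) == sizeof(void*) and this <0>
// specialization otherwise; as can be seen above, the two only differ in the
// condition under which they declare the _S_swap_lock mutex used for the
// portable, lock-based emulation.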
00631 #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
00632 #  pragma warning (push)
00633 #  pragma warning (disable : 4189) //__use_ptr_atomic_swap initialized but not used
00634 #endif
00635 
00636 inline __stl_atomic_t _STLP_CALL _Atomic_swap(_STLP_VOLATILE __stl_atomic_t * __p, __stl_atomic_t __q) {
00637   const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
00638   return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap(__p, __q);
00639 }
00640 
00641 inline void* _STLP_CALL _Atomic_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
00642   const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
00643   return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap_ptr(__p, __q);
00644 }
00645 
00646 #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
00647 #  pragma warning (pop)
00648 #endif
00649 
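// Illustrative sketch, not part of the original header: one typical use of an
// unconditional atomic exchange is "grab and reset" -- atomically taking the
// current contents of a shared slot while leaving a known value behind (the
// __pending_head and __example_take_all names are hypothetical):
#if 0
static void* _STLP_VOLATILE __pending_head = 0;

inline void* __example_take_all()
{ return _Atomic_swap_ptr(&__pending_head, 0); }  // returns the old head, slot is now 0
#endif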
00650 #if defined (_STLP_BETHREADS)
00651 template <int __inst>
00652 struct _STLP_beos_static_lock_data {
00653   static bool is_init;
00654   struct mutex_t : public _STLP_mutex {
00655     mutex_t()
00656     { _STLP_beos_static_lock_data<0>::is_init = true; }
00657     ~mutex_t()
00658     { _STLP_beos_static_lock_data<0>::is_init = false; }
00659   };
00660   static mutex_t mut;
00661 };
00662 
00663 template <int __inst>
00664 bool _STLP_beos_static_lock_data<__inst>::is_init = false;
00665 template <int __inst>
00666 typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;
00667 
00668 inline void _STLP_mutex_base::_M_acquire_lock() {
00669   if (sem == 0) {
00670     // We need to initialize on demand here;
00671     // to prevent race conditions, use our global
00672     // mutex if it's available:
00673     if (_STLP_beos_static_lock_data<0>::is_init) {
00674       _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
00675       if (sem == 0) _M_initialize();
00676     }
00677     else {
00678       // No lock is available; we must still be in
00679       // startup code, so there must be only one
00680       // thread active at this point.
00681       _M_initialize();
00682     }
00683   }
00684   status_t t;
00685   t = acquire_sem(sem);
00686   assert(t == B_NO_ERROR);
00687 }
00688 #endif
00689 
00690 _STLP_END_NAMESPACE
00691 
00692 #if !defined (_STLP_LINK_TIME_INSTANTIATION)
00693 #  include <stl/_threads.c>
00694 #endif
00695 
00696 #endif /* _STLP_INTERNAL_THREADS_H */
00697 
00698 // Local Variables:
00699 // mode:C++
00700 // End:


