libstdc++ <mutex>
// <mutex> -*- C++ -*-

// Copyright (C) 2003-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/mutex
 *  This is a Standard C++ Library header.
 */

#ifndef _GLIBCXX_MUTEX
#define _GLIBCXX_MUTEX 1

#pragma GCC system_header

#if __cplusplus < 201103L
# include <bits/c++0x_warning.h>
#else

#include <tuple>
#include <chrono>
#include <exception>
#include <type_traits>
#include <functional>
#include <system_error>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/move.h> // for std::swap

#if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  // Common base class for std::mutex and std::timed_mutex
  class __mutex_base
  {
  protected:
    typedef __gthread_mutex_t __native_type;

#ifdef __GTHREAD_MUTEX_INIT
    __native_type _M_mutex = __GTHREAD_MUTEX_INIT;

    constexpr __mutex_base() noexcept = default;
#else
    __native_type _M_mutex;

    __mutex_base() noexcept
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__mutex_base() noexcept { __gthread_mutex_destroy(&_M_mutex); }
#endif

    __mutex_base(const __mutex_base&) = delete;
    __mutex_base& operator=(const __mutex_base&) = delete;
  };

  // Common base class for std::recursive_mutex and std::recursive_timed_mutex
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t __native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    __native_type _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };

  /**
   * @defgroup mutexes Mutexes
   * @ingroup concurrency
   *
   * Classes for mutex support.
   * @{
   */

  /// mutex
  class mutex : private __mutex_base
  {
  public:
    typedef __native_type* native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    constexpr
#endif
    mutex() noexcept = default;
    ~mutex() = default;

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
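
  // Usage sketch (not part of the upstream header): locking and unlocking a
  // std::mutex directly. The names `total` and `add` are illustrative only.
  //
  //   #include <mutex>
  //
  //   std::mutex m;
  //   long total = 0;
  //
  //   void add(long x)
  //   {
  //     m.lock();      // blocks until the mutex is acquired
  //     total += x;
  //     m.unlock();    // must be called on every path out of the critical section
  //   }
  //
  // In practice, prefer the RAII wrappers std::lock_guard and std::unique_lock
  // defined later in this header, which release the mutex even if an
  // exception is thrown.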

  /// recursive_mutex
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type* native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
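
  // Usage sketch (not part of the upstream header): a recursive_mutex may be
  // re-locked by the thread that already owns it, which a plain std::mutex
  // does not allow. The `Counter` class is illustrative only.
  //
  //   #include <mutex>
  //
  //   class Counter
  //   {
  //     std::recursive_mutex _mtx;
  //     int                  _value = 0;
  //   public:
  //     void set(int v)
  //     {
  //       std::lock_guard<std::recursive_mutex> l(_mtx);
  //       _value = v;
  //     }
  //     void reset()
  //     {
  //       std::lock_guard<std::recursive_mutex> l(_mtx);
  //       set(0);   // re-locks _mtx while reset() still holds it
  //     }
  //   };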

#if _GTHREAD_USE_MUTEX_TIMEDLOCK
  /// timed_mutex
  class timed_mutex : private __mutex_base
  {
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::steady_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

  public:
    typedef __native_type* native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);

        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
      }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };
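
  // Usage sketch (not part of the upstream header): bounding the wait for a
  // lock with timed_mutex::try_lock_for. The names `tm` and
  // `do_work_if_available` are illustrative only.
  //
  //   #include <mutex>
  //   #include <chrono>
  //
  //   std::timed_mutex tm;
  //
  //   bool do_work_if_available()
  //   {
  //     if (!tm.try_lock_for(std::chrono::milliseconds(100)))
  //       return false;              // could not acquire within 100 ms
  //     // ... critical section ...
  //     tm.unlock();
  //     return true;
  //   }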

  /// recursive_timed_mutex
  class recursive_timed_mutex : private __recursive_mutex_base
  {
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::steady_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

  public:
    typedef __native_type* native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);

        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
      }

    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };
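
  // Usage sketch (not part of the upstream header): recursive_timed_mutex
  // combines recursive ownership with the timed try_lock_until/try_lock_for
  // operations. The names `rtm` and `lock_before` are illustrative only.
  //
  //   #include <mutex>
  //   #include <chrono>
  //
  //   std::recursive_timed_mutex rtm;
  //
  //   bool lock_before(std::chrono::steady_clock::time_point deadline)
  //   {
  //     return rtm.try_lock_until(deadline);  // false if the deadline passes first
  //   }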
#endif

  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  constexpr defer_lock_t  defer_lock { };
  constexpr try_to_lock_t try_to_lock { };
  constexpr adopt_lock_t  adopt_lock { };

  /// @brief Scoped lock idiom.
  // Acquire the mutex here with a constructor call, then release with
  // the destructor call in accordance with RAII style.
  template<typename _Mutex>
    class lock_guard
    {
    public:
      typedef _Mutex mutex_type;

      explicit lock_guard(mutex_type& __m) : _M_device(__m)
      { _M_device.lock(); }

      lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
      { } // calling thread owns mutex

      ~lock_guard()
      { _M_device.unlock(); }

      lock_guard(const lock_guard&) = delete;
      lock_guard& operator=(const lock_guard&) = delete;

    private:
      mutex_type& _M_device;
    };
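
  // Usage sketch (not part of the upstream header): lock_guard as a
  // scope-bound lock, including the adopt_lock form for a mutex the calling
  // thread already owns. The names `m`, `locked_scope` and `adopting_scope`
  // are illustrative only.
  //
  //   #include <mutex>
  //
  //   std::mutex m;
  //
  //   void locked_scope()
  //   {
  //     std::lock_guard<std::mutex> guard(m);   // locks m here
  //     // ... critical section ...
  //   }                                         // unlocks m when guard is destroyed
  //
  //   void adopting_scope()
  //   {
  //     m.lock();                                              // locked manually
  //     std::lock_guard<std::mutex> guard(m, std::adopt_lock); // takes over ownership
  //     // ... critical section ...
  //   }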

  /// unique_lock
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      unique_lock() noexcept
      : _M_device(0), _M_owns(false)
      { }

      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
        lock();
        _M_owns = true;
      }

      unique_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_device(&__m), _M_owns(false)
      { }

      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      {
        // XXX calling thread owns mutex
      }

      template<typename _Clock, typename _Duration>
        unique_lock(mutex_type& __m,
                    const chrono::time_point<_Clock, _Duration>& __atime)
        : _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
        { }

      template<typename _Rep, typename _Period>
        unique_lock(mutex_type& __m,
                    const chrono::duration<_Rep, _Period>& __rtime)
        : _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
        { }

      ~unique_lock()
      {
        if (_M_owns)
          unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      unique_lock(unique_lock&& __u) noexcept
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
        __u._M_device = 0;
        __u._M_owns = false;
      }

      unique_lock& operator=(unique_lock&& __u) noexcept
      {
        if (_M_owns)
          unlock();

        unique_lock(std::move(__u)).swap(*this);

        __u._M_device = 0;
        __u._M_owns = false;

        return *this;
      }

      void
      lock()
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_device->lock();
            _M_owns = true;
          }
      }

      bool
      try_lock()
      {
        if (!_M_device)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_owns)
          __throw_system_error(int(errc::resource_deadlock_would_occur));
        else
          {
            _M_owns = _M_device->try_lock();
            return _M_owns;
          }
      }

      template<typename _Clock, typename _Duration>
        bool
        try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
        {
          if (!_M_device)
            __throw_system_error(int(errc::operation_not_permitted));
          else if (_M_owns)
            __throw_system_error(int(errc::resource_deadlock_would_occur));
          else
            {
              _M_owns = _M_device->try_lock_until(__atime);
              return _M_owns;
            }
        }

      template<typename _Rep, typename _Period>
        bool
        try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
        {
          if (!_M_device)
            __throw_system_error(int(errc::operation_not_permitted));
          else if (_M_owns)
            __throw_system_error(int(errc::resource_deadlock_would_occur));
          else
            {
              _M_owns = _M_device->try_lock_for(__rtime);
              return _M_owns;
            }
        }

      void
      unlock()
      {
        if (!_M_owns)
          __throw_system_error(int(errc::operation_not_permitted));
        else if (_M_device)
          {
            _M_device->unlock();
            _M_owns = false;
          }
      }

      void
      swap(unique_lock& __u) noexcept
      {
        std::swap(_M_device, __u._M_device);
        std::swap(_M_owns, __u._M_owns);
      }

      mutex_type*
      release() noexcept
      {
        mutex_type* __ret = _M_device;
        _M_device = 0;
        _M_owns = false;
        return __ret;
      }

      bool
      owns_lock() const noexcept
      { return _M_owns; }

      explicit operator bool() const noexcept
      { return owns_lock(); }

      mutex_type*
      mutex() const noexcept
      { return _M_device; }

    private:
      mutex_type* _M_device;
      bool        _M_owns; // XXX use atomic_bool
    };
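
  // Usage sketch (not part of the upstream header): unique_lock is a movable
  // lock that can be acquired lazily and released early. Together with
  // std::lock (defined below) it gives deadlock-free locking of several
  // mutexes. The names `m1`, `m2` and `transfer` are illustrative only.
  //
  //   #include <mutex>
  //
  //   std::mutex m1, m2;
  //
  //   void transfer()
  //   {
  //     std::unique_lock<std::mutex> l1(m1, std::defer_lock);  // not locked yet
  //     std::unique_lock<std::mutex> l2(m2, std::defer_lock);
  //     std::lock(l1, l2);             // locks both without risking deadlock
  //     // ... operate on data guarded by m1 and m2 ...
  //   }                                // both unlocked here, since l1 and l2 own them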

  /// Overload of swap for unique_lock objects.
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }

  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
        static void
        __do_unlock(tuple<_Lock&...>& __locks)
        {
          std::get<_Idx>(__locks).unlock();
          __unlock_impl<_Idx - 1>::__do_unlock(__locks);
        }
    };

  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
        static void
        __do_unlock(tuple<_Lock&...>&)
        { }
    };

  template<typename _Lock>
    unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>(__l, try_to_lock); }

  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __try_lock_impl<_Idx + 1, _Idx + 2 < sizeof...(_Lock)>::
                __do_try_lock(__locks, __idx);
              if (__idx == -1)
                __lock.release();
            }
        }
    };

  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
        static void
        __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
        {
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __idx = -1;
              __lock.release();
            }
        }
    };

  /** @brief Generic try_lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try
      { __try_lock_impl<0>::__do_try_lock(__locks, __idx); }
      __catch(...)
      { }
      return __idx;
    }
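
  // Usage sketch (not part of the upstream header): std::try_lock attempts to
  // lock every argument and reports which one failed. The names `a`, `b` and
  // `poll` are illustrative only.
  //
  //   #include <mutex>
  //
  //   std::mutex a, b;
  //
  //   void poll()
  //   {
  //     int failed = std::try_lock(a, b);
  //     if (failed == -1)              // both mutexes are now locked
  //       {
  //         // ... use the shared data ...
  //         a.unlock();
  //         b.unlock();
  //       }
  //     // otherwise `failed` is the 0-based index of the argument that could
  //     // not be locked, and any locks taken before it were released again
  //   }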

  /** @brief Generic lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If the call exits via an exception any locks that were
   *  obtained will be released.
   */
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      while (true)
        {
          unique_lock<_L1> __first(__l1);
          int __idx;
          auto __locks = std::tie(__l2, __l3...);
          __try_lock_impl<0, sizeof...(_L3)>::__do_try_lock(__locks, __idx);
          if (__idx == -1)
            {
              __first.release();
              return;
            }
        }
    }
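
  // Usage sketch (not part of the upstream header): std::lock acquires
  // several mutexes without deadlock regardless of the order other threads
  // take them in; adopt_lock guards then own them for RAII release. The names
  // `a`, `b` and `update_both` are illustrative only.
  //
  //   #include <mutex>
  //
  //   std::mutex a, b;
  //
  //   void update_both()
  //   {
  //     std::lock(a, b);                                    // deadlock-free acquisition
  //     std::lock_guard<std::mutex> ga(a, std::adopt_lock);
  //     std::lock_guard<std::mutex> gb(b, std::adopt_lock);
  //     // ... update data guarded by both a and b ...
  //   }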

  /// once_flag
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type _M_once = __GTHREAD_ONCE_INIT;

  public:
    /// Constructor
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };

#ifdef _GLIBCXX_HAVE_TLS
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
#else
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  extern "C" void __once_proxy(void);

  /// call_once
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      auto __bound_functor = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      auto __callable = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_functor = [&]() { __callable(); };
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      if (__e)
        __throw_system_error(__e);
    }
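
  // Usage sketch (not part of the upstream header): std::call_once runs its
  // callable exactly once even when several threads race to reach it, which
  // makes it useful for lazy initialization. The names `init_flag`, `table`
  // and `ensure_table` are illustrative only.
  //
  //   #include <mutex>
  //
  //   std::once_flag init_flag;
  //   int* table = nullptr;
  //
  //   void ensure_table()
  //   {
  //     std::call_once(init_flag, [] { table = new int[256](); });
  //     // every thread that returns from ensure_table() sees an initialized table
  //   }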

  // @} group mutexes
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1

#endif // C++11

#endif // _GLIBCXX_MUTEX