/*
 * Distributed under the Boost Software License, Version 1.0.
 * (See accompanying file LICENSE_1_0.txt or copy at
 * http://www.boost.org/LICENSE_1_0.txt)
 *
 * Copyright (c) 2014 Andrey Semashev
 */
/*!
 * \file   atomic/detail/ops_gcc_atomic.hpp
 *
 * This header contains implementation of the \c operations template.
 */

#ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_
#define BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_

#include <cstddef>
#include <boost/memory_order.hpp>
#include <boost/atomic/detail/config.hpp>
#include <boost/atomic/detail/storage_traits.hpp>
#include <boost/atomic/detail/operations_fwd.hpp>
#include <boost/atomic/capabilities.hpp>

#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && (defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B) || defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B))
#include <boost/atomic/detail/ops_gcc_x86_dcas.hpp>
#include <boost/atomic/detail/ops_cas_based.hpp>
#endif

#if __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE ||\
    __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE || __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE ||\
    __GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE || __GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE ||\
    __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE
// There are platforms where we need to use larger storage types
#include <boost/atomic/detail/int_sizes.hpp>
#include <boost/atomic/detail/ops_extending_cas_based.hpp>
#endif

#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif

#if defined(__INTEL_COMPILER)
// This is used to suppress warning #32013 described below for Intel Compiler.
// In debug builds the compiler does not inline any functions, so basically
// every atomic function call results in this warning. I don't know any other
// way to selectively disable just this one warning.
#pragma system_header
#endif

namespace boost {
namespace atomics {
namespace detail {
/*!
 * The function converts \c boost::memory_order values to the compiler-specific constants.
 *
 * NOTE: The intention is that the function is optimized away by the compiler, and the
 * compiler-specific constants are passed to the intrinsics. Unfortunately, constexpr doesn't
 * work in this case because the standard atomics interface requires memory ordering
 * constants to be passed as function arguments, at which point they stop being constexpr.
 * However, it is crucial that the compiler sees constants and not runtime values,
 * because otherwise it just ignores the ordering value and always uses seq_cst.
 * This is the case with Intel C++ Compiler 14.0.3 (Composer XE 2013 SP1, update 3) and
 * gcc 4.8.2. Intel Compiler issues a warning in this case:
 *
 * warning #32013: Invalid memory order specified. Defaulting to seq_cst memory order.
 *
 * while gcc acts silently.
 *
 * To mitigate the problem ALL functions, including the atomic<> members, must be
 * declared with BOOST_FORCEINLINE. In this case the compilers are able to see that
 * all functions are called with constant orderings and invoke the intrinsics properly.
 *
 * Unfortunately, this still doesn't work in debug mode as the compiler doesn't
 * propagate constants even when functions are marked with BOOST_FORCEINLINE. In this case
 * all atomic operations will be executed with seq_cst semantics.
 */
BOOST_FORCEINLINE BOOST_CONSTEXPR int convert_memory_order_to_gcc(memory_order order) BOOST_NOEXCEPT
{
    return (order == memory_order_relaxed ? __ATOMIC_RELAXED : (order == memory_order_consume ? __ATOMIC_CONSUME :
        (order == memory_order_acquire ? __ATOMIC_ACQUIRE : (order == memory_order_release ? __ATOMIC_RELEASE :
        (order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_SEQ_CST)))));
}
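
/*!
 * A minimal, illustrative sketch (not part of the header's interface): when the ordering
 * argument is a compile-time constant and the call is inlined, the conversion above folds
 * to the matching __ATOMIC_* constant, so the __atomic intrinsic receives a constant.
 * The variable names below are hypothetical.
 *
 * \code
 * unsigned int storage = 0u;
 * // With inlining, this is equivalent to __atomic_load_n(&storage, __ATOMIC_ACQUIRE)
 * unsigned int v = __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(boost::memory_order_acquire));
 * \endcode
 */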
template< std::size_t Size, bool Signed >
struct gcc_atomic_operations
{
    typedef typename storage_traits< Size >::type storage_type;

    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = Size;
    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = storage_traits< Size >::alignment;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
    static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;

    // Note: In the current implementation, gcc_atomic_operations are used only when the particularly sized __atomic
    // intrinsics are always lock-free (i.e. the corresponding LOCK_FREE macro is 2). Therefore it is safe to
    // always set is_always_lock_free to true here.
    static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;

    static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        __atomic_store_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_load_n(&storage, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_add(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_sub(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_exchange_n(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE bool compare_exchange_strong(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return __atomic_compare_exchange_n
        (
            &storage, &expected, desired, false,
            atomics::detail::convert_memory_order_to_gcc(success_order),
            atomics::detail::convert_memory_order_to_gcc(failure_order)
        );
    }

    static BOOST_FORCEINLINE bool compare_exchange_weak(
        storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
    {
        return __atomic_compare_exchange_n
        (
            &storage, &expected, desired, true,
            atomics::detail::convert_memory_order_to_gcc(success_order),
            atomics::detail::convert_memory_order_to_gcc(failure_order)
        );
    }

    static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_and(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_or(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_fetch_xor(&storage, v, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        return __atomic_test_and_set(&storage, atomics::detail::convert_memory_order_to_gcc(order));
    }

    static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
    {
        __atomic_clear(const_cast< storage_type* >(&storage), atomics::detail::convert_memory_order_to_gcc(order));
    }
};
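
/*!
 * A minimal usage sketch (illustrative only, assuming a target where the 4-byte intrinsics
 * are always lock-free): the operations table above can drive a CAS loop, e.g. a capped
 * increment built on compare_exchange_weak. The function and variable names below are
 * hypothetical and do not exist elsewhere in Boost.Atomic.
 *
 * \code
 * typedef atomics::detail::gcc_atomic_operations< 4u, false > ops;
 *
 * ops::storage_type add_capped(ops::storage_type volatile& storage, ops::storage_type cap)
 * {
 *     ops::storage_type old_val = ops::load(storage, boost::memory_order_relaxed);
 *     // compare_exchange_weak reloads old_val with the current value on failure,
 *     // so the loop retries until the increment succeeds or the cap is reached.
 *     while (old_val < cap &&
 *         !ops::compare_exchange_weak(storage, old_val, old_val + 1u, boost::memory_order_acq_rel, boost::memory_order_relaxed))
 *     {
 *     }
 *     return old_val;
 * }
 * \endcode
 */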
#if BOOST_ATOMIC_INT128_LOCK_FREE > 0
#if (defined(__clang__) || (defined(BOOST_GCC) && (BOOST_GCC+0) >= 70000)) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG16B)

// Workaround for clang bug: http://llvm.org/bugs/show_bug.cgi?id=19149
// Clang 3.4 does not implement 128-bit __atomic* intrinsics even though it defines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16.
// A similar problem exists with gcc 7 as well, as it requires linking with libatomic to use 16-byte intrinsics:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80878
template< bool Signed >
struct operations< 16u, Signed > :
    public cas_based_operations< gcc_dcas_x86_64< Signed > >
{
};

#else

template< bool Signed >
struct operations< 16u, Signed > :
    public gcc_atomic_operations< 16u, Signed >
{
};

#endif
#endif
#if BOOST_ATOMIC_INT64_LOCK_FREE > 0
#if defined(__clang__) && defined(BOOST_ATOMIC_DETAIL_X86_HAS_CMPXCHG8B)

// Workaround for clang bug http://llvm.org/bugs/show_bug.cgi?id=19355
template< bool Signed >
struct operations< 8u, Signed > :
    public cas_based_operations< gcc_dcas_x86< Signed > >
{
    static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
    static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
};

#elif (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 8 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 8 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 8 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 8 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 8 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT64_EXTENDED

template< bool Signed >
struct operations< 8u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 8u, Signed >
{
};

#else

template< bool Signed >
struct operations< 8u, Signed > :
    public gcc_atomic_operations< 8u, Signed >
{
};

#endif
#endif
#if BOOST_ATOMIC_INT32_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 4 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 4 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 4 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 4 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 4 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT32_EXTENDED

#if !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 4u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 4u, Signed >
{
};

#else // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 4u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 4u, Signed >
{
};

#endif // !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

#else

template< bool Signed >
struct operations< 4u, Signed > :
    public gcc_atomic_operations< 4u, Signed >
{
};

#endif
#endif
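
/*!
 * A conceptual sketch of the extension approach used above (this is not the actual
 * extending_cas_based_operations implementation, which lives in ops_extending_cas_based.hpp):
 * a narrower operation is emulated with a CAS loop on the wider, truly lock-free storage,
 * and the result is truncated back to the narrow width. For example, a 4-byte fetch_add on
 * top of the 8-byte operations could look roughly like this (names are hypothetical):
 *
 * \code
 * typedef atomics::detail::gcc_atomic_operations< 8u, false > wide_ops;
 *
 * boost::uint32_t emulated_fetch_add32(wide_ops::storage_type volatile& storage, boost::uint32_t v)
 * {
 *     wide_ops::storage_type old_val = wide_ops::load(storage, boost::memory_order_relaxed);
 *     wide_ops::storage_type new_val;
 *     do
 *     {
 *         // Perform the addition in the narrow width, then widen the result for the CAS
 *         new_val = static_cast< boost::uint32_t >(static_cast< boost::uint32_t >(old_val) + v);
 *     }
 *     while (!wide_ops::compare_exchange_weak(storage, old_val, new_val, boost::memory_order_acq_rel, boost::memory_order_relaxed));
 *     return static_cast< boost::uint32_t >(old_val);
 * }
 * \endcode
 */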
#if BOOST_ATOMIC_INT16_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 2 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 2 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 2 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 2 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 2 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE)

#define BOOST_ATOMIC_DETAIL_INT16_EXTENDED

#if !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 2u, Signed >
{
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 2u, Signed >
{
};

#else

template< bool Signed >
struct operations< 2u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 2u, Signed >
{
};

#endif

#else

template< bool Signed >
struct operations< 2u, Signed > :
    public gcc_atomic_operations< 2u, Signed >
{
};

#endif
#endif
#if BOOST_ATOMIC_INT8_LOCK_FREE > 0
#if (BOOST_ATOMIC_DETAIL_SIZEOF_LLONG == 1 && __GCC_ATOMIC_LLONG_LOCK_FREE != BOOST_ATOMIC_LLONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_LONG == 1 && __GCC_ATOMIC_LONG_LOCK_FREE != BOOST_ATOMIC_LONG_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_INT == 1 && __GCC_ATOMIC_INT_LOCK_FREE != BOOST_ATOMIC_INT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_SHORT == 1 && __GCC_ATOMIC_SHORT_LOCK_FREE != BOOST_ATOMIC_SHORT_LOCK_FREE) ||\
    (BOOST_ATOMIC_DETAIL_SIZEOF_WCHAR_T == 1 && __GCC_ATOMIC_WCHAR_T_LOCK_FREE != BOOST_ATOMIC_WCHAR_T_LOCK_FREE) ||\
    (__GCC_ATOMIC_CHAR_LOCK_FREE != BOOST_ATOMIC_CHAR_LOCK_FREE) ||\
    (__GCC_ATOMIC_BOOL_LOCK_FREE != BOOST_ATOMIC_BOOL_LOCK_FREE)

#if !defined(BOOST_ATOMIC_DETAIL_INT16_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 2u, Signed >, 1u, Signed >
{
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT32_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 4u, Signed >, 1u, Signed >
{
};

#elif !defined(BOOST_ATOMIC_DETAIL_INT64_EXTENDED)

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 8u, Signed >, 1u, Signed >
{
};

#else

template< bool Signed >
struct operations< 1u, Signed > :
    public extending_cas_based_operations< gcc_atomic_operations< 16u, Signed >, 1u, Signed >
{
};

#endif

#else

template< bool Signed >
struct operations< 1u, Signed > :
    public gcc_atomic_operations< 1u, Signed >
{
};

#endif
#endif
#undef BOOST_ATOMIC_DETAIL_INT16_EXTENDED
#undef BOOST_ATOMIC_DETAIL_INT32_EXTENDED
#undef BOOST_ATOMIC_DETAIL_INT64_EXTENDED

BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
{
    __atomic_thread_fence(atomics::detail::convert_memory_order_to_gcc(order));
}

BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
{
    __atomic_signal_fence(atomics::detail::convert_memory_order_to_gcc(order));
}

} // namespace detail
} // namespace atomics
} // namespace boost

#endif // BOOST_ATOMIC_DETAIL_OPS_GCC_ATOMIC_HPP_INCLUDED_