[atomic] Unify typedef
Removes volatile from the fallback implementation. That was hand-wavy
anyway.
diff --git a/src/hb-atomic-private.hh b/src/hb-atomic-private.hh
index a888c46..73dbaef 100644
--- a/src/hb-atomic-private.hh
+++ b/src/hb-atomic-private.hh
@@ -35,7 +35,12 @@
#include "hb-private.hh"
-/* atomic_int */
+/*
+ * Atomic integers and pointers.
+ */
+
+
+typedef int hb_atomic_int_impl_t;
/* We need external help for these */
@@ -43,14 +48,13 @@
&& defined(hb_atomic_ptr_impl_get) \
&& defined(hb_atomic_ptr_impl_cmpexch)
-/* Defined externally, i.e. in config.h; must have typedef'ed hb_atomic_int_impl_t as well. */
+/* Defined externally, i.e. in config.h. */
#elif !defined(HB_NO_MT) && defined(__ATOMIC_CONSUME)
/* C++11-style GCC primitives. */
-typedef int hb_atomic_int_impl_t;
#define hb_atomic_int_impl_add(AI, V) __atomic_fetch_add ((AI), (V), __ATOMIC_ACQ_REL)
#define hb_atomic_int_impl_set_relaxed(AI, V) __atomic_store_n ((AI), (V), __ATOMIC_RELAXED)
#define hb_atomic_int_impl_get_relaxed(AI) __atomic_load_n ((AI), __ATOMIC_RELAXED)
@@ -70,7 +74,6 @@
#include <atomic>
-typedef int hb_atomic_int_impl_t;
#define hb_atomic_int_impl_add(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->fetch_add ((V), std::memory_order_acq_rel))
#define hb_atomic_int_impl_set_relaxed(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V), std::memory_order_relaxed))
#define hb_atomic_int_impl_get_relaxed(AI) (reinterpret_cast<std::atomic<int> *> (AI)->load (std::memory_order_relaxed))
@@ -101,7 +104,6 @@
}
#define _hb_memory_barrier() _hb_memory_barrier ()
-typedef int hb_atomic_int_impl_t;
#define hb_atomic_int_impl_add(AI, V) InterlockedExchangeAdd ((unsigned *) (AI), (V))
#define hb_atomic_ptr_impl_cmpexch(P,O,N) (InterlockedCompareExchangePointer ((void **) (P), (void *) (N), (void *) (O)) == (void *) (O))
@@ -111,7 +113,6 @@
#define _hb_memory_barrier() __sync_synchronize ()
-typedef int hb_atomic_int_impl_t;
#define hb_atomic_int_impl_add(AI, V) __sync_fetch_and_add ((AI), (V))
#define hb_atomic_ptr_impl_cmpexch(P,O,N) __sync_bool_compare_and_swap ((P), (O), (N))
@@ -126,8 +127,6 @@
#define _hb_memory_w_barrier() __machine_w_barrier ()
#define _hb_memory_barrier() __machine_rw_barrier ()
-typedef int hb_atomic_int_impl_t;
-
static inline int _hb_fetch_and_add (hb_atomic_int_impl_t *AI, int V)
{
_hb_memory_w_barrier ();
@@ -159,7 +158,6 @@
#define _hb_memory_barrier() OSMemoryBarrier ()
-typedef int hb_atomic_int_impl_t;
#define hb_atomic_int_impl_add(AI, V) (OSAtomicAdd32Barrier ((V), (AI)) - (V))
#if (MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_4 || __IPHONE_VERSION_MIN_REQUIRED >= 20100)
@@ -179,8 +177,6 @@
#define _hb_memory_barrier() __lwsync ()
-typedef int hb_atomic_int_impl_t;
-
static inline int _hb_fetch_and_add (hb_atomic_int_impl_t *AI, int V)
{
_hb_memory_barrier ();
@@ -208,15 +204,13 @@
#define _hb_memory_barrier()
-typedef volatile int hb_atomic_int_impl_t;
#define hb_atomic_int_impl_add(AI, V) ((*(AI) += (V)) - (V))
-#define hb_atomic_ptr_impl_cmpexch(P,O,N) (* (void * volatile *) (P) == (void *) (O) ? (* (void * volatile *) (P) = (void *) (N), true) : false)
+#define hb_atomic_ptr_impl_cmpexch(P,O,N) (* (void **) (P) == (void *) (O) ? (* (void **) (P) = (void *) (N), true) : false)
#else /* HB_NO_MT */
-typedef int hb_atomic_int_impl_t;
#define hb_atomic_int_impl_add(AI, V) ((*(AI) += (V)) - (V))
#define _hb_memory_barrier()