[atomic-int] Add operators for relaxed ops
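
The new assignment and conversion operators on hb_atomic_int_t forward to
set_relaxed() and get_relaxed(), so call sites that only need relaxed
memory ordering can use plain assignment and implicit integer reads instead
of spelling out the relaxed accessors. Roughly, a cached-index access like
the one in hb-bit-set.hh

    unsigned int i = last_page_lookup.get_relaxed ();
    ...
    last_page_lookup.set_relaxed (i);

becomes

    unsigned int i = last_page_lookup;
    ...
    last_page_lookup = i;

Call sites that want stronger ordering keep using the explicit methods
(e.g. set_release()); only the relaxed accesses are converted below.
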
diff --git a/src/hb-atomic.hh b/src/hb-atomic.hh
index ebed242..14c6fb3 100644
--- a/src/hb-atomic.hh
+++ b/src/hb-atomic.hh
@@ -159,6 +159,9 @@
hb_atomic_int_t () = default;
constexpr hb_atomic_int_t (int v) : v (v) {}
+ hb_atomic_int_t& operator = (int v_) { set_relaxed (v_); return *this; }
+ operator int () const { return get_relaxed (); }
+
void set_relaxed (int v_) { hb_atomic_int_impl_set_relaxed (&v, v_); }
void set_release (int v_) { hb_atomic_int_impl_set (&v, v_); }
int get_relaxed () const { return hb_atomic_int_impl_get_relaxed (&v); }
diff --git a/src/hb-bit-set.hh b/src/hb-bit-set.hh
index 4765af6..a63887e 100644
--- a/src/hb-bit-set.hh
+++ b/src/hb-bit-set.hh
@@ -56,7 +56,7 @@
{
successful = true;
population = 0;
- last_page_lookup.set_relaxed (0);
+ last_page_lookup = 0;
page_map.init ();
pages.init ();
}
@@ -614,7 +614,7 @@
const auto* page_map_array = page_map.arrayZ;
unsigned int major = get_major (*codepoint);
- unsigned int i = last_page_lookup.get_relaxed ();
+ unsigned int i = last_page_lookup;
if (unlikely (i >= page_map.length || page_map_array[i].major != major))
{
@@ -632,7 +632,7 @@
if (pages_array[current.index].next (codepoint))
{
*codepoint += current.major * page_t::PAGE_BITS;
- last_page_lookup.set_relaxed (i);
+ last_page_lookup = i;
return true;
}
i++;
@@ -645,11 +645,11 @@
if (m != INVALID)
{
*codepoint = current.major * page_t::PAGE_BITS + m;
- last_page_lookup.set_relaxed (i);
+ last_page_lookup = i;
return true;
}
}
- last_page_lookup.set_relaxed (0);
+ last_page_lookup = 0;
*codepoint = INVALID;
return false;
}
@@ -732,7 +732,7 @@
{
const auto* page_map_array = page_map.arrayZ;
unsigned int major = get_major (codepoint);
- unsigned int i = last_page_lookup.get_relaxed ();
+ unsigned int i = last_page_lookup;
if (unlikely (i >= page_map.length || page_map_array[i].major != major))
{
page_map.bfind (major, &i, HB_NOT_FOUND_STORE_CLOSEST);
@@ -773,7 +773,7 @@
{
const auto* page_map_array = page_map.arrayZ;
unsigned int major = get_major (codepoint);
- unsigned int i = last_page_lookup.get_relaxed ();
+ unsigned int i = last_page_lookup;
if (unlikely (i >= page_map.length || page_map_array[i].major != major))
{
page_map.bfind(major, &i, HB_NOT_FOUND_STORE_CLOSEST);
@@ -900,7 +900,7 @@
/* The extra page_map length is necessary; can't just rely on vector here,
* since the next check would be tricked because a null page also has
* major==0, which we can't distinguish from an actualy major==0 page... */
- unsigned i = last_page_lookup.get_relaxed ();
+ unsigned i = last_page_lookup;
if (likely (i < page_map.length))
{
auto &cached_page = page_map.arrayZ[i];
@@ -924,7 +924,7 @@
page_map[i] = map;
}
- last_page_lookup.set_relaxed (i);
+ last_page_lookup = i;
return &pages[page_map[i].index];
}
const page_t *page_for (hb_codepoint_t g) const
@@ -934,7 +934,7 @@
/* The extra page_map length is necessary; can't just rely on vector here,
* since the next check would be tricked because a null page also has
* major==0, which we can't distinguish from an actualy major==0 page... */
- unsigned i = last_page_lookup.get_relaxed ();
+ unsigned i = last_page_lookup;
if (likely (i < page_map.length))
{
auto &cached_page = page_map.arrayZ[i];
@@ -946,7 +946,7 @@
if (!page_map.bfind (key, &i))
return nullptr;
- last_page_lookup.set_relaxed (i);
+ last_page_lookup = i;
return &pages[page_map[i].index];
}
page_t &page_at (unsigned int i) { return pages[page_map[i].index]; }
diff --git a/src/hb-cache.hh b/src/hb-cache.hh
index d6b229e..55a8002 100644
--- a/src/hb-cache.hh
+++ b/src/hb-cache.hh
@@ -45,13 +45,13 @@
void clear ()
{
for (unsigned i = 0; i < ARRAY_LENGTH (values); i++)
- values[i].set_relaxed (-1);
+ values[i] = -1;
}
bool get (unsigned int key, unsigned int *value) const
{
unsigned int k = key & ((1u<<cache_bits)-1);
- unsigned int v = values[k].get_relaxed ();
+ unsigned int v = values[k];
if ((key_bits + value_bits - cache_bits == 8 * sizeof (hb_atomic_int_t) && v == (unsigned int) -1) ||
(v >> value_bits) != (key >> cache_bits))
return false;
@@ -65,7 +65,7 @@
return false; /* Overflows */
unsigned int k = key & ((1u<<cache_bits)-1);
unsigned int v = ((key>>cache_bits)<<value_bits) | value;
- values[k].set_relaxed (v);
+ values[k] = v;
return true;
}
diff --git a/src/hb-common.cc b/src/hb-common.cc
index e651287..bbb6cd5 100644
--- a/src/hb-common.cc
+++ b/src/hb-common.cc
@@ -99,7 +99,7 @@
}
/* This is idempotent and threadsafe. */
- _hb_options.set_relaxed (u.i);
+ _hb_options = u.i;
}
diff --git a/src/hb-debug.hh b/src/hb-debug.hh
index 905a46a..cbe13e5 100644
--- a/src/hb-debug.hh
+++ b/src/hb-debug.hh
@@ -67,12 +67,12 @@
#endif
/* Make a local copy, so we can access bitfield threadsafely. */
hb_options_union_t u;
- u.i = _hb_options.get_relaxed ();
+ u.i = _hb_options;
if (unlikely (!u.i))
{
_hb_options_init ();
- u.i = _hb_options.get_relaxed ();
+ u.i = _hb_options;
}
return u.opts;
diff --git a/src/hb-face.cc b/src/hb-face.cc
index 61adbdd..2160d6a 100644
--- a/src/hb-face.cc
+++ b/src/hb-face.cc
@@ -132,7 +132,7 @@
face->user_data = user_data;
face->destroy = destroy;
- face->num_glyphs.set_relaxed (-1);
+ face->num_glyphs = -1;
face->data.init0 (face);
face->table.init0 (face);
@@ -479,7 +479,7 @@
if (hb_object_is_immutable (face))
return;
- face->upem.set_relaxed (upem);
+ face->upem = upem;
}
/**
@@ -514,7 +514,7 @@
if (hb_object_is_immutable (face))
return;
- face->num_glyphs.set_relaxed (glyph_count);
+ face->num_glyphs = glyph_count;
}
/**
diff --git a/src/hb-face.hh b/src/hb-face.hh
index 765f272..12e10d0 100644
--- a/src/hb-face.hh
+++ b/src/hb-face.hh
@@ -83,7 +83,7 @@
unsigned int get_upem () const
{
- unsigned int ret = upem.get_relaxed ();
+ unsigned int ret = upem;
if (unlikely (!ret))
{
return load_upem ();
@@ -93,7 +93,7 @@
unsigned int get_num_glyphs () const
{
- unsigned int ret = num_glyphs.get_relaxed ();
+ unsigned int ret = num_glyphs;
if (unlikely (ret == UINT_MAX))
return load_num_glyphs ();
return ret;
diff --git a/src/hb-object.hh b/src/hb-object.hh
index aed3b12..8cbf7a8 100644
--- a/src/hb-object.hh
+++ b/src/hb-object.hh
@@ -144,14 +144,14 @@
{
mutable hb_atomic_int_t ref_count;
- void init (int v = 1) { ref_count.set_relaxed (v); }
- int get_relaxed () const { return ref_count.get_relaxed (); }
+ void init (int v = 1) { ref_count = v; }
+ int get_relaxed () const { return ref_count; }
int inc () const { return ref_count.inc (); }
int dec () const { return ref_count.dec (); }
- void fini () { ref_count.set_relaxed (-0x0000DEAD); }
+ void fini () { ref_count = -0x0000DEAD; }
- bool is_inert () const { return !ref_count.get_relaxed (); }
- bool is_valid () const { return ref_count.get_relaxed () > 0; }
+ bool is_inert () const { return !ref_count; }
+ bool is_valid () const { return ref_count > 0; }
};
@@ -233,7 +233,7 @@
static inline void hb_object_init (Type *obj)
{
obj->header.ref_count.init ();
- obj->header.writable.set_relaxed (true);
+ obj->header.writable = true;
obj->header.user_data.init ();
}
template <typename Type>
@@ -244,12 +244,12 @@
template <typename Type>
static inline bool hb_object_is_immutable (const Type *obj)
{
- return !obj->header.writable.get_relaxed ();
+ return !obj->header.writable;
}
template <typename Type>
static inline void hb_object_make_immutable (const Type *obj)
{
- obj->header.writable.set_relaxed (false);
+ obj->header.writable = false;
}
template <typename Type>
static inline Type *hb_object_reference (Type *obj)
diff --git a/src/hb-ot-shaper-indic.cc b/src/hb-ot-shaper-indic.cc
index 6eb400a..55509c1 100644
--- a/src/hb-ot-shaper-indic.cc
+++ b/src/hb-ot-shaper-indic.cc
@@ -276,7 +276,7 @@
{
bool load_virama_glyph (hb_font_t *font, hb_codepoint_t *pglyph) const
{
- hb_codepoint_t glyph = virama_glyph.get_relaxed ();
+ hb_codepoint_t glyph = virama_glyph;
if (unlikely (glyph == (hb_codepoint_t) -1))
{
if (!config->virama || !font->get_nominal_glyph (config->virama, &glyph))
@@ -286,7 +286,7 @@
/* Our get_nominal_glyph() function needs a font, so we can't get the virama glyph
* during shape planning... Instead, overwrite it here. */
- virama_glyph.set_relaxed ((int) glyph);
+ virama_glyph = (int) glyph;
}
*pglyph = glyph;
@@ -330,7 +330,7 @@
#ifndef HB_NO_UNISCRIBE_BUG_COMPATIBLE
indic_plan->uniscribe_bug_compatible = hb_options ().uniscribe_bug_compatible;
#endif
- indic_plan->virama_glyph.set_relaxed (-1);
+ indic_plan->virama_glyph = -1;
/* Use zero-context would_substitute() matching for new-spec of the main
* Indic scripts, and scripts with one spec only, but not for old-specs.
@@ -992,7 +992,7 @@
* class of I_Cat(H) is desired but has been lost. */
/* We don't call load_virama_glyph(), since we know it's already
* loaded. */
- hb_codepoint_t virama_glyph = indic_plan->virama_glyph.get_relaxed ();
+ hb_codepoint_t virama_glyph = indic_plan->virama_glyph;
if (virama_glyph)
{
for (unsigned int i = start; i < end; i++)
diff --git a/src/hb-ot-tag.cc b/src/hb-ot-tag.cc
index ceb3bf6..1b18270 100644
--- a/src/hb-ot-tag.cc
+++ b/src/hb-ot-tag.cc
@@ -307,12 +307,12 @@
hb_tag_t lang_tag = hb_tag_from_string (lang_str, first_len);
static hb_atomic_int_t last_tag_idx; /* Poor man's cache. */
- unsigned tag_idx = last_tag_idx.get_relaxed ();
+ unsigned tag_idx = last_tag_idx;
if (likely (tag_idx < ot_languages_len && ot_languages[tag_idx].language == lang_tag) ||
hb_sorted_array (ot_languages, ot_languages_len).bfind (lang_tag, &tag_idx))
{
- last_tag_idx.set_relaxed (tag_idx);
+ last_tag_idx = tag_idx;
unsigned int i;
while (tag_idx != 0 &&
ot_languages[tag_idx].language == ot_languages[tag_idx - 1].language)
diff --git a/src/hb-static.cc b/src/hb-static.cc
index af95615..5d4c7cd 100644
--- a/src/hb-static.cc
+++ b/src/hb-static.cc
@@ -94,7 +94,7 @@
ret = hb_max (ret, load_num_glyphs_from_maxp (this));
- num_glyphs.set_relaxed (ret);
+ num_glyphs = ret;
return ret;
}
@@ -102,7 +102,7 @@
hb_face_t::load_upem () const
{
unsigned int ret = table.head->get_upem ();
- upem.set_relaxed (ret);
+ upem = ret;
return ret;
}