aboutsummaryrefslogtreecommitdiffstats
path: root/chilbert
diff options
context:
space:
mode:
authorDavid Robillard <d@drobilla.net>2018-08-18 21:49:38 +0200
committerDavid Robillard <d@drobilla.net>2018-09-29 14:47:17 +0200
commit81f652c80c086ea008a4cd325f4e73764a9aa10a (patch)
treefe83b4c4c2d8338dc3d37873662b798b960ddfea /chilbert
parent0dc0ebb495dd2be014e705674d1acdf114eb594d (diff)
downloadchilbert-81f652c80c086ea008a4cd325f4e73764a9aa10a.tar.gz
chilbert-81f652c80c086ea008a4cd325f4e73764a9aa10a.tar.bz2
chilbert-81f652c80c086ea008a4cd325f4e73764a9aa10a.zip
Add mask interface and isolate rack details from algorithm
Diffstat (limited to 'chilbert')
-rw-r--r--chilbert/BigBitVec.hpp258
-rw-r--r--chilbert/FixBitVec.hpp184
-rw-r--r--chilbert/GrayCodeRank.hpp114
3 files changed, 385 insertions, 171 deletions
diff --git a/chilbert/BigBitVec.hpp b/chilbert/BigBitVec.hpp
index 7302880..29d9a5a 100644
--- a/chilbert/BigBitVec.hpp
+++ b/chilbert/BigBitVec.hpp
@@ -39,6 +39,50 @@ public:
static constexpr size_t bits_per_rack = sizeof(Rack) * CHAR_BIT;
+ /** Mask for a bit that can be incremented like an index.
+ *
+ * This enables fast iteration while setting or resetting bits since it is
+ * internally stored as a mask which avoids repeated index math.
+ */
+ class Mask
+ {
+ public:
+ void operator++()
+ {
+ if ((m_mask <<= 1) == 0) {
+ m_mask = 1;
+ ++m_rack;
+ }
+ }
+
+ void operator--()
+ {
+ if ((m_mask >>= 1) == 0) {
+ m_mask = Rack{1} << (bits_per_rack - 1);
+ --m_rack;
+ }
+ }
+
+ bool operator==(const Mask& mask) const
+ {
+ return m_rack == mask.m_rack && m_mask == mask.m_mask;
+ }
+
+ bool operator!=(const Mask& mask) const { return !operator==(mask); }
+
+ private:
+ friend class CBigBitVec;
+
+ Mask(const size_t index)
+ : m_rack{index / bits_per_rack}
+ , m_mask{Rack{1} << (index - m_rack * bits_per_rack)}
+ {
+ }
+
+ size_t m_rack;
+ Rack m_mask;
+ };
+
explicit CBigBitVec(const size_t bits)
: m_pcRacks{make_racks(num_racks(bits))}
, m_iRacks{bits == 0 ? 0 : num_racks(bits)}
@@ -83,16 +127,11 @@ public:
/// Truncate to a given precision in bits (zero MSBs)
CBigBitVec& truncate(const size_t bits)
{
- assert(bits <= size());
- const Ref ref(bits);
- if (ref.rack >= m_iRacks) {
- return *this;
- }
+ const Mask m = mask(bits);
- // Truncate rack that contains the split point
- m_pcRacks[ref.rack] &= ((Rack{1} << ref.bit) - 1);
+ m_pcRacks[m.m_rack] &= (m.m_mask - 1);
- for (size_t i = ref.rack + 1; i < m_iRacks; ++i) {
+ for (size_t i = m.m_rack + 1; i < m_iRacks; ++i) {
m_pcRacks[i] = 0;
}
@@ -145,50 +184,59 @@ public:
CBigBitVec& operator=(CBigBitVec&& vec) = default;
+ /// Return the value of the bit covered by `mask`
+ bool test(const Mask mask) const
+ {
+ return m_pcRacks[mask.m_rack] & mask.m_mask;
+ }
+
/// Return the value of the `index`th bit
- bool test(const size_t index) const
+ bool test(const size_t index) const { return test(mask(index)); }
+
+ /// Set the bit covered by `mask` to 1
+ CBigBitVec& set(const Mask mask)
{
- assert(index < size());
- const Ref ref(index);
- return testBit(m_pcRacks[ref.rack], ref.bit);
+ m_pcRacks[mask.m_rack] |= mask.m_mask;
+ return *this;
}
/// Set the `index`th bit to 1
- CBigBitVec& set(const size_t index)
+ CBigBitVec& set(const size_t index) { return set(mask(index)); }
+
+ /// Reset the bit covered by `mask` to 0
+ CBigBitVec& reset(const Mask mask)
{
- assert(index < size());
- const Ref ref(index);
- setBit(m_pcRacks[ref.rack], ref.bit);
+ m_pcRacks[mask.m_rack] &= ~mask.m_mask;
return *this;
}
/// Reset the `index`th bit to 0
- CBigBitVec& reset(const size_t index)
+ CBigBitVec& reset(const size_t index) { return reset(mask(index)); }
+
+ /// Set the bit covered by `mask` to `value`
+ CBigBitVec& set(const Mask mask, const bool value)
{
- assert(index < size());
- const Ref ref(index);
- m_pcRacks[ref.rack] &= ~(Rack{1} << ref.bit);
+ auto& rack = m_pcRacks[mask.m_rack];
+ rack ^= (-Rack{value} ^ rack) & mask.m_mask;
return *this;
}
/// Set the `index`th bit to `value`
CBigBitVec& set(const size_t index, const bool value)
{
- assert(index < size());
- const Ref ref(index);
- setBit(m_pcRacks[ref.rack], ref.bit, value);
- return *this;
+ return set(mask(index), value);
}
- /// Flip the value of the `index`th bit
- CBigBitVec& flip(const size_t index)
+ /// Flip the value of the bit covered by `mask`
+ CBigBitVec& flip(const Mask mask)
{
- assert(index < size());
- const Ref ref(index);
- m_pcRacks[ref.rack] ^= (Rack{1} << ref.bit);
+ m_pcRacks[mask.m_rack] ^= mask.m_mask;
return *this;
}
+ /// Flip the value of the `index`th bit
+ CBigBitVec& flip(const size_t index) { return flip(mask(index)); }
+
CBigBitVec& operator&=(const CBigBitVec& vec)
{
for (size_t i = 0; i < std::min(m_iRacks, vec.m_iRacks); ++i) {
@@ -249,40 +297,34 @@ public:
CBigBitVec& operator<<=(const size_t bits)
{
- assert(bits < size());
-
- // No shift?
if (bits == 0) {
return *this;
- }
-
- const Ref ref(bits);
-
- // All racks?
- if (ref.rack >= m_iRacks) {
+ } else if (bits >= size()) {
reset();
return *this;
}
- // Do rack shifts.
- if (ref.rack > 0) {
- for (size_t i = m_iRacks - 1; i >= ref.rack; --i) {
- m_pcRacks[i] = m_pcRacks[i - ref.rack];
+ const Index index{bits};
+
+ if (index.rack > 0) {
+ // Shift entire racks
+ for (size_t i = m_iRacks - 1; i >= index.rack; --i) {
+ m_pcRacks[i] = m_pcRacks[i - index.rack];
}
- for (size_t i = 0; i < ref.rack; ++i) {
+ for (size_t i = 0; i < index.rack; ++i) {
m_pcRacks[i] = 0;
}
}
- // Do bit shifts.
- if (ref.bit > 0) {
- size_t bi = bits_per_rack - ref.bit;
+ if (index.bit > 0) {
+ // Shift bits within racks
+ size_t bi = bits_per_rack - index.bit;
size_t i;
- for (i = m_iRacks - 1; i >= ref.rack + 1; --i) {
- m_pcRacks[i] <<= ref.bit;
+ for (i = m_iRacks - 1; i >= index.rack + 1; --i) {
+ m_pcRacks[i] <<= index.bit;
m_pcRacks[i] |= m_pcRacks[i - 1] >> bi;
}
- m_pcRacks[i] <<= ref.bit;
+ m_pcRacks[i] <<= index.bit;
}
return *this;
@@ -297,41 +339,35 @@ public:
CBigBitVec& operator>>=(const size_t bits)
{
- assert(bits < size());
-
- // No shift?
if (bits == 0) {
return *this;
- }
-
- const Ref ref(bits);
-
- // All racks?
- if (ref.rack >= m_iRacks) {
+ } else if (bits >= size()) {
reset();
return *this;
}
- // Do rack shifts.
- if (ref.rack > 0) {
+ const Index index{bits};
+
+ if (index.rack > 0) {
+ // Shift entire racks
size_t i;
- for (i = 0; i < m_iRacks - ref.rack; ++i) {
- m_pcRacks[i] = m_pcRacks[i + ref.rack];
+ for (i = 0; i < m_iRacks - index.rack; ++i) {
+ m_pcRacks[i] = m_pcRacks[i + index.rack];
}
for (; i < m_iRacks; ++i) {
m_pcRacks[i] = 0;
}
}
- // Do bit shifts.
- if (ref.bit > 0) {
- size_t bi = bits_per_rack - ref.bit;
+ if (index.bit > 0) {
+ // Shift bits within racks
+ size_t bi = bits_per_rack - index.bit;
size_t i;
- for (i = 0; i < m_iRacks - ref.rack - 1; ++i) {
- m_pcRacks[i] >>= ref.bit;
+ for (i = 0; i < m_iRacks - index.rack - 1; ++i) {
+ m_pcRacks[i] >>= index.bit;
m_pcRacks[i] |= m_pcRacks[i + 1] << bi;
}
- m_pcRacks[i] >>= ref.bit;
+ m_pcRacks[i] >>= index.bit;
}
return *this;
@@ -451,10 +487,92 @@ public:
/// Return the number of racks
size_t rackCount() const { return m_iRacks; }
+ template <class BitVec>
+ class iterator_base : public Mask
+ {
+ public:
+ iterator_base& operator++()
+ {
+ Mask::operator++();
+ return *this;
+ }
+
+ iterator_base& operator--()
+ {
+ Mask::operator--();
+ return *this;
+ }
+
+ bool operator==(const iterator_base& iterator_base) const
+ {
+ return m_vec == iterator_base.m_vec &&
+ Mask::operator==(iterator_base);
+ }
+
+ bool operator!=(const iterator_base& iterator_base) const
+ {
+ return !operator==(iterator_base);
+ }
+
+ bool operator*() const { return m_vec->test(*this); }
+
+ protected:
+ iterator_base(BitVec& vec, const size_t index)
+ : Mask{index}
+ , m_vec{&vec}
+ {
+ }
+
+ BitVec* m_vec;
+ };
+
+ class iterator : public iterator_base<CBigBitVec>
+ {
+ public:
+ void set() { m_vec->set(*this); }
+ void reset() { m_vec->reset(*this); }
+
+ private:
+ friend class CBigBitVec;
+
+ iterator(CBigBitVec& vec, const size_t index)
+ : iterator_base{vec, index}
+ {
+ }
+ };
+
+ class const_iterator : public iterator_base<const CBigBitVec>
+ {
+ private:
+ friend class CBigBitVec;
+
+ const_iterator(const CBigBitVec& vec, const size_t index)
+ : iterator_base{vec, index}
+ {
+ }
+ };
+
+ Mask mask(const size_t i = 0) const
+ {
+ assert(i <= size());
+ return Mask{i};
+ }
+
+ iterator begin(const size_t i = 0) { return iterator(*this, i); }
+
+ iterator end() { return iterator(*this, size()); }
+
+ const_iterator begin(const size_t i = 0) const
+ {
+ return const_iterator(*this, i);
+ }
+
+ const_iterator end() const { return const_iterator(*this, size()); }
+
private:
- struct Ref
+ struct Index
{
- Ref(const size_t bits)
+ Index(const size_t bits)
: rack{bits / bits_per_rack}
, bit{bits - rack * bits_per_rack}
{
diff --git a/chilbert/FixBitVec.hpp b/chilbert/FixBitVec.hpp
index 36667f3..0dab483 100644
--- a/chilbert/FixBitVec.hpp
+++ b/chilbert/FixBitVec.hpp
@@ -47,6 +47,38 @@ public:
static constexpr size_t bits_per_rack = sizeof(Rack) * CHAR_BIT;
+ /** Mask for a bit that can be incremented like an index.
+ *
+ * This enables fast iteration while setting or resetting bits since it is
+ * internally stored as a mask which avoids repeated index math.
+ */
+ class Mask
+ {
+ public:
+ void operator++() { m_mask <<= 1; }
+ void operator--() { m_mask >>= 1; }
+
+ bool operator==(const Mask& mask) const
+ {
+ return m_mask == mask.m_mask;
+ }
+
+ bool operator!=(const Mask& mask) const
+ {
+            return m_mask != mask.m_mask;
+ }
+
+ private:
+ friend class CFixBitVec;
+
+ Mask(const size_t index)
+ : m_mask{index < bits_per_rack ? Rack{1} << index : 0}
+ {
+ }
+
+ Rack m_mask;
+ };
+
CFixBitVec(const size_t bits = bits_per_rack)
: m_rack{0}
{
@@ -76,45 +108,55 @@ public:
return *this;
}
+ /// Return the value of the bit covered by `mask`
+ bool test(const Mask mask) const { return m_rack & mask.m_mask; }
+
/// Return the value of the `index`th bit
- bool test(const size_t index) const
+ bool test(const size_t index) const { return test(mask(index)); }
+
+ /// Set the bit covered by `mask` to 1
+ CFixBitVec& set(const Mask mask)
{
- assert(index < bits_per_rack);
- return ((m_rack & (FBV1 << index)) > 0);
+ m_rack |= mask.m_mask;
+ return *this;
}
/// Set the `index`th bit to 1
- CFixBitVec& set(const size_t index)
+ CFixBitVec& set(const size_t index) { return set(mask(index)); }
+
+ /// Reset the bit covered by `mask` to 0
+ CFixBitVec& reset(const Mask mask)
{
- assert(index < bits_per_rack);
- m_rack |= (Rack{1} << index);
+ m_rack &= ~mask.m_mask;
return *this;
}
/// Reset the `index`th bit to 0
- CFixBitVec& reset(const size_t index)
+ CFixBitVec& reset(const size_t index) { return reset(mask(index)); }
+
+ /// Set the bit covered by `mask` to `value`
+ CFixBitVec& set(const Mask mask, const bool value)
{
- assert(index < bits_per_rack);
- m_rack &= ~(Rack{1} << index);
+ m_rack ^= (-Rack{value} ^ m_rack) & mask.m_mask;
return *this;
}
/// Set the `index`th bit to `value`
CFixBitVec& set(const size_t index, const bool value)
{
- assert(index < bits_per_rack);
- m_rack ^= (-Rack{value} ^ m_rack) & (Rack{1U} << index);
- return *this;
+ return set(mask(index), value);
}
- /// Flip the value of the `index`th bit
- CFixBitVec& flip(const size_t index)
+ /// Flip the value of the bit covered by `mask`
+ CFixBitVec& flip(const Mask mask)
{
- assert(index < bits_per_rack);
- m_rack ^= (FBV1 << index);
+ m_rack ^= mask.m_mask;
return *this;
}
+ /// Flip the value of the `index`th bit
+ CFixBitVec& flip(const size_t index) { return flip(mask(index)); }
+
bool operator==(const CFixBitVec& vec) const
{
return m_rack == vec.m_rack;
@@ -216,22 +258,30 @@ public:
/// Right-rotate the least significant `width` bits by `bits` positions
CFixBitVec& rotr(const size_t bits, const size_t width)
{
- assert(width > 0);
- assert(bits < width);
- m_rack &= FBVN1S(width);
- m_rack = (m_rack >> bits) | (m_rack << (width - bits));
- m_rack &= FBVN1S(width);
+ if (bits) {
+ assert(width > 0);
+ assert(bits < width);
+ assert(bits < bits_per_rack);
+ assert((width - bits) < bits_per_rack);
+ m_rack &= FBVN1S(width);
+ m_rack = (m_rack >> bits) | (m_rack << (width - bits));
+ m_rack &= FBVN1S(width);
+ }
return *this;
}
/// Left-rotate the least significant `width` bits by `bits` positions
CFixBitVec& rotl(const size_t bits, const size_t width)
{
- assert(width > 0);
- assert(bits < width);
- m_rack &= FBVN1S(width);
- m_rack = (m_rack << bits) | (m_rack >> (width - bits));
- m_rack &= FBVN1S(width);
+ if (bits > 0) {
+ assert(width > 0);
+ assert(bits < width);
+ assert(bits < bits_per_rack);
+ assert((width - bits) < bits_per_rack);
+ m_rack &= FBVN1S(width);
+ m_rack = (m_rack << bits) | (m_rack >> (width - bits));
+ m_rack &= FBVN1S(width);
+ }
return *this;
}
@@ -265,6 +315,88 @@ public:
/// Return the number of racks
int rackCount() const { return 1; }
+ template <class BitVec>
+ class iterator_base : public Mask
+ {
+ public:
+ iterator_base& operator++()
+ {
+ Mask::operator++();
+ return *this;
+ }
+
+ iterator_base& operator--()
+ {
+ Mask::operator--();
+ return *this;
+ }
+
+ bool operator==(const iterator_base& iterator_base) const
+ {
+ return m_vec == iterator_base.m_vec &&
+ Mask::operator==(iterator_base);
+ }
+
+ bool operator!=(const iterator_base& iterator_base) const
+ {
+ return !operator==(iterator_base);
+ }
+
+ bool operator*() const { return m_vec->test(*this); }
+
+ protected:
+ iterator_base(BitVec& vec, const size_t index)
+ : Mask{index}
+ , m_vec{&vec}
+ {
+ }
+
+ BitVec* m_vec;
+ };
+
+ class iterator : public iterator_base<CFixBitVec>
+ {
+ public:
+ void set() { m_vec->set(*this); }
+ void reset() { m_vec->reset(*this); }
+
+ private:
+ friend class CFixBitVec;
+
+ iterator(CFixBitVec& vec, const size_t index)
+ : iterator_base{vec, index}
+ {
+ }
+ };
+
+ class const_iterator : public iterator_base<const CFixBitVec>
+ {
+ private:
+ friend class CFixBitVec;
+
+ const_iterator(const CFixBitVec& vec, const size_t index)
+ : iterator_base{vec, index}
+ {
+ }
+ };
+
+ Mask mask(const size_t i = 0) const
+ {
+ assert(i <= size());
+ return Mask{i};
+ }
+
+ iterator begin(const size_t i = 0) { return iterator(*this, i); }
+
+ iterator end() { return iterator(*this, size()); }
+
+ const_iterator begin(const size_t i = 0) const
+ {
+ return const_iterator(*this, i);
+ }
+
+ const_iterator end() const { return const_iterator(*this, size()); }
+
private:
static_assert(8 * sizeof(Rack) == bits_per_rack, "");
static_assert((sizeof(Rack) == 4) || (sizeof(Rack) == 8), "");
diff --git a/chilbert/GrayCodeRank.hpp b/chilbert/GrayCodeRank.hpp
index 60e644a..adb953a 100644
--- a/chilbert/GrayCodeRank.hpp
+++ b/chilbert/GrayCodeRank.hpp
@@ -19,17 +19,8 @@
#ifndef CHILBERT_GRAYCODERANK_HPP
#define CHILBERT_GRAYCODERANK_HPP
-#include "chilbert/BigBitVec.hpp"
-#include "chilbert/FixBitVec.hpp"
-
#include <cassert>
-
-#define MODSPLIT(r, b, k) \
- { \
- b = (k); \
- r = b / FBV_BITS; \
- b -= r * FBV_BITS; \
- }
+#include <cstddef>
namespace chilbert {
@@ -48,8 +39,8 @@ compactIndex(const size_t* const ms,
{
resetBits(hc);
- size_t hi = 0;
- size_t hci = 0;
+ auto hm = h.mask(0);
+ auto hcm = hc.mask(0);
// Run through the levels of precision
for (size_t i = 0; i < m; i++) {
@@ -58,16 +49,16 @@ compactIndex(const size_t* const ms,
do {
// This dimension contributes a bit?
if (ms[j] > i) {
- if (testBit(h, hi)) {
- setBit(hc, hci);
+ if (h.test(hm)) {
+ hc.set(hcm);
}
- ++hci;
+ ++hcm;
}
if (++j == n) {
j = 0;
}
- ++hi;
+ ++hm;
} while (j != ds[i]);
}
}
@@ -78,26 +69,16 @@ grayCodeRank(const I& mask, const I& gi, const size_t n, I& r)
{
r.reset();
- int jr = 0;
- FBV_UINT jm = 1;
- int ir = 0;
- FBV_UINT im = 1;
- for (size_t i = 0; i < n; ++i) {
- if (mask.racks()[ir] & im) {
- if (gi.racks()[ir] & im) {
- r.racks()[jr] |= jm;
- }
- jm <<= 1;
- if (jm == 0) {
- jm = 1;
- ++jr;
- }
- }
+ auto mi = mask.begin();
+ auto gii = gi.begin();
+ auto ri = r.begin();
- im <<= 1;
- if (im == 0) {
- im = 1;
- ++ir;
+ for (size_t i = 0; i < n; ++i, ++mi, ++gii) {
+ if (*mi) {
+ if (*gii) {
+ ri.set();
+ }
+ ++ri;
}
}
}
@@ -115,54 +96,42 @@ grayCodeRankInv(const I& mask,
g.reset();
gi.reset();
- size_t ir, jr;
- FBV_UINT im, jm;
-
- intptr_t i = static_cast<intptr_t>(n - 1);
- MODSPLIT(ir, im, (n - 1));
- im = (FBV1 << im);
+ assert(ptrn.size() == mask.size());
+ assert(g.size() == mask.size());
+ assert(gi.size() == mask.size());
- size_t j = b - 1;
- MODSPLIT(jr, jm, j);
- jm = (FBV1 << jm);
+ auto m = mask.mask(n - 1);
+ auto ri = r.begin(b - 1);
- FBV_UINT gi0, gi1, g0;
- gi1 = gi0 = g0 = 0;
+ typename I::Rack gi0 = 0;
+ typename I::Rack gi1 = 0;
+ typename I::Rack g0 = 0;
- for (; i >= 0; --i) {
- // Unconstrained bit?
- if (mask.racks()[ir] & im) {
+ for (size_t i = 0; i < n; ++i) {
+ if (mask.test(m)) { // Unconstrained bit
gi1 = gi0;
- gi0 = (r.racks()[jr] & jm) > 0;
+ gi0 = *ri;
g0 = gi0 ^ gi1;
if (gi0) {
- gi.racks()[ir] |= im;
+ gi.set(m);
}
if (g0) {
- g.racks()[ir] |= im;
+ g.set(m);
}
- jm >>= 1;
- if (jm == 0) {
- jm = (FBV_UINT{1}) << (FBV_BITS - 1);
- --jr;
- }
- } else {
- g0 = (ptrn.racks()[ir] & im) > 0;
+ --ri;
+ } else { // Constrained bit
+ g0 = (ptrn.test(m) > 0);
gi1 = gi0;
gi0 = g0 ^ gi1;
if (gi0) {
- gi.racks()[ir] |= im;
+ gi.set(m);
}
if (g0) {
- g.racks()[ir] |= im;
+ g.set(m);
}
}
- im >>= 1;
- if (im == 0) {
- im = (FBV_UINT{1}) << (FBV_BITS - 1);
- --ir;
- }
+ --m;
}
}
@@ -180,20 +149,15 @@ extractMask(const size_t* const ms,
mask.reset();
b = 0;
- FBV_UINT jm = 1;
- size_t jr = 0;
- size_t j = d; // #D j = (d==n-1) ? 0 : d+1;
+ auto mi = mask.begin();
+ size_t j = d; // #D j = (d==n-1) ? 0 : d+1;
do {
if (ms[j] > i) {
- mask.racks()[jr] |= jm;
+ mi.set();
++b;
}
- jm <<= 1;
- if (jm == 0) {
- jm = 1;
- ++jr;
- }
+ ++mi;
if (++j == n) {
j = 0;
}