author     David Robillard <d@drobilla.net>   2022-09-16 21:19:57 -0400
committer  David Robillard <d@drobilla.net>   2022-09-16 22:31:06 -0400
commit     2e4d666eec1a372d4b64c527d6c4945ad98d2b58 (patch)
tree       a3a61d346c3d4e5b590b08db446d17307df0e3c5 /include/chilbert
parent     bf05ef949750cfca6e001b0f767cd2173441fc8c (diff)
download   chilbert-2e4d666eec1a372d4b64c527d6c4945ad98d2b58.tar.gz
           chilbert-2e4d666eec1a372d4b64c527d6c4945ad98d2b58.tar.bz2
           chilbert-2e4d666eec1a372d4b64c527d6c4945ad98d2b58.zip
Update clang-format configuration
Diffstat (limited to 'include/chilbert')
 -rw-r--r--  include/chilbert/BoundedBitVec.hpp          | 112
 -rw-r--r--  include/chilbert/DynamicBitVec.hpp          | 186
 -rw-r--r--  include/chilbert/SmallBitVec.hpp            | 606
 -rw-r--r--  include/chilbert/StaticBitVec.hpp           | 100
 -rw-r--r--  include/chilbert/chilbert.hpp               |  40
 -rw-r--r--  include/chilbert/chilbert.ipp               | 584
 -rw-r--r--  include/chilbert/detail/BitVecIndex.hpp     |  31
 -rw-r--r--  include/chilbert/detail/BitVecIterator.hpp  |  79
 -rw-r--r--  include/chilbert/detail/BitVecMask.hpp      |  58
 -rw-r--r--  include/chilbert/detail/MultiBitVec.hpp     | 672
 -rw-r--r--  include/chilbert/detail/gray_code_rank.hpp  | 220
 -rw-r--r--  include/chilbert/detail/operations.hpp      |  92
 -rw-r--r--  include/chilbert/detail/traits.hpp          |   9
 -rw-r--r--  include/chilbert/operators.hpp              |  61
 14 files changed, 1434 insertions, 1416 deletions
diff --git a/include/chilbert/BoundedBitVec.hpp b/include/chilbert/BoundedBitVec.hpp
index 4f9ef60..6fe5d2a 100644
--- a/include/chilbert/BoundedBitVec.hpp
+++ b/include/chilbert/BoundedBitVec.hpp
@@ -41,81 +41,103 @@ namespace chilbert {
*
* @tparam MaxN Maximum number of bits.
*/
-template <size_t MaxN>
+template<size_t MaxN>
class BoundedBitVec : public detail::MultiBitVec<BoundedBitVec<MaxN>>
{
public:
- using Rack = typename detail::MultiBitVec<BoundedBitVec<MaxN>>::Rack;
+ using Rack = typename detail::MultiBitVec<BoundedBitVec<MaxN>>::Rack;
- using detail::MultiBitVec<BoundedBitVec<MaxN>>::bits_per_rack;
+ using detail::MultiBitVec<BoundedBitVec<MaxN>>::bits_per_rack;
- BoundedBitVec() = default;
+ BoundedBitVec() = default;
- explicit BoundedBitVec(const size_t bits)
- : m_size{bits}
- {
- assert(bits <= MaxN);
- }
+ explicit BoundedBitVec(const size_t bits)
+ : m_size{bits}
+ {
+ assert(bits <= MaxN);
+ }
- BoundedBitVec(const size_t bits, const Rack value)
- : BoundedBitVec{bits}
- {
- m_racks[0] = value;
- }
+ BoundedBitVec(const size_t bits, const Rack value)
+ : BoundedBitVec{bits}
+ {
+ m_racks[0] = value;
+ }
- /// Return the size in bits
- size_t size() const { return m_size; }
+ /// Return the size in bits
+ size_t size() const { return m_size; }
- /// Return a reference to the `index`th rack
+ /// Return a reference to the `index`th rack
#ifndef NDEBUG
- const auto& rack(const size_t index) const { return m_racks.at(index); }
- auto& rack(const size_t index) { return m_racks.at(index); }
+ const auto& rack(const size_t index) const
+ {
+ return m_racks.at(index);
+ }
+ auto& rack(const size_t index)
+ {
+ return m_racks.at(index);
+ }
#else
- const auto& rack(const size_t index) const { return m_racks[index]; }
- auto& rack(const size_t index) { return m_racks[index]; }
+ const auto& rack(const size_t index) const
+ {
+ return m_racks[index];
+ }
+ auto& rack(const size_t index)
+ {
+ return m_racks[index];
+ }
#endif
- /// Return a raw pointer to the racks
- Rack* data() { return m_racks.data(); }
- const Rack* data() const { return m_racks.data(); }
-
- /// Return the total size of all racks in bytes
- size_t data_size() const { return num_racks() * sizeof(Rack); }
-
- /// Return the number of racks
- size_t num_racks() const { return calculate_num_racks(m_size); }
+ /// Return a raw pointer to the racks
+ Rack* data()
+ {
+ return m_racks.data();
+ }
+ const Rack* data() const
+ {
+ return m_racks.data();
+ }
+
+ /// Return the total size of all racks in bytes
+ size_t data_size() const
+ {
+ return num_racks() * sizeof(Rack);
+ }
+
+ /// Return the number of racks
+ size_t num_racks() const
+ {
+ return calculate_num_racks(m_size);
+ }
private:
- static constexpr size_t calculate_num_racks(const size_t bits)
- {
- return (std::max(bits, size_t(1)) + bits_per_rack - 1) / bits_per_rack;
- }
+ static constexpr size_t calculate_num_racks(const size_t bits)
+ {
+ return (std::max(bits, size_t(1)) + bits_per_rack - 1) / bits_per_rack;
+ }
- std::array<Rack, calculate_num_racks(MaxN)> m_racks{};
- size_t m_size{};
+ std::array<Rack, calculate_num_racks(MaxN)> m_racks{};
+ size_t m_size{};
};
namespace detail {
-template <size_t MaxN>
-struct is_bitvec<BoundedBitVec<MaxN>>
-{
- constexpr static bool value = true;
+template<size_t MaxN>
+struct is_bitvec<BoundedBitVec<MaxN>> {
+ constexpr static bool value = true;
};
-template <size_t MaxN>
+template<size_t MaxN>
void
gray_code(BoundedBitVec<MaxN>& value)
{
- gray_code(static_cast<detail::MultiBitVec<BoundedBitVec<MaxN>>&>(value));
+ gray_code(static_cast<detail::MultiBitVec<BoundedBitVec<MaxN>>&>(value));
}
-template <size_t MaxN>
+template<size_t MaxN>
void
gray_code_inv(BoundedBitVec<MaxN>& value)
{
- gray_code_inv(
- static_cast<detail::MultiBitVec<BoundedBitVec<MaxN>>&>(value));
+ gray_code_inv(static_cast<detail::MultiBitVec<BoundedBitVec<MaxN>>&>(value));
}
} // namespace detail
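
A hypothetical usage sketch of the BoundedBitVec shown above (not taken from the repository): the storage is sized at compile time for MaxN bits, while the logical size is chosen at run time and merely asserted to fit.

#include <chilbert/BoundedBitVec.hpp>

#include <cassert>

int
main()
{
  chilbert::BoundedBitVec<128> v{80}; // 80 live bits in 128-bit storage

  v.set(3).set(79); // set(), test(), count() are inherited from MultiBitVec
  assert(v.test(3) && v.test(79) && !v.test(0));
  assert(v.count() == 2);
  assert(v.size() == 80);
  return 0;
}
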
diff --git a/include/chilbert/DynamicBitVec.hpp b/include/chilbert/DynamicBitVec.hpp
index fe7a535..895860c 100644
--- a/include/chilbert/DynamicBitVec.hpp
+++ b/include/chilbert/DynamicBitVec.hpp
@@ -41,117 +41,113 @@ namespace chilbert {
class DynamicBitVec : public detail::MultiBitVec<DynamicBitVec>
{
public:
- struct RacksDeleter
- {
- void operator()(Rack* const racks) { free(racks); }
- };
-
- struct NullDeleter
- {
- void operator()(const Rack* const) {}
- };
-
- using RacksPtr = std::unique_ptr<Rack[], RacksDeleter>;
- using ConstRacksPtr = std::unique_ptr<const Rack[], NullDeleter>;
-
- explicit DynamicBitVec(const size_t bits)
- : m_racks{make_racks(calculate_num_racks(bits))}
- , m_size{bits}
- {
- }
-
- DynamicBitVec(const size_t bits, const Rack value)
- : DynamicBitVec{bits}
- {
- m_racks[0] = value;
- }
-
- DynamicBitVec(const DynamicBitVec& vec)
- : m_racks{make_racks(vec.num_racks())}
- , m_size{vec.m_size}
- {
- if (vec.data()) {
- memcpy(data(), vec.data(), data_size());
- }
- }
-
- DynamicBitVec(DynamicBitVec&&) = default;
-
- DynamicBitVec& operator=(const DynamicBitVec& vec)
- {
- if (&vec != this) {
- if (num_racks() < vec.num_racks()) {
- m_racks = make_racks(vec.num_racks());
- m_size = vec.m_size;
- memcpy(data(), vec.data(), data_size());
- } else if (vec.num_racks() > 0) {
- m_size = vec.m_size;
- memcpy(data(), vec.data(), data_size());
- } else {
- m_size = 0;
- m_racks.reset();
- }
- }
-
- return *this;
- }
-
- DynamicBitVec& operator=(DynamicBitVec&&) = default;
-
- ~DynamicBitVec() = default;
-
- /// Return the size in bits
- size_t size() const { return m_size; }
-
- /// Return a reference to the `index`th rack
- const Rack& rack(const size_t index) const { return m_racks[index]; }
- Rack& rack(const size_t index) { return m_racks[index]; }
-
- /// Return a raw pointer to the racks
- Rack* data() { return m_racks.get(); }
- const Rack* data() const { return m_racks.get(); }
-
- /// Return the total size of all racks in bytes
- size_t data_size() const { return num_racks() * sizeof(Rack); }
-
- /// Return the number of racks
- size_t num_racks() const { return calculate_num_racks(m_size); }
+ struct RacksDeleter {
+ void operator()(Rack* const racks) { free(racks); }
+ };
+
+ struct NullDeleter {
+ void operator()(const Rack* const) {}
+ };
+
+ using RacksPtr = std::unique_ptr<Rack[], RacksDeleter>;
+ using ConstRacksPtr = std::unique_ptr<const Rack[], NullDeleter>;
+
+ explicit DynamicBitVec(const size_t bits)
+ : m_racks{make_racks(calculate_num_racks(bits))}
+ , m_size{bits}
+ {}
+
+ DynamicBitVec(const size_t bits, const Rack value)
+ : DynamicBitVec{bits}
+ {
+ m_racks[0] = value;
+ }
+
+ DynamicBitVec(const DynamicBitVec& vec)
+ : m_racks{make_racks(vec.num_racks())}
+ , m_size{vec.m_size}
+ {
+ if (vec.data()) {
+ memcpy(data(), vec.data(), data_size());
+ }
+ }
+
+ DynamicBitVec(DynamicBitVec&&) = default;
+
+ DynamicBitVec& operator=(const DynamicBitVec& vec)
+ {
+ if (&vec != this) {
+ if (num_racks() < vec.num_racks()) {
+ m_racks = make_racks(vec.num_racks());
+ m_size = vec.m_size;
+ memcpy(data(), vec.data(), data_size());
+ } else if (vec.num_racks() > 0) {
+ m_size = vec.m_size;
+ memcpy(data(), vec.data(), data_size());
+ } else {
+ m_size = 0;
+ m_racks.reset();
+ }
+ }
+
+ return *this;
+ }
+
+ DynamicBitVec& operator=(DynamicBitVec&&) = default;
+
+ ~DynamicBitVec() = default;
+
+ /// Return the size in bits
+ size_t size() const { return m_size; }
+
+ /// Return a reference to the `index`th rack
+ const Rack& rack(const size_t index) const { return m_racks[index]; }
+ Rack& rack(const size_t index) { return m_racks[index]; }
+
+ /// Return a raw pointer to the racks
+ Rack* data() { return m_racks.get(); }
+ const Rack* data() const { return m_racks.get(); }
+
+ /// Return the total size of all racks in bytes
+ size_t data_size() const { return num_racks() * sizeof(Rack); }
+
+ /// Return the number of racks
+ size_t num_racks() const { return calculate_num_racks(m_size); }
private:
- static size_t calculate_num_racks(const size_t bits)
- {
- return (std::max(bits, size_t(1)) + bits_per_rack - 1) / bits_per_rack;
- }
-
- static RacksPtr make_racks(const size_t n)
- {
- return RacksPtr{static_cast<Rack*>(calloc(n, sizeof(Rack)))};
- }
-
- RacksPtr m_racks;
- size_t m_size;
+ static size_t calculate_num_racks(const size_t bits)
+ {
+ return (std::max(bits, size_t(1)) + bits_per_rack - 1) / bits_per_rack;
+ }
+
+ static RacksPtr make_racks(const size_t n)
+ {
+ return RacksPtr{static_cast<Rack*>(calloc(n, sizeof(Rack)))};
+ }
+
+ RacksPtr m_racks;
+ size_t m_size;
};
namespace detail {
-template <>
-struct is_bitvec<DynamicBitVec>
-{
- constexpr static bool value = true;
+template<>
+struct is_bitvec<DynamicBitVec> {
+ constexpr static bool value = true;
};
-template <>
+template<>
inline void
gray_code(DynamicBitVec& value)
{
- gray_code(static_cast<MultiBitVec<DynamicBitVec>&>(value));
+ gray_code(static_cast<MultiBitVec<DynamicBitVec>&>(value));
}
-template <>
+template<>
inline void
gray_code_inv(DynamicBitVec& value)
{
- gray_code_inv(static_cast<MultiBitVec<DynamicBitVec>&>(value));
+ gray_code_inv(static_cast<MultiBitVec<DynamicBitVec>&>(value));
}
} // namespace detail
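
The rack storage above pairs calloc() with a deleter that calls free(), so the buffer is zero-initialized on allocation and released with the matching deallocation function. A generic sketch of that idiom, outside of chilbert (the names FreeDeleter, WordsPtr, and make_zeroed_words are invented for this example):

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <memory>

struct FreeDeleter {
  void operator()(std::uint64_t* const words) const { std::free(words); }
};

using WordsPtr = std::unique_ptr<std::uint64_t[], FreeDeleter>;

static WordsPtr
make_zeroed_words(const std::size_t n)
{
  // calloc() zero-fills, so every word starts out as 0, like reset()
  return WordsPtr{
    static_cast<std::uint64_t*>(std::calloc(n, sizeof(std::uint64_t)))};
}

int
main()
{
  const WordsPtr words = make_zeroed_words(4);
  return (words && words[0] == 0) ? 0 : 1; // all four words start at zero
}
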
diff --git a/include/chilbert/SmallBitVec.hpp b/include/chilbert/SmallBitVec.hpp
index 6f04965..485ca9e 100644
--- a/include/chilbert/SmallBitVec.hpp
+++ b/include/chilbert/SmallBitVec.hpp
@@ -33,336 +33,316 @@ namespace chilbert {
class SmallBitVec
{
public:
- using Rack = uintptr_t;
-
- static constexpr size_t bits_per_rack = sizeof(Rack) * CHAR_BIT;
-
- /** Mask for a bit that can be incremented like an index.
- *
- * This enables fast iteration while setting or resetting bits since it is
- * internally stored as a mask which avoids repeated index math.
- */
- class Mask
- {
- public:
- void operator++() { m_mask <<= 1U; }
- void operator--() { m_mask >>= 1U; }
-
- bool operator==(const Mask& mask) const
- {
- return m_mask == mask.m_mask;
- }
-
- bool operator!=(const Mask& mask) const
- {
- return m_mask == mask.m_mask;
- }
-
- private:
- friend class SmallBitVec;
-
- explicit Mask(const size_t index)
- : m_mask{index < bits_per_rack ? Rack{1} << index : 0}
- {
- }
-
- Rack m_mask;
- };
-
- explicit SmallBitVec(const size_t bits)
- : m_size{bits}
- {
- assert(bits <= bits_per_rack);
- }
-
- SmallBitVec(const size_t bits, const Rack value)
- : m_rack{value}
- , m_size{bits}
- {
- assert(bits <= bits_per_rack);
- }
-
- /// Return the size in bits
- size_t size() const { return m_size; }
-
- /// Set all bits to one
- SmallBitVec& set()
- {
- m_rack = ~Rack{0} >> (bits_per_rack - m_size);
- return *this;
- }
-
- /// Set all bits to zero
- SmallBitVec& reset()
- {
- m_rack = 0;
- return *this;
- }
-
- /// Return the value of the bit covered by `mask`
- bool test(const Mask mask) const { return m_rack & mask.m_mask; }
-
- /// Return the value of the `index`th bit
- bool test(const size_t index) const { return test(mask(index)); }
-
- /// Set the bit covered by `mask` to 1
- SmallBitVec& set(const Mask mask)
- {
- m_rack |= mask.m_mask;
- return *this;
- }
-
- /// Set the `index`th bit to 1
- SmallBitVec& set(const size_t index) { return set(mask(index)); }
-
- /// Reset the bit covered by `mask` to 0
- SmallBitVec& reset(const Mask mask)
- {
- m_rack &= ~mask.m_mask;
- return *this;
- }
-
- /// Reset the `index`th bit to 0
- SmallBitVec& reset(const size_t index) { return reset(mask(index)); }
-
- /// Set the bit covered by `mask` to `value`
- SmallBitVec& set(const Mask mask, const bool value)
- {
- m_rack ^= (-Rack{value} ^ m_rack) & mask.m_mask;
- return *this;
- }
-
- /// Set the `index`th bit to `value`
- SmallBitVec& set(const size_t index, const bool value)
- {
- return set(mask(index), value);
- }
-
- /// Flip the value of the bit covered by `mask`
- SmallBitVec& flip(const Mask mask)
- {
- m_rack ^= mask.m_mask;
- return *this;
- }
-
- /// Flip the value of the `index`th bit
- SmallBitVec& flip(const size_t index) { return flip(mask(index)); }
-
- bool operator==(const SmallBitVec& vec) const
- {
- return m_rack == vec.m_rack;
- }
-
- bool operator!=(const SmallBitVec& vec) const
- {
- return m_rack != vec.m_rack;
- }
-
- bool operator<(const SmallBitVec& vec) const { return m_rack < vec.m_rack; }
-
- SmallBitVec& operator=(const Rack i)
- {
- m_rack = i;
- return *this;
- }
-
- SmallBitVec& operator&=(const SmallBitVec& vec)
- {
- m_rack &= vec.m_rack;
- return *this;
- }
-
- SmallBitVec& operator|=(const SmallBitVec& vec)
- {
- m_rack |= vec.m_rack;
- return *this;
- }
-
- SmallBitVec& operator^=(const SmallBitVec& vec)
- {
- m_rack ^= vec.m_rack;
- return *this;
- }
-
- SmallBitVec& operator^=(const Rack i)
- {
- m_rack ^= i;
- return *this;
- }
-
- SmallBitVec& operator<<=(const size_t bits)
- {
- assert(bits < size());
- m_rack <<= bits;
- return *this;
- }
-
- SmallBitVec& operator>>=(const size_t bits)
- {
- assert(bits < size());
- m_rack >>= bits;
- return *this;
- }
-
- /// Right-rotate by `bits` positions
- SmallBitVec& rotr(const size_t bits)
- {
- if (bits > 0 && bits < size()) {
- assert(bits <= bits_per_rack);
- m_rack = (m_rack >> bits) | (m_rack << (size() - bits));
- m_rack &= (~Rack{0} >> (bits_per_rack - size()));
- }
- return *this;
- }
-
- /// Left-rotate by `bits` positions
- SmallBitVec& rotl(const size_t bits)
- {
- if (bits > 0 && bits < size()) {
- assert(bits <= bits_per_rack);
- m_rack = (m_rack << bits) | (m_rack >> (size() - bits));
- m_rack &= (~Rack{0} >> (bits_per_rack - size()));
- }
- return *this;
- }
-
- /// Return true iff all bits are zero
- bool none() const { return m_rack == 0; }
-
- /// Return 1 + the index of the first set bit, or 0 if there are none
- size_t find_first() const
- {
- return static_cast<size_t>(detail::find_first(m_rack));
- }
-
- /// Return the number of set bits
- size_t count() const
- {
- return static_cast<size_t>(detail::pop_count(m_rack));
- }
-
- /// Flip all bits (one's complement)
- SmallBitVec& flip()
- {
- m_rack = ~m_rack;
- return *this;
- }
-
- /// Return the first rack
- Rack& rack() { return m_rack; }
- Rack rack() const { return m_rack; }
-
- /// Return a raw pointer to the racks
- Rack* data() { return &m_rack; }
- const Rack* data() const { return &m_rack; }
-
- /// Return the number of racks
- size_t num_racks() const { return 1; }
-
- template <class BitVec>
- class iterator_base : public Mask
- {
- public:
- iterator_base& operator++()
- {
- Mask::operator++();
- return *this;
- }
-
- iterator_base& operator--()
- {
- Mask::operator--();
- return *this;
- }
-
- bool operator==(const iterator_base& rhs) const
- {
- return m_vec == rhs.m_vec && Mask::operator==(rhs);
- }
-
- bool operator!=(const iterator_base& rhs) const
- {
- return !operator==(rhs);
- }
-
- bool operator*() const { return m_vec->test(*this); }
-
- protected:
- iterator_base(BitVec& vec, const size_t index)
- : Mask{index}
- , m_vec{&vec}
- {
- }
-
- BitVec* m_vec;
- };
-
- class iterator : public iterator_base<SmallBitVec>
- {
- public:
- void set() { m_vec->set(*this); }
- void reset() { m_vec->reset(*this); }
-
- private:
- friend class SmallBitVec;
-
- iterator(SmallBitVec& vec, const size_t index)
- : iterator_base{vec, index}
- {
- }
- };
-
- class const_iterator : public iterator_base<const SmallBitVec>
- {
- private:
- friend class SmallBitVec;
-
- const_iterator(const SmallBitVec& vec, const size_t index)
- : iterator_base{vec, index}
- {
- }
- };
-
- Mask mask(const size_t i = 0) const
- {
- assert(i <= size());
- return Mask{i};
- }
-
- iterator begin(const size_t i = 0) { return {*this, i}; }
- iterator end() { return {*this, size()}; }
-
- const_iterator begin(const size_t i = 0) const { return {*this, i}; }
- const_iterator end() const { return {*this, size()}; }
+ using Rack = uintptr_t;
+
+ static constexpr size_t bits_per_rack = sizeof(Rack) * CHAR_BIT;
+
+ /** Mask for a bit that can be incremented like an index.
+ *
+ * This enables fast iteration while setting or resetting bits since it is
+ * internally stored as a mask which avoids repeated index math.
+ */
+ class Mask
+ {
+ public:
+ void operator++() { m_mask <<= 1U; }
+ void operator--() { m_mask >>= 1U; }
+
+ bool operator==(const Mask& mask) const { return m_mask == mask.m_mask; }
+
+ bool operator!=(const Mask& mask) const { return m_mask == mask.m_mask; }
+
+ private:
+ friend class SmallBitVec;
+
+ explicit Mask(const size_t index)
+ : m_mask{index < bits_per_rack ? Rack{1} << index : 0}
+ {}
+
+ Rack m_mask;
+ };
+
+ explicit SmallBitVec(const size_t bits)
+ : m_size{bits}
+ {
+ assert(bits <= bits_per_rack);
+ }
+
+ SmallBitVec(const size_t bits, const Rack value)
+ : m_rack{value}
+ , m_size{bits}
+ {
+ assert(bits <= bits_per_rack);
+ }
+
+ /// Return the size in bits
+ size_t size() const { return m_size; }
+
+ /// Set all bits to one
+ SmallBitVec& set()
+ {
+ m_rack = ~Rack{0} >> (bits_per_rack - m_size);
+ return *this;
+ }
+
+ /// Set all bits to zero
+ SmallBitVec& reset()
+ {
+ m_rack = 0;
+ return *this;
+ }
+
+ /// Return the value of the bit covered by `mask`
+ bool test(const Mask mask) const { return m_rack & mask.m_mask; }
+
+ /// Return the value of the `index`th bit
+ bool test(const size_t index) const { return test(mask(index)); }
+
+ /// Set the bit covered by `mask` to 1
+ SmallBitVec& set(const Mask mask)
+ {
+ m_rack |= mask.m_mask;
+ return *this;
+ }
+
+ /// Set the `index`th bit to 1
+ SmallBitVec& set(const size_t index) { return set(mask(index)); }
+
+ /// Reset the bit covered by `mask` to 0
+ SmallBitVec& reset(const Mask mask)
+ {
+ m_rack &= ~mask.m_mask;
+ return *this;
+ }
+
+ /// Reset the `index`th bit to 0
+ SmallBitVec& reset(const size_t index) { return reset(mask(index)); }
+
+ /// Set the bit covered by `mask` to `value`
+ SmallBitVec& set(const Mask mask, const bool value)
+ {
+ m_rack ^= (-Rack{value} ^ m_rack) & mask.m_mask;
+ return *this;
+ }
+
+ /// Set the `index`th bit to `value`
+ SmallBitVec& set(const size_t index, const bool value)
+ {
+ return set(mask(index), value);
+ }
+
+ /// Flip the value of the bit covered by `mask`
+ SmallBitVec& flip(const Mask mask)
+ {
+ m_rack ^= mask.m_mask;
+ return *this;
+ }
+
+ /// Flip the value of the `index`th bit
+ SmallBitVec& flip(const size_t index) { return flip(mask(index)); }
+
+ bool operator==(const SmallBitVec& vec) const { return m_rack == vec.m_rack; }
+
+ bool operator!=(const SmallBitVec& vec) const { return m_rack != vec.m_rack; }
+
+ bool operator<(const SmallBitVec& vec) const { return m_rack < vec.m_rack; }
+
+ SmallBitVec& operator=(const Rack i)
+ {
+ m_rack = i;
+ return *this;
+ }
+
+ SmallBitVec& operator&=(const SmallBitVec& vec)
+ {
+ m_rack &= vec.m_rack;
+ return *this;
+ }
+
+ SmallBitVec& operator|=(const SmallBitVec& vec)
+ {
+ m_rack |= vec.m_rack;
+ return *this;
+ }
+
+ SmallBitVec& operator^=(const SmallBitVec& vec)
+ {
+ m_rack ^= vec.m_rack;
+ return *this;
+ }
+
+ SmallBitVec& operator^=(const Rack i)
+ {
+ m_rack ^= i;
+ return *this;
+ }
+
+ SmallBitVec& operator<<=(const size_t bits)
+ {
+ assert(bits < size());
+ m_rack <<= bits;
+ return *this;
+ }
+
+ SmallBitVec& operator>>=(const size_t bits)
+ {
+ assert(bits < size());
+ m_rack >>= bits;
+ return *this;
+ }
+
+ /// Right-rotate by `bits` positions
+ SmallBitVec& rotr(const size_t bits)
+ {
+ if (bits > 0 && bits < size()) {
+ assert(bits <= bits_per_rack);
+ m_rack = (m_rack >> bits) | (m_rack << (size() - bits));
+ m_rack &= (~Rack{0} >> (bits_per_rack - size()));
+ }
+ return *this;
+ }
+
+ /// Left-rotate by `bits` positions
+ SmallBitVec& rotl(const size_t bits)
+ {
+ if (bits > 0 && bits < size()) {
+ assert(bits <= bits_per_rack);
+ m_rack = (m_rack << bits) | (m_rack >> (size() - bits));
+ m_rack &= (~Rack{0} >> (bits_per_rack - size()));
+ }
+ return *this;
+ }
+
+ /// Return true iff all bits are zero
+ bool none() const { return m_rack == 0; }
+
+ /// Return 1 + the index of the first set bit, or 0 if there are none
+ size_t find_first() const
+ {
+ return static_cast<size_t>(detail::find_first(m_rack));
+ }
+
+ /// Return the number of set bits
+ size_t count() const
+ {
+ return static_cast<size_t>(detail::pop_count(m_rack));
+ }
+
+ /// Flip all bits (one's complement)
+ SmallBitVec& flip()
+ {
+ m_rack = ~m_rack;
+ return *this;
+ }
+
+ /// Return the first rack
+ Rack& rack() { return m_rack; }
+ Rack rack() const { return m_rack; }
+
+ /// Return a raw pointer to the racks
+ Rack* data() { return &m_rack; }
+ const Rack* data() const { return &m_rack; }
+
+ /// Return the number of racks
+ size_t num_racks() const { return 1; }
+
+ template<class BitVec>
+ class iterator_base : public Mask
+ {
+ public:
+ iterator_base& operator++()
+ {
+ Mask::operator++();
+ return *this;
+ }
+
+ iterator_base& operator--()
+ {
+ Mask::operator--();
+ return *this;
+ }
+
+ bool operator==(const iterator_base& rhs) const
+ {
+ return m_vec == rhs.m_vec && Mask::operator==(rhs);
+ }
+
+ bool operator!=(const iterator_base& rhs) const { return !operator==(rhs); }
+
+ bool operator*() const { return m_vec->test(*this); }
+
+ protected:
+ iterator_base(BitVec& vec, const size_t index)
+ : Mask{index}
+ , m_vec{&vec}
+ {}
+
+ BitVec* m_vec;
+ };
+
+ class iterator : public iterator_base<SmallBitVec>
+ {
+ public:
+ void set() { m_vec->set(*this); }
+ void reset() { m_vec->reset(*this); }
+
+ private:
+ friend class SmallBitVec;
+
+ iterator(SmallBitVec& vec, const size_t index)
+ : iterator_base{vec, index}
+ {}
+ };
+
+ class const_iterator : public iterator_base<const SmallBitVec>
+ {
+ private:
+ friend class SmallBitVec;
+
+ const_iterator(const SmallBitVec& vec, const size_t index)
+ : iterator_base{vec, index}
+ {}
+ };
+
+ Mask mask(const size_t i = 0) const
+ {
+ assert(i <= size());
+ return Mask{i};
+ }
+
+ iterator begin(const size_t i = 0) { return {*this, i}; }
+ iterator end() { return {*this, size()}; }
+
+ const_iterator begin(const size_t i = 0) const { return {*this, i}; }
+ const_iterator end() const { return {*this, size()}; }
private:
- static_assert(8 * sizeof(Rack) == bits_per_rack, "");
- static_assert((sizeof(Rack) == 4) || (sizeof(Rack) == 8), "");
+ static_assert(8 * sizeof(Rack) == bits_per_rack, "");
+ static_assert((sizeof(Rack) == 4) || (sizeof(Rack) == 8), "");
- Rack m_rack{};
- size_t m_size{};
+ Rack m_rack{};
+ size_t m_size{};
};
namespace detail {
-template <>
-struct is_bitvec<SmallBitVec>
-{
- constexpr static bool value = true;
+template<>
+struct is_bitvec<SmallBitVec> {
+ constexpr static bool value = true;
};
-template <>
+template<>
inline void
gray_code(SmallBitVec& value)
{
- value.rack() ^= (value.rack() >> 1U);
+ value.rack() ^= (value.rack() >> 1U);
}
-template <>
+template<>
inline void
gray_code_inv(SmallBitVec& value)
{
- gray_code_inv<SmallBitVec::Rack>(value.rack());
+ gray_code_inv<SmallBitVec::Rack>(value.rack());
}
} // namespace detail
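
The single-rack Gray code above is the classic transform g = b ^ (b >> 1); its inverse (defined in detail/operations.hpp, which is outside this diff) is a prefix XOR over all higher bits. A small self-contained check of that pair, with gray_code_inv_word() standing in for the real overload:

#include <cstdint>

constexpr std::uint64_t
gray_code_word(const std::uint64_t b)
{
  return b ^ (b >> 1U);
}

constexpr std::uint64_t
gray_code_inv_word(std::uint64_t g)
{
  // Fold the word onto itself so every bit becomes the XOR of itself and
  // all higher bits, undoing g = b ^ (b >> 1)
  g ^= g >> 32U;
  g ^= g >> 16U;
  g ^= g >> 8U;
  g ^= g >> 4U;
  g ^= g >> 2U;
  g ^= g >> 1U;
  return g;
}

static_assert(gray_code_word(0b0110U) == 0b0101U, "6 maps to 5");
static_assert(gray_code_inv_word(0b0101U) == 0b0110U, "5 maps back to 6");
static_assert(gray_code_inv_word(gray_code_word(0xDEADBEEFU)) == 0xDEADBEEFU, "");
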
diff --git a/include/chilbert/StaticBitVec.hpp b/include/chilbert/StaticBitVec.hpp
index 9aff3ad..1cd59b0 100644
--- a/include/chilbert/StaticBitVec.hpp
+++ b/include/chilbert/StaticBitVec.hpp
@@ -42,75 +42,95 @@ namespace chilbert {
*
* @tparam N Number of bits.
*/
-template <size_t N>
+template<size_t N>
class StaticBitVec : public detail::MultiBitVec<StaticBitVec<N>>
{
public:
- using Rack = typename detail::MultiBitVec<StaticBitVec<N>>::Rack;
+ using Rack = typename detail::MultiBitVec<StaticBitVec<N>>::Rack;
- using detail::MultiBitVec<StaticBitVec<N>>::bits_per_rack;
+ using detail::MultiBitVec<StaticBitVec<N>>::bits_per_rack;
- StaticBitVec() = default;
+ StaticBitVec() = default;
- /// Constructor for compatibility with DynamicBitVec
- explicit StaticBitVec(const size_t bits) { assert(bits == size()); }
+ /// Constructor for compatibility with DynamicBitVec
+ explicit StaticBitVec(const size_t bits) { assert(bits == size()); }
- /// Constructor for compatibility with DynamicBitVec
- StaticBitVec(const size_t bits, const Rack value)
- : StaticBitVec{bits}
- {
- m_racks[0] = value;
- }
+ /// Constructor for compatibility with DynamicBitVec
+ StaticBitVec(const size_t bits, const Rack value)
+ : StaticBitVec{bits}
+ {
+ m_racks[0] = value;
+ }
- /// Return the size in bits
- size_t size() const { return N; }
+ /// Return the size in bits
+ size_t size() const { return N; }
- /// Return a reference to the `index`th rack
+ /// Return a reference to the `index`th rack
#ifndef NDEBUG
- const auto& rack(const size_t index) const { return m_racks.at(index); }
- auto& rack(const size_t index) { return m_racks.at(index); }
+ const auto& rack(const size_t index) const
+ {
+ return m_racks.at(index);
+ }
+ auto& rack(const size_t index)
+ {
+ return m_racks.at(index);
+ }
#else
- const auto& rack(const size_t index) const { return m_racks[index]; }
- auto& rack(const size_t index) { return m_racks[index]; }
+ const auto& rack(const size_t index) const
+ {
+ return m_racks[index];
+ }
+ auto& rack(const size_t index)
+ {
+ return m_racks[index];
+ }
#endif
- /// Return a raw pointer to the racks
- Rack* data() { return m_racks.data(); }
- const Rack* data() const { return m_racks.data(); }
-
- /// Return the total size of all racks in bytes
- static constexpr size_t data_size() { return num_racks() * sizeof(Rack); }
-
- /// Return the number of racks
- static constexpr size_t num_racks()
- {
- return (std::max(N, size_t(1)) + bits_per_rack - 1) / bits_per_rack;
- }
+ /// Return a raw pointer to the racks
+ Rack* data()
+ {
+ return m_racks.data();
+ }
+ const Rack* data() const
+ {
+ return m_racks.data();
+ }
+
+ /// Return the total size of all racks in bytes
+ static constexpr size_t data_size()
+ {
+ return num_racks() * sizeof(Rack);
+ }
+
+ /// Return the number of racks
+ static constexpr size_t num_racks()
+ {
+ return (std::max(N, size_t(1)) + bits_per_rack - 1) / bits_per_rack;
+ }
private:
- std::array<Rack, num_racks()> m_racks{};
+ std::array<Rack, num_racks()> m_racks{};
};
namespace detail {
-template <size_t N>
-struct is_bitvec<StaticBitVec<N>>
-{
- constexpr static bool value = true;
+template<size_t N>
+struct is_bitvec<StaticBitVec<N>> {
+ constexpr static bool value = true;
};
-template <size_t N>
+template<size_t N>
void
gray_code(StaticBitVec<N>& value)
{
- gray_code(static_cast<MultiBitVec<StaticBitVec<N>>&>(value));
+ gray_code(static_cast<MultiBitVec<StaticBitVec<N>>&>(value));
}
-template <size_t N>
+template<size_t N>
void
gray_code_inv(StaticBitVec<N>& value)
{
- gray_code_inv(static_cast<MultiBitVec<StaticBitVec<N>>&>(value));
+ gray_code_inv(static_cast<MultiBitVec<StaticBitVec<N>>&>(value));
}
} // namespace detail
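
Because num_racks() and data_size() are static constexpr above, StaticBitVec's layout can be checked at compile time. A sketch of such checks, assuming a 64-bit Rack (uintptr_t on a typical 64-bit platform):

#include <chilbert/StaticBitVec.hpp>

#include <cstdint>

using chilbert::StaticBitVec;

static_assert(StaticBitVec<1>::num_racks() == 1, "");
static_assert(StaticBitVec<64>::num_racks() == 1, "one 64-bit rack suffices");
static_assert(StaticBitVec<65>::num_racks() == 2, "65 bits spill into a second rack");
static_assert(StaticBitVec<100>::data_size() == 2 * sizeof(std::uintptr_t), "");
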
diff --git a/include/chilbert/chilbert.hpp b/include/chilbert/chilbert.hpp
index c55fc14..36b94cc 100644
--- a/include/chilbert/chilbert.hpp
+++ b/include/chilbert/chilbert.hpp
@@ -33,8 +33,9 @@ namespace chilbert {
* @param n Number of dimensions.
* @param[out] h Hilbert Index.
*/
-template <class P, class H>
-inline void coords_to_index(const P& p, size_t m, size_t n, H& h);
+template<class P, class H>
+inline void
+coords_to_index(const P& p, size_t m, size_t n, H& h);
/** Map the Hilbert Index `p` to a point.
*
@@ -46,8 +47,9 @@ inline void coords_to_index(const P& p, size_t m, size_t n, H& h);
* @param n Number of dimensions.
* @param h Hilbert Index.
*/
-template <class P, class H>
-inline void index_to_coords(P& p, size_t m, size_t n, const H& h);
+template<class P, class H>
+inline void
+index_to_coords(P& p, size_t m, size_t n, const H& h);
/** Map the point `p` to a Compact Hilbert Index.
*
@@ -61,13 +63,14 @@ inline void index_to_coords(P& p, size_t m, size_t n, const H& h);
* @param M Optional net precision (sum of `ms`), the size of `hc` in bits.
* @param m Optional largest precision in `m`.
*/
-template <class P, class H>
-inline void coords_to_compact_index(const P& p,
- const size_t* ms,
- size_t n,
- H& hc,
- size_t M = 0,
- size_t m = 0);
+template<class P, class H>
+inline void
+coords_to_compact_index(const P& p,
+ const size_t* ms,
+ size_t n,
+ H& hc,
+ size_t M = 0,
+ size_t m = 0);
/** Map the Compact Hilbert Index `hc` to a point.
*
@@ -81,13 +84,14 @@ inline void coords_to_compact_index(const P& p,
* @param M Optional net precision (sum of `ms`), the size of `hc` in bits.
* @param m Optional largest precision in `m`.
*/
-template <class P, class H>
-inline void compact_index_to_coords(P& p,
- const size_t* ms,
- size_t n,
- const H& hc,
- size_t M = 0,
- size_t m = 0);
+template<class P, class H>
+inline void
+compact_index_to_coords(P& p,
+ const size_t* ms,
+ size_t n,
+ const H& hc,
+ size_t M = 0,
+ size_t m = 0);
} // namespace chilbert
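
A hypothetical usage sketch of the two basic mappings declared above (not taken from the repository's tests). The num_bits() overloads in chilbert.ipp suggest that plain unsigned integers are accepted for both the coordinates and the index when they are wide enough, which is assumed here; with n = 2 dimensions at m = 16 bits each, the Hilbert index needs n * m = 32 bits:

#include <chilbert/chilbert.hpp>
#include <chilbert/chilbert.ipp> // chilbert.hpp may already pull this in

#include <array>
#include <cassert>
#include <cstdint>

int
main()
{
  const std::array<std::uint16_t, 2> p{{12345U, 54321U}};

  std::uint32_t h = 0;
  chilbert::coords_to_index(p, 16, 2, h); // m = 16 bits per dimension, n = 2

  std::array<std::uint16_t, 2> q{};
  chilbert::index_to_coords(q, 16, 2, h); // recover the coordinates

  assert(q == p); // the mapping round-trips
  return 0;
}
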
diff --git a/include/chilbert/chilbert.ipp b/include/chilbert/chilbert.ipp
index 5c18570..f333630 100644
--- a/include/chilbert/chilbert.ipp
+++ b/include/chilbert/chilbert.ipp
@@ -41,34 +41,34 @@ namespace detail {
*/
static constexpr size_t D0 = 1;
-template <class T>
+template<class T>
size_t
num_bits(const T&, std::enable_if_t<std::is_integral<T>::value>* = nullptr)
{
- return sizeof(T) * CHAR_BIT;
+ return sizeof(T) * CHAR_BIT;
}
-template <class T>
+template<class T>
size_t
num_bits(const T& vec,
std::enable_if_t<std::is_same<T, SmallBitVec>::value ||
std::is_same<T, DynamicBitVec>::value>* = nullptr)
{
- return vec.size();
+ return vec.size();
}
-template <size_t N>
+template<size_t N>
size_t
num_bits(const StaticBitVec<N>&, void* = nullptr)
{
- return N;
+ return N;
}
-template <size_t MaxN>
+template<size_t MaxN>
size_t
num_bits(const BoundedBitVec<MaxN>& vec, void* = nullptr)
{
- return vec.size();
+ return vec.size();
}
/** Copy a range of bits from one field to the start of another.
@@ -78,13 +78,13 @@ num_bits(const BoundedBitVec<MaxN>& vec, void* = nullptr)
* @param i Start bit index in source
* @param[out] w Destination bit field
*/
-template <class H, class I>
+template<class H, class I>
inline void
get_bits(const H& h, const size_t n, const size_t i, I& w)
{
- for (size_t j = 0; j < n; ++j) {
- set_bit(w, j, test_bit(h, i + j));
- }
+ for (size_t j = 0; j < n; ++j) {
+ set_bit(w, j, test_bit(h, i + j));
+ }
}
/** Set a range of bits in one field from the start of another.
@@ -94,13 +94,13 @@ get_bits(const H& h, const size_t n, const size_t i, I& w)
* @param i Start bit index in destination
* @param w Source bit field
*/
-template <class H, class I>
+template<class H, class I>
inline void
set_bits(H& h, const size_t n, const size_t i, const I& w)
{
- for (size_t j = 0; j < n; ++j) {
- set_bit(h, i + j, test_bit(w, j));
- }
+ for (size_t j = 0; j < n; ++j) {
+ set_bit(h, i + j, test_bit(w, j));
+ }
}
/** Set the leading bits in `l` to bit `i` from the corresponding value in `p`.
@@ -110,13 +110,13 @@ set_bits(H& h, const size_t n, const size_t i, const I& w)
* @param i Bit position to copy from values in `p`.
* @param[out] l Output bits.
*/
-template <class P, class I>
+template<class P, class I>
inline void
get_location(const P& p, const size_t n, const size_t i, I& l)
{
- for (size_t j = 0; j < n; ++j) {
- set_bit(l, j, test_bit(p[j], i));
- }
+ for (size_t j = 0; j < n; ++j) {
+ set_bit(l, j, test_bit(p[j], i));
+ }
}
/** Set bit `i` in values in `p` to the corresponding leadings bits in `l`.
@@ -126,81 +126,81 @@ get_location(const P& p, const size_t n, const size_t i, I& l)
* @param i Bit position to set in values in `p`.
* @param l Input bits.
*/
-template <class P, class I>
+template<class P, class I>
inline void
set_location(P& p, const size_t n, const size_t i, const I& l)
{
- for (size_t j = 0; j < n; ++j) {
- set_bit(p[j], i, test_bit(l, j));
- }
+ for (size_t j = 0; j < n; ++j) {
+ set_bit(p[j], i, test_bit(l, j));
+ }
}
// 'Transforms' a point.
-template <class I>
+template<class I>
inline void
transform(const I& e, const size_t d, const size_t n, I& a)
{
- (void)n;
- assert(a.size() == n);
- a ^= e;
- a.rotr(d);
+ (void)n;
+ assert(a.size() == n);
+ a ^= e;
+ a.rotr(d);
}
// Inverse 'transforms' a point.
-template <class I>
+template<class I>
inline void
transform_inv(const I& e, const size_t d, const size_t n, I& a)
{
- assert(a.size() == n);
- a.rotl(d);
- a ^= e;
+ assert(a.size() == n);
+ a.rotl(d);
+ a ^= e;
}
// Update for method 1 (GrayCodeInv in the loop)
-template <class I>
+template<class I>
inline void
update1(const I& l, const I& t, const I& w, const size_t n, I& e, size_t& d)
{
- assert(d < n);
- e = l;
- e.flip(d); //#D d == n-1 ? 0 : d+1 );
-
- // Update direction
- d += 1 + t.find_first();
- if (d >= n) {
- d -= n;
- }
- if (d >= n) {
- d -= n;
- }
- assert(d < n);
-
- if (!w.test(0)) {
- e.flip(d == 0 ? n - 1 : d - 1); //#D d );
- }
+ assert(d < n);
+ e = l;
+ e.flip(d); //#D d == n-1 ? 0 : d+1 );
+
+ // Update direction
+ d += 1 + t.find_first();
+ if (d >= n) {
+ d -= n;
+ }
+ if (d >= n) {
+ d -= n;
+ }
+ assert(d < n);
+
+ if (!w.test(0)) {
+ e.flip(d == 0 ? n - 1 : d - 1); //#D d );
+ }
}
// Update for method 2 (GrayCodeInv out of loop)
-template <class I>
+template<class I>
inline void
update2(const I& l, const I& t, const size_t n, I& e, size_t& d)
{
- assert(d < n);
- e = l;
- e.flip(d); //#D d == n-1 ? 0 : d+1 );
-
- // Update direction
- d += 1 + t.find_first();
- if (d >= n) {
- d -= n;
- }
- if (d >= n) {
- d -= n;
- }
- assert(d < n);
+ assert(d < n);
+ e = l;
+ e.flip(d); //#D d == n-1 ? 0 : d+1 );
+
+ // Update direction
+ d += 1 + t.find_first();
+ if (d >= n) {
+ d -= n;
+ }
+ if (d >= n) {
+ d -= n;
+ }
+ assert(d < n);
}
-template <class P, class H, class I>
+template<class P, class H, class I>
inline void
coords_to_index(const P& p,
const size_t m,
@@ -209,91 +209,91 @@ coords_to_index(const P& p,
I&& scratch,
size_t* const ds = nullptr)
{
- I e{std::forward<I>(scratch)};
- I l{e};
- I t{e};
- I w{e};
-
- // Initialize
- e.reset();
- l.reset();
- reset_bits(h);
-
- // Work from MSB to LSB
- size_t d = D0;
- size_t ho = m * n;
- for (auto i = static_cast<intptr_t>(m - 1); i >= 0; --i) {
- if (ds) {
- ds[i] = d;
- }
-
- // Get corner of sub-hypercube where point lies.
- get_location<P, I>(p, n, static_cast<size_t>(i), l);
-
- // Mirror and reflect the location.
- // t = T_{(e,d)}(l)
- t = l;
- transform<I>(e, d, n, t);
-
- w = t;
- if (static_cast<size_t>(i) < m - 1) {
- w.flip(n - 1);
- }
-
- // Concatenate to the index.
- ho -= n;
- set_bits<H, I>(h, n, ho, w);
-
- // Update the entry point and direction.
- update2<I>(l, t, n, e, d);
- }
-
- gray_code_inv(h);
+ I e{std::forward<I>(scratch)};
+ I l{e};
+ I t{e};
+ I w{e};
+
+ // Initialize
+ e.reset();
+ l.reset();
+ reset_bits(h);
+
+ // Work from MSB to LSB
+ size_t d = D0;
+ size_t ho = m * n;
+ for (auto i = static_cast<intptr_t>(m - 1); i >= 0; --i) {
+ if (ds) {
+ ds[i] = d;
+ }
+
+ // Get corner of sub-hypercube where point lies.
+ get_location<P, I>(p, n, static_cast<size_t>(i), l);
+
+ // Mirror and reflect the location.
+ // t = T_{(e,d)}(l)
+ t = l;
+ transform<I>(e, d, n, t);
+
+ w = t;
+ if (static_cast<size_t>(i) < m - 1) {
+ w.flip(n - 1);
+ }
+
+ // Concatenate to the index.
+ ho -= n;
+ set_bits<H, I>(h, n, ho, w);
+
+ // Update the entry point and direction.
+ update2<I>(l, t, n, e, d);
+ }
+
+ gray_code_inv(h);
}
-template <class P, class H, class I>
+template<class P, class H, class I>
inline void
index_to_coords(P& p, const size_t m, const size_t n, const H& h, I&& scratch)
{
- I e{std::forward<I>(scratch)};
- I l{e};
- I t{e};
- I w{e};
-
- // Initialize
- e.reset();
- l.reset();
- for (size_t j = 0; j < n; ++j) {
- reset_bits(p[j]);
- }
-
- // Work from MSB to LSB
- size_t d = D0;
- size_t ho = m * n;
- for (auto i = static_cast<intptr_t>(m - 1); i >= 0; --i) {
- // Get the Hilbert index bits
- ho -= n;
- get_bits<H, I>(h, n, ho, w);
-
- // t = GrayCode(w)
- t = w;
- gray_code(t);
-
- // Reverse the transform
- // l = T^{-1}_{(e,d)}(t)
- l = t;
- transform_inv<I>(e, d, n, l);
-
- // Distribute these bits
- // to the coordinates.
- set_location<P, I>(p, n, static_cast<size_t>(i), l);
-
- // Update the entry point and direction.
- update1<I>(l, t, w, n, e, d);
- }
+ I e{std::forward<I>(scratch)};
+ I l{e};
+ I t{e};
+ I w{e};
+
+ // Initialize
+ e.reset();
+ l.reset();
+ for (size_t j = 0; j < n; ++j) {
+ reset_bits(p[j]);
+ }
+
+ // Work from MSB to LSB
+ size_t d = D0;
+ size_t ho = m * n;
+ for (auto i = static_cast<intptr_t>(m - 1); i >= 0; --i) {
+ // Get the Hilbert index bits
+ ho -= n;
+ get_bits<H, I>(h, n, ho, w);
+
+ // t = GrayCode(w)
+ t = w;
+ gray_code(t);
+
+ // Reverse the transform
+ // l = T^{-1}_{(e,d)}(t)
+ l = t;
+ transform_inv<I>(e, d, n, l);
+
+ // Distribute these bits
+ // to the coordinates.
+ set_location<P, I>(p, n, static_cast<size_t>(i), l);
+
+ // Update the entry point and direction.
+ update1<I>(l, t, w, n, e, d);
+ }
}
-template <class P, class HC, class I>
+template<class P, class HC, class I>
inline void
coords_to_compact_index(const P& p,
const size_t* const ms,
@@ -303,43 +303,43 @@ coords_to_compact_index(const P& p,
size_t M = 0,
size_t m = 0)
{
- // Get total precision and max precision if not supplied
- if (M == 0 || m == 0) {
- M = m = 0;
- for (size_t i = 0; i < n; ++i) {
- assert(num_bits(p[i]) >= ms[i]);
- if (ms[i] > m) {
- m = ms[i];
- }
- M += ms[i];
- }
- }
-
- const size_t mn = m * n;
-
- assert(num_bits(hc) >= M);
-
- // If we could avoid allocation altogether (ie: have a
- // fixed buffer allocated on the stack) then this increases
- // speed by a bit (4% when n=4, m=20)
- auto* const ds = new size_t[m];
-
- if (mn > SmallBitVec::bits_per_rack) {
- DynamicBitVec h(mn);
- detail::coords_to_index<P, DynamicBitVec, I>(
- p, m, n, h, std::forward<I>(scratch), ds);
- compact_index<DynamicBitVec, HC>(ms, ds, n, m, h, hc);
- } else {
- SmallBitVec h(mn);
- detail::coords_to_index<P, SmallBitVec, I>(
- p, m, n, h, std::forward<I>(scratch), ds);
- compact_index<SmallBitVec, HC>(ms, ds, n, m, h, hc);
- }
-
- delete[] ds;
+ // Get total precision and max precision if not supplied
+ if (M == 0 || m == 0) {
+ M = m = 0;
+ for (size_t i = 0; i < n; ++i) {
+ assert(num_bits(p[i]) >= ms[i]);
+ if (ms[i] > m) {
+ m = ms[i];
+ }
+ M += ms[i];
+ }
+ }
+
+ const size_t mn = m * n;
+
+ assert(num_bits(hc) >= M);
+
+ // If we could avoid allocation altogether (ie: have a
+ // fixed buffer allocated on the stack) then this increases
+ // speed by a bit (4% when n=4, m=20)
+ auto* const ds = new size_t[m];
+
+ if (mn > SmallBitVec::bits_per_rack) {
+ DynamicBitVec h(mn);
+ detail::coords_to_index<P, DynamicBitVec, I>(
+ p, m, n, h, std::forward<I>(scratch), ds);
+ compact_index<DynamicBitVec, HC>(ms, ds, n, m, h, hc);
+ } else {
+ SmallBitVec h(mn);
+ detail::coords_to_index<P, SmallBitVec, I>(
+ p, m, n, h, std::forward<I>(scratch), ds);
+ compact_index<SmallBitVec, HC>(ms, ds, n, m, h, hc);
+ }
+
+ delete[] ds;
}
-template <class P, class HC, class I>
+template<class P, class HC, class I>
inline void
compact_index_to_coords(P& p,
const size_t* ms,
@@ -349,109 +349,107 @@ compact_index_to_coords(P& p,
size_t M,
size_t m)
{
- I e{std::forward<I>(scratch)};
- I l{e};
- I t{e};
- I w{e};
- I r{e};
- I mask{e};
- I ptrn{e};
-
- // Get total precision and max precision
- // if not supplied
- if (M == 0 || m == 0) {
- M = m = 0;
- for (size_t i = 0; i < n; ++i) {
- if (ms[i] > m) {
- m = ms[i];
- }
- M += ms[i];
- }
- }
-
- assert(num_bits(hc) >= M);
- assert(num_bits(p[0]) >= m);
-
- // Initialize
- e.reset();
- l.reset();
- for (size_t j = 0; j < n; ++j) {
- reset_bits(p[j]);
- }
-
- // Work from MSB to LSB
- size_t d = D0;
-
- for (auto i = static_cast<intptr_t>(m - 1); i >= 0; --i) {
- // Get the mask and ptrn
- size_t b = 0;
- extract_mask<I>(ms, n, d, static_cast<size_t>(i), mask, b);
- ptrn = e;
- assert(ptrn.size() == n);
- ptrn.rotr(d);
-
- // Get the Hilbert index bits
- M -= b;
- r.reset(); // GetBits doesn't do this
- get_bits<HC, I>(hc, b, M, r);
-
- // w = GrayCodeRankInv(r)
- // t = GrayCode(w)
- gray_code_rank_inv<I>(mask, ptrn, r, n, b, t, w);
-
- // Reverse the transform
- // l = T^{-1}_{(e,d)}(t)
- l = t;
- transform_inv<I>(e, d, n, l);
-
- // Distribute these bits
- // to the coordinates.
- set_location<P, I>(p, n, static_cast<size_t>(i), l);
-
- // Update the entry point and direction.
- update1<I>(l, t, w, n, e, d);
- }
+ I e{std::forward<I>(scratch)};
+ I l{e};
+ I t{e};
+ I w{e};
+ I r{e};
+ I mask{e};
+ I ptrn{e};
+
+ // Get total precision and max precision
+ // if not supplied
+ if (M == 0 || m == 0) {
+ M = m = 0;
+ for (size_t i = 0; i < n; ++i) {
+ if (ms[i] > m) {
+ m = ms[i];
+ }
+ M += ms[i];
+ }
+ }
+
+ assert(num_bits(hc) >= M);
+ assert(num_bits(p[0]) >= m);
+
+ // Initialize
+ e.reset();
+ l.reset();
+ for (size_t j = 0; j < n; ++j) {
+ reset_bits(p[j]);
+ }
+
+ // Work from MSB to LSB
+ size_t d = D0;
+
+ for (auto i = static_cast<intptr_t>(m - 1); i >= 0; --i) {
+ // Get the mask and ptrn
+ size_t b = 0;
+ extract_mask<I>(ms, n, d, static_cast<size_t>(i), mask, b);
+ ptrn = e;
+ assert(ptrn.size() == n);
+ ptrn.rotr(d);
+
+ // Get the Hilbert index bits
+ M -= b;
+ r.reset(); // GetBits doesn't do this
+ get_bits<HC, I>(hc, b, M, r);
+
+ // w = GrayCodeRankInv(r)
+ // t = GrayCode(w)
+ gray_code_rank_inv<I>(mask, ptrn, r, n, b, t, w);
+
+ // Reverse the transform
+ // l = T^{-1}_{(e,d)}(t)
+ l = t;
+ transform_inv<I>(e, d, n, l);
+
+ // Distribute these bits
+ // to the coordinates.
+ set_location<P, I>(p, n, static_cast<size_t>(i), l);
+
+ // Update the entry point and direction.
+ update1<I>(l, t, w, n, e, d);
+ }
}
} // namespace detail
-template <class P, class H>
+template<class P, class H>
inline void
coords_to_index(const P& p, const size_t m, const size_t n, H& h)
{
- assert(detail::num_bits(h) >= n * m);
- assert(detail::num_bits(p[0]) >= m);
-
- if (n <= SmallBitVec::bits_per_rack) {
- // Intermediate variables will fit in fixed width
- detail::coords_to_index<P, H, SmallBitVec>(p, m, n, h, SmallBitVec(n));
- } else {
- // Otherwise, they must be DynamicBitVecs
- detail::coords_to_index<P, H, DynamicBitVec>(
- p, m, n, h, DynamicBitVec(n));
- }
+ assert(detail::num_bits(h) >= n * m);
+ assert(detail::num_bits(p[0]) >= m);
+
+ if (n <= SmallBitVec::bits_per_rack) {
+ // Intermediate variables will fit in fixed width
+ detail::coords_to_index<P, H, SmallBitVec>(p, m, n, h, SmallBitVec(n));
+ } else {
+ // Otherwise, they must be DynamicBitVecs
+ detail::coords_to_index<P, H, DynamicBitVec>(p, m, n, h, DynamicBitVec(n));
+ }
}
-template <class P, class H>
+template<class P, class H>
inline void
index_to_coords(P& p, const size_t m, const size_t n, const H& h)
{
- assert(m > 0);
- assert(n > 0);
- assert(detail::num_bits(h) >= n * m);
- assert(detail::num_bits(p[0]) >= m);
-
- if (n <= SmallBitVec::bits_per_rack) {
- // Intermediate variables will fit in fixed width
- detail::index_to_coords<P, H, SmallBitVec>(p, m, n, h, SmallBitVec(n));
- } else {
- // Otherwise, they must be DynamicBitVecs
- detail::index_to_coords<P, H, DynamicBitVec>(
- p, m, n, h, DynamicBitVec(n));
- }
+ assert(m > 0);
+ assert(n > 0);
+ assert(detail::num_bits(h) >= n * m);
+ assert(detail::num_bits(p[0]) >= m);
+
+ if (n <= SmallBitVec::bits_per_rack) {
+ // Intermediate variables will fit in fixed width
+ detail::index_to_coords<P, H, SmallBitVec>(p, m, n, h, SmallBitVec(n));
+ } else {
+ // Otherwise, they must be DynamicBitVecs
+ detail::index_to_coords<P, H, DynamicBitVec>(p, m, n, h, DynamicBitVec(n));
+ }
}
-template <class P, class HC>
+template<class P, class HC>
inline void
coords_to_compact_index(const P& p,
const size_t* const ms,
@@ -460,20 +458,20 @@ coords_to_compact_index(const P& p,
const size_t M,
const size_t m)
{
- assert(hc.size() >= std::accumulate(ms, ms + n, size_t(0)));
-
- if (n <= SmallBitVec::bits_per_rack) {
- // Intermediate variables will fit in fixed width
- detail::coords_to_compact_index<P, HC, SmallBitVec>(
- p, ms, n, hc, SmallBitVec(n), M, m);
- } else {
- // Otherwise, they must be DynamicBitVecs
- detail::coords_to_compact_index<P, HC, DynamicBitVec>(
- p, ms, n, hc, DynamicBitVec(n), M, m);
- }
+ assert(hc.size() >= std::accumulate(ms, ms + n, size_t(0)));
+
+ if (n <= SmallBitVec::bits_per_rack) {
+ // Intermediate variables will fit in fixed width
+ detail::coords_to_compact_index<P, HC, SmallBitVec>(
+ p, ms, n, hc, SmallBitVec(n), M, m);
+ } else {
+ // Otherwise, they must be DynamicBitVecs
+ detail::coords_to_compact_index<P, HC, DynamicBitVec>(
+ p, ms, n, hc, DynamicBitVec(n), M, m);
+ }
}
-template <class P, class HC>
+template<class P, class HC>
inline void
compact_index_to_coords(P& p,
const size_t* const ms,
@@ -482,19 +480,19 @@ compact_index_to_coords(P& p,
const size_t M,
const size_t m)
{
- assert(hc.size() >= std::accumulate(ms, ms + n, size_t(0)));
-
- if (n <= SmallBitVec::bits_per_rack) {
- // Intermediate variables will fit in fixed width
- SmallBitVec scratch(n);
- detail::compact_index_to_coords<P, HC, SmallBitVec>(
- p, ms, n, hc, std::move(scratch), M, m);
- } else {
- // Otherwise, they must be DynamicBitVecs
- DynamicBitVec scratch(n);
- detail::compact_index_to_coords<P, HC, DynamicBitVec>(
- p, ms, n, hc, std::move(scratch), M, m);
- }
+ assert(hc.size() >= std::accumulate(ms, ms + n, size_t(0)));
+
+ if (n <= SmallBitVec::bits_per_rack) {
+ // Intermediate variables will fit in fixed width
+ SmallBitVec scratch(n);
+ detail::compact_index_to_coords<P, HC, SmallBitVec>(
+ p, ms, n, hc, std::move(scratch), M, m);
+ } else {
+ // Otherwise, they must be DynamicBitVecs
+ DynamicBitVec scratch(n);
+ detail::compact_index_to_coords<P, HC, DynamicBitVec>(
+ p, ms, n, hc, std::move(scratch), M, m);
+ }
}
} // namespace chilbert
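
For the compact mappings, hc must provide size() (the wrappers above call hc.size() in an assertion), so this hypothetical sketch uses a StaticBitVec for the index and, as before, assumes integer coordinates are accepted. With per-dimension precisions ms = {5, 10, 3}, the compact index needs only 5 + 10 + 3 = 18 bits rather than 3 * 10 = 30:

#include <chilbert/StaticBitVec.hpp>
#include <chilbert/chilbert.hpp>
#include <chilbert/chilbert.ipp> // chilbert.hpp may already pull this in

#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>

int
main()
{
  const std::array<std::size_t, 3> ms{{5U, 10U, 3U}};

  // Each coordinate fits in its precision: 19 < 2^5, 987 < 2^10, 5 < 2^3
  const std::array<std::uint16_t, 3> p{{19U, 987U, 5U}};

  chilbert::StaticBitVec<18> hc;
  chilbert::coords_to_compact_index(p, ms.data(), 3, hc);

  std::array<std::uint16_t, 3> q{};
  chilbert::compact_index_to_coords(q, ms.data(), 3, hc);

  assert(q == p); // the compact mapping also round-trips
  return 0;
}
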
diff --git a/include/chilbert/detail/BitVecIndex.hpp b/include/chilbert/detail/BitVecIndex.hpp
index c8d0469..c337c70 100644
--- a/include/chilbert/detail/BitVecIndex.hpp
+++ b/include/chilbert/detail/BitVecIndex.hpp
@@ -27,22 +27,21 @@ namespace chilbert {
namespace detail {
/// Index into a multi-rack bit vector
-template <class BitVec>
-struct BitVecIndex
-{
- using Rack = typename BitVec::Rack;
-
- static constexpr size_t bits_per_rack = sizeof(Rack) * CHAR_BIT;
-
- explicit BitVecIndex(const size_t bits)
- : rack{bits / bits_per_rack}
- , bit{bits - rack * bits_per_rack}
- {
- assert(bit < bits_per_rack);
- }
-
- size_t rack;
- size_t bit;
+template<class BitVec>
+struct BitVecIndex {
+ using Rack = typename BitVec::Rack;
+
+ static constexpr size_t bits_per_rack = sizeof(Rack) * CHAR_BIT;
+
+ explicit BitVecIndex(const size_t bits)
+ : rack{bits / bits_per_rack}
+ , bit{bits - rack * bits_per_rack}
+ {
+ assert(bit < bits_per_rack);
+ }
+
+ size_t rack;
+ size_t bit;
};
} // namespace detail
diff --git a/include/chilbert/detail/BitVecIterator.hpp b/include/chilbert/detail/BitVecIterator.hpp
index 2458da9..43af2ee 100644
--- a/include/chilbert/detail/BitVecIterator.hpp
+++ b/include/chilbert/detail/BitVecIterator.hpp
@@ -26,72 +26,69 @@
namespace chilbert {
namespace detail {
-template <class BitVec>
+template<class BitVec>
class BitVecIteratorBase : public BitVecMask<typename BitVec::Rack>
{
public:
- using Mask = typename BitVec::Mask;
+ using Mask = typename BitVec::Mask;
- BitVecIteratorBase& operator++()
- {
- Mask::operator++();
- return *this;
- }
+ BitVecIteratorBase& operator++()
+ {
+ Mask::operator++();
+ return *this;
+ }
- BitVecIteratorBase& operator--()
- {
- Mask::operator--();
- return *this;
- }
+ BitVecIteratorBase& operator--()
+ {
+ Mask::operator--();
+ return *this;
+ }
- bool operator==(const BitVecIteratorBase& rhs) const
- {
- return m_vec == rhs.m_vec && Mask::operator==(rhs);
- }
+ bool operator==(const BitVecIteratorBase& rhs) const
+ {
+ return m_vec == rhs.m_vec && Mask::operator==(rhs);
+ }
- bool operator!=(const BitVecIteratorBase& rhs) const
- {
- return !operator==(rhs);
- }
+ bool operator!=(const BitVecIteratorBase& rhs) const
+ {
+ return !operator==(rhs);
+ }
- bool operator*() const { return m_vec->test(*this); }
+ bool operator*() const { return m_vec->test(*this); }
protected:
- BitVecIteratorBase(BitVec& vec, const size_t index)
- : Mask{index}
- , m_vec{&vec}
- {
- }
+ BitVecIteratorBase(BitVec& vec, const size_t index)
+ : Mask{index}
+ , m_vec{&vec}
+ {}
- BitVec* m_vec;
+ BitVec* m_vec;
};
-template <class BitVec>
+template<class BitVec>
class BitVecIterator : public BitVecIteratorBase<BitVec>
{
public:
- void set() { this->m_vec->set(*this); }
- void reset() { this->m_vec->reset(*this); }
+ void set() { this->m_vec->set(*this); }
+ void reset() { this->m_vec->reset(*this); }
private:
- friend BitVec;
+ friend BitVec;
- BitVecIterator(BitVec& vec, const size_t index)
- : BitVecIteratorBase<BitVec>{vec, index}
- {
- }
+ BitVecIterator(BitVec& vec, const size_t index)
+ : BitVecIteratorBase<BitVec>{vec, index}
+ {}
};
-template <class BitVec>
+template<class BitVec>
class ConstBitVecIterator : public BitVecIteratorBase<const BitVec>
{
private:
- friend BitVec;
+ friend BitVec;
- ConstBitVecIterator(const BitVec& vec, const size_t index)
- : BitVecIteratorBase<const BitVec>{vec, index}
- {
- }
+ ConstBitVecIterator(const BitVec& vec, const size_t index)
+ : BitVecIteratorBase<const BitVec>{vec, index}
+ {}
};
} // namespace detail
diff --git a/include/chilbert/detail/BitVecMask.hpp b/include/chilbert/detail/BitVecMask.hpp
index 5674114..855e4f3 100644
--- a/include/chilbert/detail/BitVecMask.hpp
+++ b/include/chilbert/detail/BitVecMask.hpp
@@ -30,42 +30,40 @@ namespace detail {
* This enables fast iteration while setting or resetting bits since it is
* internally stored as a mask which avoids repeated index math.
*/
-template <class Rack>
-struct BitVecMask
-{
- static constexpr size_t bits_per_rack = sizeof(Rack) * CHAR_BIT;
+template<class Rack>
+struct BitVecMask {
+ static constexpr size_t bits_per_rack = sizeof(Rack) * CHAR_BIT;
- explicit BitVecMask(const size_t index)
- : rack{index / bits_per_rack}
- , mask{Rack{1} << (index - rack * bits_per_rack)}
- {
- }
+ explicit BitVecMask(const size_t index)
+ : rack{index / bits_per_rack}
+ , mask{Rack{1} << (index - rack * bits_per_rack)}
+ {}
- void operator++()
- {
- if ((mask <<= 1U) == 0U) {
- mask = 1U;
- ++rack;
- }
- }
+ void operator++()
+ {
+ if ((mask <<= 1U) == 0U) {
+ mask = 1U;
+ ++rack;
+ }
+ }
- void operator--()
- {
- if ((mask >>= 1U) == 0U) {
- mask = Rack{1} << (bits_per_rack - 1U);
- --rack;
- }
- }
+ void operator--()
+ {
+ if ((mask >>= 1U) == 0U) {
+ mask = Rack{1} << (bits_per_rack - 1U);
+ --rack;
+ }
+ }
- bool operator==(const BitVecMask& rhs) const
- {
- return rack == rhs.rack && mask == rhs.mask;
- }
+ bool operator==(const BitVecMask& rhs) const
+ {
+ return rack == rhs.rack && mask == rhs.mask;
+ }
- bool operator!=(const BitVecMask& rhs) const { return !operator==(rhs); }
+ bool operator!=(const BitVecMask& rhs) const { return !operator==(rhs); }
- size_t rack;
- Rack mask;
+ size_t rack;
+ Rack mask;
};
} // namespace detail
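
The mask-plus-rack representation above exists so iteration can advance by a shift instead of re-deriving rack and bit offsets from an index on every step. A small check of the carry across a rack boundary, using the same uintptr_t Rack as the bit vectors above:

#include <chilbert/detail/BitVecMask.hpp>

#include <cassert>
#include <cstddef>
#include <cstdint>

int
main()
{
  using Rack = std::uintptr_t;
  using Mask = chilbert::detail::BitVecMask<Rack>;

  constexpr std::size_t top = Mask::bits_per_rack - 1; // last bit of rack 0

  Mask m{top};
  assert(m.rack == 0 && m.mask == (Rack{1} << top));

  ++m;                                  // shifting past the top bit...
  assert(m.rack == 1 && m.mask == 1U);  // ...carries into rack 1, bit 0

  --m;                                  // decrementing carries back down
  assert(m.rack == 0 && m.mask == (Rack{1} << top));
  return 0;
}
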
diff --git a/include/chilbert/detail/MultiBitVec.hpp b/include/chilbert/detail/MultiBitVec.hpp
index 6db1065..e8ae08a 100644
--- a/include/chilbert/detail/MultiBitVec.hpp
+++ b/include/chilbert/detail/MultiBitVec.hpp
@@ -33,358 +33,358 @@
namespace chilbert {
namespace detail {
-template <class Derived>
+template<class Derived>
class MultiBitVec
{
public:
- using Rack = uintptr_t;
- using Mask = BitVecMask<Rack>;
-
- using iterator = BitVecIterator<MultiBitVec<Derived>>;
- using const_iterator = ConstBitVecIterator<MultiBitVec<Derived>>;
-
- static constexpr size_t bits_per_rack = sizeof(Rack) * CHAR_BIT;
-
- /// Return the value of the bit covered by `mask`
- bool test(const Mask mask) const { return rack(mask.rack) & mask.mask; }
-
- /// Return the value of the `index`th bit
- bool test(const size_t index) const { return test(mask(index)); }
-
- /// Set all bits to one
- Derived& set()
- {
- if (size()) {
- memset(data(), 0xFF, data_size());
- self()->truncate();
- }
-
- return *self();
- }
-
- /// Set the bit covered by `mask` to 1
- Derived& set(const Mask mask)
- {
- rack(mask.rack) |= mask.mask;
- return *self();
- }
-
- /// Set the `index`th bit to 1
- Derived& set(const size_t index) { return set(mask(index)); }
-
- /// Set the bit covered by `mask` to `value`
- Derived& set(const Mask mask, const bool value)
- {
- auto& r = rack(mask.rack);
- r ^= (-Rack{value} ^ r) & mask.mask;
- return *self();
- }
-
- /// Set the `index`th bit to `value`
- Derived& set(const size_t index, const bool value)
- {
- return set(mask(index), value);
- }
-
- /// Set all bits to zero
- Derived& reset()
- {
- memset(data(), 0, data_size());
- return *self();
- }
-
- /// Reset the bit covered by `mask` to 0
- Derived& reset(const Mask mask)
- {
- rack(mask.rack) &= ~mask.mask;
- return *self();
- }
-
- /// Reset the `index`th bit to 0
- Derived& reset(const size_t index) { return reset(mask(index)); }
-
- /// Flip all bits (one's complement)
- Derived& flip()
- {
- for (size_t i = 0; i < num_racks(); ++i) {
- rack(i) = ~rack(i);
- }
- return *self();
- }
-
- /// Flip the value of the bit covered by `mask`
- Derived& flip(const Mask mask)
- {
- rack(mask.rack) ^= mask.mask;
- return *self();
- }
-
- /// Flip the value of the `index`th bit
- Derived& flip(const size_t index) { return flip(mask(index)); }
-
- /// Clear any bits in storage outside the valid range if necessary
- void truncate()
- {
- if (const auto pad = num_racks() * bits_per_rack - size()) {
- rack(num_racks() - 1) &= ~Rack{0} >> pad;
- }
- }
-
- /// Right-rotate by `bits` positions
- Derived& rotr(const size_t bits)
- {
- assert(bits <= size());
- if (bits == 0 || bits == size()) {
- return *self();
- }
-
- Derived t1(*self());
- *self() >>= bits;
- t1 <<= (size() - bits);
- *self() |= t1;
-
- truncate();
- return *self();
- }
-
- /// Left-rotate by `bits` positions
- Derived& rotl(const size_t bits)
- {
- assert(bits <= size());
- if (bits == 0 || bits == size()) {
- return *self();
- }
-
- Derived t1(*self());
- *self() <<= bits;
- t1 >>= (size() - bits);
- *self() |= t1;
-
- truncate();
- return *self();
- }
-
- /// Return true iff all bits are zero
- bool none() const
- {
- for (size_t i = 0; i < num_racks(); ++i) {
- if (rack(i)) {
- return false;
- }
- }
- return true;
- }
-
- /// Return 1 + the index of the first set bit, or 0 if there are none
- size_t find_first() const
- {
- for (size_t i = 0; i < num_racks(); ++i) {
- const int j = chilbert::detail::find_first(rack(i));
- if (j) {
- return (i * bits_per_rack) + static_cast<size_t>(j);
- }
- }
- return 0;
- }
-
- /// Return the number of set bits
- size_t count() const
- {
- size_t c = 0;
- for (size_t i = 0; i < num_racks(); ++i) {
- c += static_cast<size_t>(pop_count(rack(i)));
- }
- return c;
- }
-
- /// Return a mask that covers the bit with index `i`
- Mask mask(const size_t i = 0) const
- {
- assert(i <= size());
- return Mask{i};
- }
-
- bool operator==(const Derived& vec) const
- {
- return (num_racks() == vec.num_racks() &&
- (num_racks() == 0 ||
- !memcmp(data(), vec.data(), num_racks() * sizeof(Rack))));
- }
-
- bool operator!=(const Derived& vec) const { return !(*this == vec); }
-
- bool operator<(const Derived& vec) const
- {
- assert(size() == vec.size());
-
- for (size_t ri = 0; ri < num_racks(); ++ri) {
- const size_t i = num_racks() - ri - 1;
- if (rack(i) < vec.rack(i)) {
- return true;
- }
-
- if (rack(i) > vec.rack(i)) {
- return false;
- }
- }
- return false;
- }
-
- Derived& operator&=(const Derived& vec)
- {
- for (size_t i = 0; i < std::min(num_racks(), vec.num_racks()); ++i) {
- rack(i) &= vec.rack(i);
- }
-
- return *self();
- }
-
- Derived& operator|=(const Derived& vec)
- {
- for (size_t i = 0; i < std::min(num_racks(), vec.num_racks()); ++i) {
- rack(i) |= vec.rack(i);
- }
-
- return *self();
- }
-
- Derived& operator^=(const Derived& vec)
- {
- for (size_t i = 0; i < std::min(num_racks(), vec.num_racks()); ++i) {
- rack(i) ^= vec.rack(i);
- }
-
- return *self();
- }
-
- Derived& operator<<=(const size_t bits)
- {
- if (bits == 0) {
- return *self();
- }
-
- if (bits >= size()) {
- reset();
- return *self();
- }
-
- const Index index{bits};
-
- if (index.bit == 0) {
- // Simple rack-aligned shift
- for (size_t i = num_racks() - 1; i >= index.rack; --i) {
- rack(i) = rack(i - index.rack);
- }
- } else {
- // Rack + bit offset shift
- const size_t right_shift = bits_per_rack - index.bit;
- for (size_t i = num_racks() - index.rack - 1; i > 0; --i) {
- rack(i + index.rack) =
- (rack(i) << index.bit) | (rack(i - 1) >> right_shift);
- }
-
- rack(index.rack) = rack(0) << index.bit;
- }
-
- // Zero least significant racks
- for (size_t i = 0; i < index.rack; ++i) {
- rack(i) = 0;
- }
-
- return *self();
- }
-
- Derived& operator>>=(const size_t bits)
- {
- if (bits == 0) {
- return *self();
- }
-
- if (bits >= size()) {
- reset();
- return *self();
- }
-
- const Index index{bits};
-
- if (index.bit == 0) {
- // Simple rack-aligned shift
- for (size_t i = 0; i < num_racks() - index.rack; ++i) {
- rack(i) = rack(i + index.rack);
- }
- } else {
- // Rack + bit offset shift
- const size_t last = num_racks() - 1;
- const size_t left_shift = bits_per_rack - index.bit;
- for (size_t i = index.rack; i < last; ++i) {
- rack(i - index.rack) =
- (rack(i) >> index.bit) | (rack(i + 1) << left_shift);
- }
-
- rack(last - index.rack) = rack(last) >> index.bit;
- }
-
- // Zero most significant racks
- for (size_t i = num_racks() - index.rack; i < num_racks(); ++i) {
- rack(i) = 0;
- }
-
- return *self();
- }
-
- auto begin(const size_t i = 0) { return iterator(*self(), i); }
- auto end() { return iterator(*self(), size()); }
- auto begin(const size_t i = 0) const { return const_iterator(*self(), i); }
- auto end() const { return const_iterator(*self(), size()); }
-
- const Rack& rack(const size_t index) const { return self()->rack(index); }
- Rack& rack(const size_t index) { return self()->rack(index); }
- Rack* data() { return self()->data(); }
- const Rack* data() const { return self()->data(); }
- size_t num_racks() const { return self()->num_racks(); }
- size_t size() const { return self()->size(); }
- size_t data_size() const { return self()->data_size(); }
+ using Rack = uintptr_t;
+ using Mask = BitVecMask<Rack>;
+
+ using iterator = BitVecIterator<MultiBitVec<Derived>>;
+ using const_iterator = ConstBitVecIterator<MultiBitVec<Derived>>;
+
+ static constexpr size_t bits_per_rack = sizeof(Rack) * CHAR_BIT;
+
+ /// Return the value of the bit covered by `mask`
+ bool test(const Mask mask) const { return rack(mask.rack) & mask.mask; }
+
+ /// Return the value of the `index`th bit
+ bool test(const size_t index) const { return test(mask(index)); }
+
+ /// Set all bits to one
+ Derived& set()
+ {
+ if (size()) {
+ memset(data(), 0xFF, data_size());
+ self()->truncate();
+ }
+
+ return *self();
+ }
+
+ /// Set the bit covered by `mask` to 1
+ Derived& set(const Mask mask)
+ {
+ rack(mask.rack) |= mask.mask;
+ return *self();
+ }
+
+ /// Set the `index`th bit to 1
+ Derived& set(const size_t index) { return set(mask(index)); }
+
+ /// Set the bit covered by `mask` to `value`
+ Derived& set(const Mask mask, const bool value)
+ {
+ auto& r = rack(mask.rack);
+ r ^= (-Rack{value} ^ r) & mask.mask;
+ return *self();
+ }
+
+ /// Set the `index`th bit to `value`
+ Derived& set(const size_t index, const bool value)
+ {
+ return set(mask(index), value);
+ }
+
+ /// Set all bits to zero
+ Derived& reset()
+ {
+ memset(data(), 0, data_size());
+ return *self();
+ }
+
+ /// Reset the bit covered by `mask` to 0
+ Derived& reset(const Mask mask)
+ {
+ rack(mask.rack) &= ~mask.mask;
+ return *self();
+ }
+
+ /// Reset the `index`th bit to 0
+ Derived& reset(const size_t index) { return reset(mask(index)); }
+
+ /// Flip all bits (one's complement)
+ Derived& flip()
+ {
+ for (size_t i = 0; i < num_racks(); ++i) {
+ rack(i) = ~rack(i);
+ }
+ return *self();
+ }
+
+ /// Flip the value of the bit covered by `mask`
+ Derived& flip(const Mask mask)
+ {
+ rack(mask.rack) ^= mask.mask;
+ return *self();
+ }
+
+ /// Flip the value of the `index`th bit
+ Derived& flip(const size_t index) { return flip(mask(index)); }
+
+ /// Clear any bits in storage outside the valid range if necessary
+ void truncate()
+ {
+ if (const auto pad = num_racks() * bits_per_rack - size()) {
+ rack(num_racks() - 1) &= ~Rack{0} >> pad;
+ }
+ }
+
+ /// Right-rotate by `bits` positions
+ Derived& rotr(const size_t bits)
+ {
+ assert(bits <= size());
+ if (bits == 0 || bits == size()) {
+ return *self();
+ }
+
+ Derived t1(*self());
+ *self() >>= bits;
+ t1 <<= (size() - bits);
+ *self() |= t1;
+
+ truncate();
+ return *self();
+ }
+
+ /// Left-rotate by `bits` positions
+ Derived& rotl(const size_t bits)
+ {
+ assert(bits <= size());
+ if (bits == 0 || bits == size()) {
+ return *self();
+ }
+
+ Derived t1(*self());
+ *self() <<= bits;
+ t1 >>= (size() - bits);
+ *self() |= t1;
+
+ truncate();
+ return *self();
+ }
+
+ /// Return true iff all bits are zero
+ bool none() const
+ {
+ for (size_t i = 0; i < num_racks(); ++i) {
+ if (rack(i)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /// Return 1 + the index of the first set bit, or 0 if there are none
+ size_t find_first() const
+ {
+ for (size_t i = 0; i < num_racks(); ++i) {
+ const int j = chilbert::detail::find_first(rack(i));
+ if (j) {
+ return (i * bits_per_rack) + static_cast<size_t>(j);
+ }
+ }
+ return 0;
+ }
+
+ /// Return the number of set bits
+ size_t count() const
+ {
+ size_t c = 0;
+ for (size_t i = 0; i < num_racks(); ++i) {
+ c += static_cast<size_t>(pop_count(rack(i)));
+ }
+ return c;
+ }
+
+ /// Return a mask that covers the bit with index `i`
+ Mask mask(const size_t i = 0) const
+ {
+ assert(i <= size());
+ return Mask{i};
+ }
+
+ bool operator==(const Derived& vec) const
+ {
+ return (num_racks() == vec.num_racks() &&
+ (num_racks() == 0 ||
+ !memcmp(data(), vec.data(), num_racks() * sizeof(Rack))));
+ }
+
+ bool operator!=(const Derived& vec) const { return !(*this == vec); }
+
+ bool operator<(const Derived& vec) const
+ {
+ assert(size() == vec.size());
+
+ for (size_t ri = 0; ri < num_racks(); ++ri) {
+ const size_t i = num_racks() - ri - 1;
+ if (rack(i) < vec.rack(i)) {
+ return true;
+ }
+
+ if (rack(i) > vec.rack(i)) {
+ return false;
+ }
+ }
+ return false;
+ }
+
+ Derived& operator&=(const Derived& vec)
+ {
+ for (size_t i = 0; i < std::min(num_racks(), vec.num_racks()); ++i) {
+ rack(i) &= vec.rack(i);
+ }
+
+ return *self();
+ }
+
+ Derived& operator|=(const Derived& vec)
+ {
+ for (size_t i = 0; i < std::min(num_racks(), vec.num_racks()); ++i) {
+ rack(i) |= vec.rack(i);
+ }
+
+ return *self();
+ }
+
+ Derived& operator^=(const Derived& vec)
+ {
+ for (size_t i = 0; i < std::min(num_racks(), vec.num_racks()); ++i) {
+ rack(i) ^= vec.rack(i);
+ }
+
+ return *self();
+ }
+
+ Derived& operator<<=(const size_t bits)
+ {
+ if (bits == 0) {
+ return *self();
+ }
+
+ if (bits >= size()) {
+ reset();
+ return *self();
+ }
+
+ const Index index{bits};
+
+ if (index.bit == 0) {
+ // Simple rack-aligned shift
+ for (size_t i = num_racks() - 1; i >= index.rack; --i) {
+ rack(i) = rack(i - index.rack);
+ }
+ } else {
+ // Rack + bit offset shift
+ const size_t right_shift = bits_per_rack - index.bit;
+ for (size_t i = num_racks() - index.rack - 1; i > 0; --i) {
+ rack(i + index.rack) =
+ (rack(i) << index.bit) | (rack(i - 1) >> right_shift);
+ }
+
+ rack(index.rack) = rack(0) << index.bit;
+ }
+
+ // Zero least significant racks
+ for (size_t i = 0; i < index.rack; ++i) {
+ rack(i) = 0;
+ }
+
+ return *self();
+ }
+
+ Derived& operator>>=(const size_t bits)
+ {
+ if (bits == 0) {
+ return *self();
+ }
+
+ if (bits >= size()) {
+ reset();
+ return *self();
+ }
+
+ const Index index{bits};
+
+ if (index.bit == 0) {
+ // Simple rack-aligned shift
+ for (size_t i = 0; i < num_racks() - index.rack; ++i) {
+ rack(i) = rack(i + index.rack);
+ }
+ } else {
+ // Rack + bit offset shift
+ const size_t last = num_racks() - 1;
+ const size_t left_shift = bits_per_rack - index.bit;
+ for (size_t i = index.rack; i < last; ++i) {
+ rack(i - index.rack) =
+ (rack(i) >> index.bit) | (rack(i + 1) << left_shift);
+ }
+
+ rack(last - index.rack) = rack(last) >> index.bit;
+ }
+
+ // Zero most significant racks
+ for (size_t i = num_racks() - index.rack; i < num_racks(); ++i) {
+ rack(i) = 0;
+ }
+
+ return *self();
+ }
+
+ auto begin(const size_t i = 0) { return iterator(*self(), i); }
+ auto end() { return iterator(*self(), size()); }
+ auto begin(const size_t i = 0) const { return const_iterator(*self(), i); }
+ auto end() const { return const_iterator(*self(), size()); }
+
+ const Rack& rack(const size_t index) const { return self()->rack(index); }
+ Rack& rack(const size_t index) { return self()->rack(index); }
+ Rack* data() { return self()->data(); }
+ const Rack* data() const { return self()->data(); }
+ size_t num_racks() const { return self()->num_racks(); }
+ size_t size() const { return self()->size(); }
+ size_t data_size() const { return self()->data_size(); }
private:
- using Index = detail::BitVecIndex<Derived>;
+ using Index = detail::BitVecIndex<Derived>;
- Derived* self() { return static_cast<Derived*>(this); }
- const Derived* self() const { return static_cast<const Derived*>(this); }
+ Derived* self() { return static_cast<Derived*>(this); }
+ const Derived* self() const { return static_cast<const Derived*>(this); }
};
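A minimal usage sketch of the MultiBitVec interface defined above. The concrete type and its constructor are assumptions made for illustration; any class deriving from MultiBitVec (DynamicBitVec is used here) is expected to expose these members.

#include "chilbert/DynamicBitVec.hpp"

#include <cassert>

int main()
{
  chilbert::DynamicBitVec v(16); // 16-bit vector (constructor assumed)
  v.set(0);                      // set bit 0
  v.set(5, true);                // (index, value) overload
  v.flip(5);                     // clear bit 5 again
  v.rotl(3);                     // left-rotate: bit 0 moves to bit 3
  assert(v.count() == 1);        // exactly one bit remains set
  assert(v.find_first() == 4);   // 1 + index of the lowest set bit
  return 0;
}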
-template <class Derived>
+template<class Derived>
void
gray_code(MultiBitVec<Derived>& value)
{
- typename MultiBitVec<Derived>::Rack s = 0;
-
- constexpr size_t left_shift = MultiBitVec<Derived>::bits_per_rack - 1;
- for (size_t ri = 0; ri < value.num_racks(); ++ri) {
- const size_t i = value.num_racks() - ri - 1U;
- auto& rack = value.rack(i);
- const auto t = rack & 1U;
- gray_code(rack);
- rack ^= (s << left_shift);
- s = t;
- }
+ typename MultiBitVec<Derived>::Rack s = 0;
+
+ constexpr size_t left_shift = MultiBitVec<Derived>::bits_per_rack - 1;
+ for (size_t ri = 0; ri < value.num_racks(); ++ri) {
+ const size_t i = value.num_racks() - ri - 1U;
+ auto& rack = value.rack(i);
+ const auto t = rack & 1U;
+ gray_code(rack);
+ rack ^= (s << left_shift);
+ s = t;
+ }
}
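The loop above is equivalent to Gray-coding the vector as one wide integer: each rack is Gray-coded independently, then the least significant bit of the next more significant rack is folded into its top bit. A standalone sketch with two 8-bit racks held in plain integers (not the library types) checks that identity:

#include <cassert>
#include <cstdint>

int main()
{
  const uint16_t v = 0xBEEF;
  const uint16_t g = v ^ (v >> 1); // Gray code of the full 16-bit value

  // The same value split into two 8-bit racks, most significant first
  uint8_t       hi = static_cast<uint8_t>(v >> 8);
  uint8_t       lo = static_cast<uint8_t>(v & 0xFFU);
  const uint8_t s  = hi & 1U; // carry: LSB of the more significant rack

  hi = static_cast<uint8_t>(hi ^ (hi >> 1));               // per-rack Gray code
  lo = static_cast<uint8_t>((lo ^ (lo >> 1)) ^ (s << 7));  // fold carry into the MSB

  assert(g == static_cast<uint16_t>((hi << 8) | lo));
  return 0;
}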
-template <class Derived>
+template<class Derived>
void
gray_code_inv(MultiBitVec<Derived>& value)
{
- using Rack = typename MultiBitVec<Derived>::Rack;
-
- constexpr std::array<Rack, 2> masks{Rack{0}, ~Rack{0}};
- bool s = false;
-
- for (size_t ri = 0; ri < value.num_racks(); ++ri) {
- const size_t i = value.num_racks() - ri - 1;
- auto& rack = value.rack(i);
- gray_code_inv(rack);
- rack ^= masks[s];
- s = rack & 1U;
- }
+ using Rack = typename MultiBitVec<Derived>::Rack;
+
+ constexpr std::array<Rack, 2> masks{Rack{0}, ~Rack{0}};
+ bool s = false;
+
+ for (size_t ri = 0; ri < value.num_racks(); ++ri) {
+ const size_t i = value.num_racks() - ri - 1;
+ auto& rack = value.rack(i);
+ gray_code_inv(rack);
+ rack ^= masks[s];
+ s = rack & 1U;
+ }
}
} // namespace detail
diff --git a/include/chilbert/detail/gray_code_rank.hpp b/include/chilbert/detail/gray_code_rank.hpp
index b6811ff..a326617 100644
--- a/include/chilbert/detail/gray_code_rank.hpp
+++ b/include/chilbert/detail/gray_code_rank.hpp
@@ -30,7 +30,7 @@ namespace detail {
// a Compact Hilbert Index. It compresses a previously
// calculated index when provided the rotation
// at each level of precision.
-template <class H, class HC>
+template<class H, class HC>
inline void
compact_index(const size_t* const ms,
const size_t* const ds,
@@ -39,60 +39,60 @@ compact_index(const size_t* const ms,
H& h,
HC& hc)
{
- assert(h.size() >= n * m);
- assert(hc.size() >= std::accumulate(ms, ms + n, size_t(0)));
-
- reset_bits(hc);
-
- auto hm = h.mask(0);
- auto hcm = hc.mask(0);
-
- // Run through the levels of precision
- for (size_t i = 0; i < m; ++i) {
- // Run through the dimensions
- size_t j = ds[i];
- do {
- // This dimension contributes a bit?
- if (ms[j] > i) {
- if (h.test(hm)) {
- hc.set(hcm);
- }
- ++hcm;
- }
-
- if (++j == n) {
- j = 0;
- }
- ++hm;
- } while (j != ds[i]);
- }
+ assert(h.size() >= n * m);
+ assert(hc.size() >= std::accumulate(ms, ms + n, size_t(0)));
+
+ reset_bits(hc);
+
+ auto hm = h.mask(0);
+ auto hcm = hc.mask(0);
+
+ // Run through the levels of precision
+ for (size_t i = 0; i < m; ++i) {
+ // Run through the dimensions
+ size_t j = ds[i];
+ do {
+ // This dimension contributes a bit?
+ if (ms[j] > i) {
+ if (h.test(hm)) {
+ hc.set(hcm);
+ }
+ ++hcm;
+ }
+
+ if (++j == n) {
+ j = 0;
+ }
+ ++hm;
+ } while (j != ds[i]);
+ }
}
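As a worked illustration (constructed for this example, not taken from the library): with n = 2 dimensions, m = 2 levels of precision, and per-dimension precisions ms = {2, 1}, the full index h carries n * m = 4 bits. At level 0 both dimensions satisfy ms[j] > i and contribute a bit, but at level 1 only dimension 0 does, so its bit alone is copied; hc therefore ends up with ms[0] + ms[1] = 3 significant bits.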
-template <class I>
+template<class I>
inline void
gray_code_rank(const I& mask, const I& gi, const size_t n, I& r)
{
- assert(mask.size() == n);
- assert(gi.size() == n);
- assert(r.size() == n);
-
- r.reset();
-
- auto mi = mask.begin();
- auto gii = gi.begin();
- auto ri = r.begin();
-
- for (size_t i = 0; i < n; ++i, ++mi, ++gii) {
- if (*mi) {
- if (*gii) {
- ri.set();
- }
- ++ri;
- }
- }
+ assert(mask.size() == n);
+ assert(gi.size() == n);
+ assert(r.size() == n);
+
+ r.reset();
+
+ auto mi = mask.begin();
+ auto gii = gi.begin();
+ auto ri = r.begin();
+
+ for (size_t i = 0; i < n; ++i, ++mi, ++gii) {
+ if (*mi) {
+ if (*gii) {
+ ri.set();
+ }
+ ++ri;
+ }
+ }
}
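In other words, gray_code_rank gathers the bits of gi at the positions where mask is set and packs them contiguously from the low end of r. A simplified integer analogue of that ranking loop (a sketch, not the library's bit-vector API):

#include <cassert>
#include <cstdint>

// Gather the bits of `gi` at positions where `mask` has a 1, packing
// them contiguously from bit 0 upward.
static uint32_t
rank_under_mask(const uint32_t mask, const uint32_t gi)
{
  uint32_t r   = 0;
  uint32_t out = 0;
  for (uint32_t bit = 0; bit < 32; ++bit) {
    if (mask & (1U << bit)) {
      if (gi & (1U << bit)) {
        r |= (1U << out);
      }
      ++out;
    }
  }
  return r;
}

int main()
{
  // Only bits 1 and 3 are unconstrained; gi has bit 3 set, so the
  // packed rank is 0b10.
  assert(rank_under_mask(0x0AU, 0x08U) == 0x02U);
  return 0;
}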
-template <class I>
+template<class I>
inline void
gray_code_rank_inv(const I& mask,
const I& ptrn,
@@ -102,51 +102,51 @@ gray_code_rank_inv(const I& mask,
I& g,
I& gi)
{
- assert(mask.size() == n);
- assert(ptrn.size() == n);
- assert(r.size() == n);
- assert(g.size() == n);
- assert(gi.size() == n);
-
- g.reset();
- gi.reset();
-
- auto m = mask.mask(n - 1);
- auto ri = r.begin(b - 1);
-
- typename I::Rack gi0 = 0;
- typename I::Rack gi1 = 0;
- typename I::Rack g0 = 0;
-
- for (size_t i = 0; i < n; ++i) {
- if (mask.test(m)) { // Unconstrained bit
- gi1 = gi0;
- gi0 = *ri;
- g0 = gi0 ^ gi1;
- if (gi0) {
- gi.set(m);
- }
- if (g0) {
- g.set(m);
- }
- --ri;
- } else { // Constrained bit
- g0 = (ptrn.test(m) > 0);
- gi1 = gi0;
- gi0 = g0 ^ gi1;
- if (gi0) {
- gi.set(m);
- }
- if (g0) {
- g.set(m);
- }
- }
-
- --m;
- }
+ assert(mask.size() == n);
+ assert(ptrn.size() == n);
+ assert(r.size() == n);
+ assert(g.size() == n);
+ assert(gi.size() == n);
+
+ g.reset();
+ gi.reset();
+
+ auto m = mask.mask(n - 1);
+ auto ri = r.begin(b - 1);
+
+ typename I::Rack gi0 = 0;
+ typename I::Rack gi1 = 0;
+ typename I::Rack g0 = 0;
+
+ for (size_t i = 0; i < n; ++i) {
+ if (mask.test(m)) { // Unconstrained bit
+ gi1 = gi0;
+ gi0 = *ri;
+ g0 = gi0 ^ gi1;
+ if (gi0) {
+ gi.set(m);
+ }
+ if (g0) {
+ g.set(m);
+ }
+ --ri;
+ } else { // Constrained bit
+ g0 = (ptrn.test(m) > 0);
+ gi1 = gi0;
+ gi0 = g0 ^ gi1;
+ if (gi0) {
+ gi.set(m);
+ }
+ if (g0) {
+ g.set(m);
+ }
+ }
+
+ --m;
+ }
}
-template <class I>
+template<class I>
inline void
extract_mask(const size_t* const ms,
const size_t n,
@@ -155,25 +155,25 @@ extract_mask(const size_t* const ms,
I& mask,
size_t& b)
{
- assert(d < n);
- assert(mask.size() == n);
-
- mask.reset();
- b = 0;
-
- auto mi = mask.begin();
- size_t j = d; // #D j = (d==n-1) ? 0 : d+1;
- do {
- if (ms[j] > i) {
- mi.set();
- ++b;
- }
-
- ++mi;
- if (++j == n) {
- j = 0;
- }
- } while (j != d);
+ assert(d < n);
+ assert(mask.size() == n);
+
+ mask.reset();
+ b = 0;
+
+ auto mi = mask.begin();
+ size_t j = d; // #D j = (d==n-1) ? 0 : d+1;
+ do {
+ if (ms[j] > i) {
+ mi.set();
+ ++b;
+ }
+
+ ++mi;
+ if (++j == n) {
+ j = 0;
+ }
+ } while (j != d);
}
} // namespace detail
diff --git a/include/chilbert/detail/operations.hpp b/include/chilbert/detail/operations.hpp
index a196ba4..ed9dfac 100644
--- a/include/chilbert/detail/operations.hpp
+++ b/include/chilbert/detail/operations.hpp
@@ -32,135 +32,139 @@ namespace chilbert {
namespace detail {
/// Reset all bits in `field`
-template <typename T>
+template<typename T>
std::enable_if_t<std::is_integral<T>::value>
reset_bits(T& field)
{
- field = static_cast<T>(0);
+ field = static_cast<T>(0);
}
/// Reset all bits in `field`
-template <typename T>
+template<typename T>
std::enable_if_t<is_bitvec_v<T>>
reset_bits(T& field)
{
- field.reset();
+ field.reset();
}
/// Return the `index`th bit in `field`
-template <typename T>
+template<typename T>
std::enable_if_t<std::is_integral<T>::value, bool>
test_bit(const T& field, const size_t index)
{
- assert(size_t(index) < sizeof(field) * CHAR_BIT);
- return field & (T{1} << index);
+ assert(size_t(index) < sizeof(field) * CHAR_BIT);
+ return field & (T{1} << index);
}
/// Return the `index`th bit in `field`
-template <typename T>
+template<typename T>
std::enable_if_t<is_bitvec_v<T>, bool>
test_bit(const T& field, const size_t index)
{
- return field.test(index);
+ return field.test(index);
}
/// Set the `index`th bit in `field`
-template <typename T>
+template<typename T>
std::enable_if_t<std::is_integral<T>::value>
set_bit(T& field, const size_t index)
{
- assert(size_t(index) < sizeof(field) * CHAR_BIT);
- field |= (T{1} << index);
- assert(test_bit(field, index));
+ assert(size_t(index) < sizeof(field) * CHAR_BIT);
+ field |= (T{1} << index);
+ assert(test_bit(field, index));
}
/// Set the `index`th bit in `field` to `value`
-template <typename T>
+template<typename T>
std::enable_if_t<std::is_integral<T>::value>
set_bit(T& field, const size_t index, const bool value)
{
- assert(size_t(index) < sizeof(field) * CHAR_BIT);
- field ^= (-T{value} ^ field) & (T{1U} << index);
- assert(test_bit(field, index) == value);
+ assert(size_t(index) < sizeof(field) * CHAR_BIT);
+ field ^= (-T{value} ^ field) & (T{1U} << index);
+ assert(test_bit(field, index) == value);
}
/// Set the `index`th bit in `field`
-template <typename T>
+template<typename T>
std::enable_if_t<is_bitvec_v<T>>
set_bit(T& field, const size_t index)
{
- field.set(index);
+ field.set(index);
}
/// Set the `index`th bit in `field` to `value`
-template <typename T>
+template<typename T>
std::enable_if_t<is_bitvec_v<T>>
set_bit(T& field, const size_t index, const bool value)
{
- field.set(index, value);
+ field.set(index, value);
}
/// Return the number of set bits in `field`
-template <typename T>
-inline int pop_count(const T& field);
+template<typename T>
+inline int
+pop_count(const T& field);
-template <>
+template<>
inline int
pop_count<unsigned long>(const unsigned long& field)
{
- return __builtin_popcountl(field);
+ return __builtin_popcountl(field);
}
-template <>
+template<>
inline int
pop_count<unsigned long long>(const unsigned long long& field)
{
- return __builtin_popcountll(field);
+ return __builtin_popcountll(field);
}
/// Return 1 + the index of the least significant 1-bit of `field`, or zero
-template <typename T>
-inline int find_first(const T field);
+template<typename T>
+inline int
+find_first(const T field);
-template <>
+template<>
inline int
find_first<unsigned long>(const unsigned long field)
{
- return __builtin_ffsl(static_cast<long>(field));
+ return __builtin_ffsl(static_cast<long>(field));
}
-template <>
+template<>
inline int
find_first<unsigned long long>(const unsigned long long field)
{
- return __builtin_ffsll(static_cast<long long>(field));
+ return __builtin_ffsll(static_cast<long long>(field));
}
/// Calculates the Gray Code of `value` in place
-template <typename T>
-std::enable_if_t<is_bitvec_v<T>> gray_code(T& value);
+template<typename T>
+std::enable_if_t<is_bitvec_v<T>>
+gray_code(T& value);
/// Implementation of gray_code for any integral type
-template <typename T>
+template<typename T>
std::enable_if_t<std::is_integral<T>::value>
gray_code(T& value)
{
- value ^= (value >> 1U);
+ value ^= (value >> 1U);
}
/// Calculates the inverse Gray Code of `value` in place
-template <typename T>
-std::enable_if_t<is_bitvec_v<T>> gray_code_inv(T& value);
+template<typename T>
+std::enable_if_t<is_bitvec_v<T>>
+gray_code_inv(T& value);
/// Implementation of gray_code_inv for any integral type
-template <typename T>
+template<typename T>
std::enable_if_t<std::is_integral<T>::value>
gray_code_inv(T& value)
{
- constexpr T shift_end = sizeof(T) * CHAR_BIT;
- for (T shift = 1U; shift < shift_end; shift <<= 1U) {
- value ^= (value >> shift);
- }
+ constexpr T shift_end = sizeof(T) * CHAR_BIT;
+ for (T shift = 1U; shift < shift_end; shift <<= 1U) {
+ value ^= (value >> shift);
+ }
}
} // namespace detail
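A minimal sketch exercising the integral overloads above, assuming chilbert/detail/operations.hpp can be included directly:

#include "chilbert/detail/operations.hpp"

#include <cassert>

int main()
{
  // Gray code of an integer and its inverse round-trip
  unsigned long v = 11;           // 0b1011
  chilbert::detail::gray_code(v); // v ^= v >> 1
  assert(v == 14);                // 0b1110
  chilbert::detail::gray_code_inv(v);
  assert(v == 11);

  // find_first returns 1 + the index of the least significant set bit
  assert(chilbert::detail::find_first(8UL) == 4);
  assert(chilbert::detail::find_first(0UL) == 0);

  // pop_count returns the number of set bits
  assert(chilbert::detail::pop_count(0xF0UL) == 4);
  return 0;
}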
diff --git a/include/chilbert/detail/traits.hpp b/include/chilbert/detail/traits.hpp
index 3e3f4d2..e6a2023 100644
--- a/include/chilbert/detail/traits.hpp
+++ b/include/chilbert/detail/traits.hpp
@@ -23,14 +23,13 @@ namespace chilbert {
namespace detail {
/// Member `value` is true iff T is a chilbert bitset
-template <class T>
-struct is_bitvec
-{
- static constexpr bool value = false;
+template<class T>
+struct is_bitvec {
+ static constexpr bool value = false;
};
/// True iff T is a chilbert bitset
-template <class T>
+template<class T>
static constexpr bool is_bitvec_v = is_bitvec<T>::value;
} // namespace detail
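A type opts into the generic operators and algorithms by specializing this trait. A hypothetical sketch (MyBitVec and its empty body are assumptions for illustration; the library's own vector headers provide the real specializations):

#include "chilbert/detail/traits.hpp"

// Hypothetical user-defined bit-vector type, used only to show the
// specialization pattern.
struct MyBitVec {};

namespace chilbert {
namespace detail {

// Marking MyBitVec as a bit vector enables the operators and algorithms
// constrained on is_bitvec_v.
template<>
struct is_bitvec<MyBitVec> {
  static constexpr bool value = true;
};

} // namespace detail
} // namespace chilbert

static_assert(chilbert::detail::is_bitvec_v<MyBitVec>, "trait enabled");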
diff --git a/include/chilbert/operators.hpp b/include/chilbert/operators.hpp
index efad7f4..4a92e0d 100644
--- a/include/chilbert/operators.hpp
+++ b/include/chilbert/operators.hpp
@@ -28,67 +28,68 @@ namespace chilbert {
using detail::is_bitvec_v;
-template <class T>
-std::enable_if_t<is_bitvec_v<T>, T> operator&(const T& lhs, const T& rhs)
+template<class T>
+std::enable_if_t<is_bitvec_v<T>, T>
+operator&(const T& lhs, const T& rhs)
{
- T r{lhs};
- r &= rhs;
- return r;
+ T r{lhs};
+ r &= rhs;
+ return r;
}
-template <class T>
+template<class T>
std::enable_if_t<is_bitvec_v<T>, T>
operator|(const T& lhs, const T& rhs)
{
- T r{lhs};
- r |= rhs;
- return r;
+ T r{lhs};
+ r |= rhs;
+ return r;
}
-template <class T>
+template<class T>
std::enable_if_t<is_bitvec_v<T>, T>
operator^(const T& lhs, const T& rhs)
{
- T r{lhs};
- r ^= rhs;
- return r;
+ T r{lhs};
+ r ^= rhs;
+ return r;
}
-template <class T>
+template<class T>
std::enable_if_t<is_bitvec_v<T>, T>
operator~(const T& vec)
{
- T r{vec};
- r.flip();
- return r;
+ T r{vec};
+ r.flip();
+ return r;
}
-template <class T>
+template<class T>
std::enable_if_t<is_bitvec_v<T>, T>
operator<<(const T& vec, const size_t bits)
{
- T r{vec};
- r <<= bits;
- return r;
+ T r{vec};
+ r <<= bits;
+ return r;
}
-template <class T>
+template<class T>
std::enable_if_t<is_bitvec_v<T>, T>
operator>>(const T& vec, const size_t bits)
{
- T r{vec};
- r >>= bits;
- return r;
+ T r{vec};
+ r >>= bits;
+ return r;
}
-template <class T>
+template<class T>
inline std::enable_if_t<is_bitvec_v<T>, std::ostream>&
operator<<(std::ostream& os, const T& vec)
{
- for (size_t i = 0; i < vec.size(); ++i) {
- os << vec.test(vec.size() - i - 1);
- }
- return os;
+ for (size_t i = 0; i < vec.size(); ++i) {
+ os << vec.test(vec.size() - i - 1);
+ }
+ return os;
}
} // namespace chilbert
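Because these operators are free functions constrained on is_bitvec_v, they compose on any chilbert bit vector. A minimal sketch, assuming chilbert::SmallBitVec can be constructed from a bit count and starts out zeroed (both assumptions here):

#include "chilbert/SmallBitVec.hpp"
#include "chilbert/operators.hpp"

#include <iostream>

int main()
{
  chilbert::SmallBitVec a(8); // 8-bit vectors (constructor assumed)
  chilbert::SmallBitVec b(8);

  a.set(1);
  b.set(3);

  const auto c = (a | b) << 1; // bitwise OR, then shift, via the free operators
  std::cout << c << "\n";      // prints most significant bit first: 00010100
  return 0;
}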