author     David Robillard <d@drobilla.net>  2018-08-11 22:40:03 +0200
committer  David Robillard <d@drobilla.net>  2018-09-29 14:46:25 +0200
commit     50145430b5fb08802bc22a6ae06351a11a091c60 (patch)
tree       4ba0d1c8c5b76e7608fbd8ffbc985ba8a9f09622 /chilbert
parent     df0d52f4bd78c2197a88a805a1dd402978df3290 (diff)
Clean up types and fix every even remotely reasonable warning
Diffstat (limited to 'chilbert')
-rw-r--r--  chilbert/Algorithm.hpp    | 140
-rw-r--r--  chilbert/BigBitVec.hpp    | 123
-rw-r--r--  chilbert/FixBitVec.hpp    |  57
-rw-r--r--  chilbert/GetBits.hpp      |   4
-rw-r--r--  chilbert/GetLocation.hpp  |   4
-rw-r--r--  chilbert/GrayCodeRank.hpp |  65
-rw-r--r--  chilbert/Hilbert.hpp      | 102
-rw-r--r--  chilbert/Operations.hpp   |  14
-rw-r--r--  chilbert/SetBits.hpp      |   4
-rw-r--r--  chilbert/SetLocation.hpp  |   4
10 files changed, 276 insertions(+), 241 deletions(-)
diff --git a/chilbert/Algorithm.hpp b/chilbert/Algorithm.hpp
index d43a42b..7d40f2d 100644
--- a/chilbert/Algorithm.hpp
+++ b/chilbert/Algorithm.hpp
@@ -54,7 +54,7 @@ namespace chilbert {
// 'Transforms' a point.
template <class I>
inline void
-transform(const I& e, int d, int n, I& a)
+transform(const I& e, const size_t d, const size_t n, I& a)
{
a ^= e;
a.rotr(d, n); //#D d+1, n );
@@ -63,7 +63,7 @@ transform(const I& e, int d, int n, I& a)
// Inverse 'transforms' a point.
template <class I>
inline void
-transformInv(const I& e, int d, int n, I& a)
+transformInv(const I& e, const size_t d, const size_t n, I& a)
{
a.rotl(d, n); //#D d+1, n );
a ^= e;
@@ -72,7 +72,7 @@ transformInv(const I& e, int d, int n, I& a)
// Update for method 1 (GrayCodeInv in the loop)
template <class I>
inline void
-update1(const I& l, const I& t, const I& w, int n, I& e, int& d)
+update1(const I& l, const I& t, const I& w, const size_t n, I& e, size_t& d)
{
assert(0 <= d && d < n);
e = l;
@@ -96,7 +96,7 @@ update1(const I& l, const I& t, const I& w, int n, I& e, int& d)
// Update for method 2 (GrayCodeInv out of loop)
template <class I>
inline void
-update2(const I& l, const I& t, const I& w, int n, I& e, int& d)
+update2(const I& l, const I& t, const size_t n, I& e, size_t& d)
{
assert(0 <= d && d < n);
e = l;
@@ -115,13 +115,13 @@ update2(const I& l, const I& t, const I& w, int n, I& e, int& d)
template <class P, class H, class I>
inline void
-_coordsToIndex(const P* p,
- int m,
- int n,
- H& h,
- I&& scratch,
- int* ds = nullptr // #HACK
-)
+_coordsToIndex(const P* const p,
+ const size_t m,
+ const size_t n,
+ H& h,
+ I&& scratch,
+ size_t* const ds = nullptr // #HACK
+ )
{
I e{std::move(scratch)};
I l{e};
@@ -134,16 +134,16 @@ _coordsToIndex(const P* p,
h = 0U;
// Work from MSB to LSB
- int d = D0;
- int ho = m * n;
- for (int i = m - 1; i >= 0; i--) {
+ size_t d = D0;
+ size_t ho = m * n;
+ for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
// #HACK
if (ds) {
ds[i] = d;
}
// Get corner of sub-hypercube where point lies.
- getLocation<P, I>(p, n, i, l);
+ getLocation<P, I>(p, n, static_cast<size_t>(i), l);
// Mirror and reflect the location.
// t = T_{(e,d)}(l)
@@ -151,7 +151,7 @@ _coordsToIndex(const P* p,
transform<I>(e, d, n, t);
w = t;
- if (i < m - 1) {
+ if (static_cast<size_t>(i) < m - 1) {
w.flip(n - 1);
}
@@ -160,7 +160,7 @@ _coordsToIndex(const P* p,
setBits<H, I>(h, n, ho, w);
// Update the entry point and direction.
- update2<I>(l, t, w, n, e, d);
+ update2<I>(l, t, n, e, d);
}
grayCodeInv(h);
@@ -174,11 +174,11 @@ _coordsToIndex(const P* p,
// Assumes h is big enough for the output (n*m bits!)
template <class P, class H>
inline void
-coordsToIndex(const P* p, // [in ] point
- int m, // [in ] precision of each dimension in bits
- int n, // [in ] number of dimensions
- H& h // [out] Hilbert index
-)
+coordsToIndex(const P* const p, // [in ] point
+ const size_t m, // [in ] precision of each dimension in bits
+ const size_t n, // [in ] number of dimensions
+ H& h // [out] Hilbert index
+ )
{
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width
@@ -191,7 +191,7 @@ coordsToIndex(const P* p, // [in ] point
template <class P, class H, class I>
inline void
-_indexToCoords(P* p, int m, int n, const H& h, I&& scratch)
+_indexToCoords(P* p, const size_t m, const size_t n, const H& h, I&& scratch)
{
I e{std::move(scratch)};
I l{e};
@@ -201,14 +201,14 @@ _indexToCoords(P* p, int m, int n, const H& h, I&& scratch)
// Initialize
e.reset();
l.reset();
- for (int j = 0; j < n; j++) {
+ for (size_t j = 0; j < n; j++) {
p[j] = 0U;
}
// Work from MSB to LSB
- int d = D0;
- int ho = m * n;
- for (int i = m - 1; i >= 0; i--) {
+ size_t d = D0;
+ size_t ho = m * n;
+ for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
// Get the Hilbert index bits
ho -= n;
getBits<H, I>(h, n, ho, w);
@@ -224,7 +224,7 @@ _indexToCoords(P* p, int m, int n, const H& h, I&& scratch)
// Distribute these bits
// to the coordinates.
- setLocation<P, I>(p, n, i, l);
+ setLocation<P, I>(p, n, static_cast<size_t>(i), l);
// Update the entry point and direction.
update1<I>(l, t, w, n, e, d);
@@ -240,11 +240,11 @@ _indexToCoords(P* p, int m, int n, const H& h, I&& scratch)
// appropriate variable.
template <class P, class H>
inline void
-indexToCoords(P* p, // [out] point
- int m, // [in ] precision of each dimension in bits
- int n, // [in ] number of dimensions
- const H& h // [out] Hilbert index
-)
+indexToCoords(P* const p, // [out] point
+ const size_t m, // [in ] precision of each dimension in bits
+ const size_t n, // [in ] number of dimensions
+ const H& h // [out] Hilbert index
+ )
{
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width
@@ -257,18 +257,18 @@ indexToCoords(P* p, // [out] point
template <class P, class HC, class I>
inline void
-_coordsToCompactIndex(const P* p,
- const int* ms,
- int n,
- HC& hc,
- I&& scratch,
- int M = 0,
- int m = 0)
+_coordsToCompactIndex(const P* const p,
+ const size_t* const ms,
+ const size_t n,
+ HC& hc,
+ I&& scratch,
+ size_t M = 0,
+ size_t m = 0)
{
// Get total precision and max precision if not supplied
if (M == 0 || m == 0) {
M = m = 0;
- for (int i = 0; i < n; i++) {
+ for (size_t i = 0; i < n; i++) {
if (ms[i] > m) {
m = ms[i];
}
@@ -276,12 +276,12 @@ _coordsToCompactIndex(const P* p,
}
}
- const int mn = m * n;
+ const size_t mn = m * n;
// If we could avoid allocation altogether (ie: have a
// fixed buffer allocated on the stack) then this increases
// speed by a bit (4% when n=4, m=20)
- int* const ds = new int[m];
+ size_t* const ds = new size_t[m];
if (mn > FBV_BITS) {
CBigBitVec h(mn);
@@ -304,12 +304,13 @@ _coordsToCompactIndex(const P* p,
// Assumes h is big enough for the output (n*m bits!)
template <class P, class HC>
inline void
-coordsToCompactIndex(const P* p, // [in ] point
- const int* ms, // [in ] precision of each dimension in bits
- int n, // [in ] number of dimensions
- HC& hc, // [out] Hilbert index
- int M = 0,
- int m = 0)
+coordsToCompactIndex(
+ const P* const p, // [in ] point
+ const size_t* const ms, // [in ] precision of each dimension in bits
+ size_t n, // [in ] number of dimensions
+ HC& hc, // [out] Hilbert index
+ const size_t M = 0,
+ const size_t m = 0)
{
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width?
@@ -324,13 +325,13 @@ coordsToCompactIndex(const P* p, // [in ] point
template <class P, class HC, class I>
inline void
-_compactIndexToCoords(P* p,
- const int* ms,
- int n,
- const HC& hc,
- I&& scratch,
- int M = 0,
- int m = 0)
+_compactIndexToCoords(P* const p,
+ const size_t* ms,
+ const size_t n,
+ const HC& hc,
+ I&& scratch,
+ size_t M = 0,
+ size_t m = 0)
{
I e{std::move(scratch)};
I l{e};
@@ -344,7 +345,7 @@ _compactIndexToCoords(P* p,
// if not supplied
if (M == 0 || m == 0) {
M = m = 0;
- for (int i = 0; i < n; i++) {
+ for (size_t i = 0; i < n; i++) {
if (ms[i] > m) {
m = ms[i];
}
@@ -355,17 +356,17 @@ _compactIndexToCoords(P* p,
// Initialize
e.reset();
l.reset();
- for (int j = 0; j < n; j++) {
+ for (size_t j = 0; j < n; j++) {
p[j] = 0;
}
// Work from MSB to LSB
- int d = D0;
+ size_t d = D0;
- for (int i = m - 1; i >= 0; i--) {
+ for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
// Get the mask and ptrn
- int b = 0;
- extractMask<I>(ms, n, d, i, mask, b);
+ size_t b = 0;
+ extractMask<I>(ms, n, d, static_cast<size_t>(i), mask, b);
ptrn = e;
ptrn.rotr(d, n); //#D ptrn.Rotr(d+1,n);
@@ -385,7 +386,7 @@ _compactIndexToCoords(P* p,
// Distribute these bits
// to the coordinates.
- setLocation<P, I>(p, n, i, l);
+ setLocation<P, I>(p, n, static_cast<size_t>(i), l);
// Update the entry point and direction.
update1<I>(l, t, w, n, e, d);
@@ -401,12 +402,13 @@ _compactIndexToCoords(P* p,
// appropriate variable.
template <class P, class HC>
inline void
-compactIndexToCoords(P* p, // [out] point
- const int* ms, // [in ] precision of each dimension in bits
- int n, // [in ] number of dimensions
- const HC& hc, // [out] Hilbert index
- int M = 0,
- int m = 0)
+compactIndexToCoords(
+ P* const p, // [out] point
+ const size_t* ms, // [in ] precision of each dimension in bits
+ const size_t n, // [in ] number of dimensions
+ const HC& hc, // [out] Hilbert index
+ const size_t M = 0,
+ const size_t m = 0)
{
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width
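For context, a minimal usage sketch of the public entry points whose signatures change above (an assumption added for illustration, not part of the commit); it uses only constructors and methods that appear elsewhere in this diff:

#include "chilbert/Hilbert.hpp"

#include <cstddef>

int main()
{
    const size_t n = 3;  // number of dimensions
    const size_t m = 16; // precision per dimension in bits; n*m = 48 <= FBV_BITS

    chilbert::CFixBitVec p[3]; // point: one fixed-width bit vector per dimension
    chilbert::CFixBitVec h;    // Hilbert index: needs n*m bits, fits in one rack

    p[0].set(0); // coordinate 0 = 1
    p[1].set(1); // coordinate 1 = 2
    p[2].set(2); // coordinate 2 = 4

    chilbert::coordsToIndex(p, m, n, h); // point -> Hilbert index
    chilbert::indexToCoords(p, m, n, h); // Hilbert index -> point (round trip)
    return 0;
}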
diff --git a/chilbert/BigBitVec.hpp b/chilbert/BigBitVec.hpp
index 8b315a0..83b8a5b 100644
--- a/chilbert/BigBitVec.hpp
+++ b/chilbert/BigBitVec.hpp
@@ -23,24 +23,19 @@
#include <algorithm>
#include <cassert>
+#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <memory>
-#define FBVS_NEEDED(b) ((std::max(b, 1) + FBV_BITS - 1) / FBV_BITS)
-#define BBV_MODSPLIT(r, b, k) \
- { \
- b = (k); \
- r = b / FBV_BITS; \
- b -= r * FBV_BITS; \
- }
+#define FBVS_NEEDED(b) ((std::max(b, size_t(1)) + FBV_BITS - 1) / FBV_BITS)
namespace chilbert {
class CBigBitVec
{
public:
- CBigBitVec(const int bits = 0)
+ CBigBitVec(const size_t bits = 0)
: m_pcRacks{make_racks(FBVS_NEEDED(bits))}
, m_iRacks{bits == 0 ? 0 : FBVS_NEEDED(bits)}
{
@@ -67,7 +62,7 @@ public:
}
/// Return the size in bits
- int size() const { return m_iRacks * FBV_BITS; }
+ size_t size() const { return m_iRacks * FBV_BITS; }
/// Set all bits to zero
CBigBitVec& reset()
@@ -84,9 +79,9 @@ public:
}
/// Truncate to a given precision in bits (zero MSBs)
- CBigBitVec& truncate(const int bits)
+ CBigBitVec& truncate(const size_t bits)
{
- assert(bits >= 0 && bits <= size());
+ assert(bits <= size());
const Ref ref(bits);
if (ref.rack >= m_iRacks) {
return *this;
@@ -95,7 +90,7 @@ public:
// Truncate rack that contains the split point
m_pcRacks[ref.rack] &= FBVN1S(ref.bit);
- for (int i = ref.rack + 1; i < m_iRacks; ++i) {
+ for (size_t i = ref.rack + 1; i < m_iRacks; ++i) {
m_pcRacks[i] = 0;
}
@@ -159,44 +154,44 @@ public:
}
/// Return the value of the `index`th bit
- bool test(const int index) const
+ bool test(const size_t index) const
{
- assert(index >= 0 && index < size());
+ assert(index < size());
const Ref ref(index);
return testBit(m_pcRacks[ref.rack], ref.bit);
}
/// Set the `index`th bit to 1
- CBigBitVec& set(const int index)
+ CBigBitVec& set(const size_t index)
{
- assert(index >= 0 && index < size());
+ assert(index < size());
const Ref ref(index);
setBit(m_pcRacks[ref.rack], ref.bit);
return *this;
}
/// Reset the `index`th bit to 0
- CBigBitVec& reset(const int index)
+ CBigBitVec& reset(const size_t index)
{
- assert(index >= 0 && index < size());
+ assert(index < size());
const Ref ref(index);
- m_pcRacks[ref.rack] &= ~((FBV_UINT)1 << ref.bit);
+ m_pcRacks[ref.rack] &= ~(FBV_UINT{1} << ref.bit);
return *this;
}
/// Set the `index`th bit to `value`
- CBigBitVec& set(const int index, const bool value)
+ CBigBitVec& set(const size_t index, const bool value)
{
- assert(index >= 0 && index < size());
+ assert(index < size());
const Ref ref(index);
setBit(m_pcRacks[ref.rack], ref.bit, value);
return *this;
}
/// Flip the value of the `index`th bit
- CBigBitVec& flip(const int index)
+ CBigBitVec& flip(const size_t index)
{
- assert(index >= 0 && index < size());
+ assert(index < size());
const Ref ref(index);
m_pcRacks[ref.rack] ^= (FBV1 << ref.bit);
return *this;
@@ -204,7 +199,7 @@ public:
CBigBitVec& operator&=(const CBigBitVec& vec)
{
- for (int i = 0; i < std::min(m_iRacks, vec.m_iRacks); ++i) {
+ for (size_t i = 0; i < std::min(m_iRacks, vec.m_iRacks); ++i) {
m_pcRacks[i] &= vec.m_pcRacks[i];
}
@@ -221,7 +216,7 @@ public:
CBigBitVec& operator|=(const CBigBitVec& vec)
{
- for (int i = 0; i < std::min(m_iRacks, vec.m_iRacks); ++i) {
+ for (size_t i = 0; i < std::min(m_iRacks, vec.m_iRacks); ++i) {
m_pcRacks[i] |= vec.m_pcRacks[i];
}
@@ -238,7 +233,7 @@ public:
CBigBitVec& operator^=(const CBigBitVec& vec)
{
- for (int i = 0; i < std::min(m_iRacks, vec.m_iRacks); ++i) {
+ for (size_t i = 0; i < std::min(m_iRacks, vec.m_iRacks); ++i) {
m_pcRacks[i] ^= vec.m_pcRacks[i];
}
@@ -253,9 +248,9 @@ public:
return t;
}
- CBigBitVec& operator<<=(const int bits)
+ CBigBitVec& operator<<=(const size_t bits)
{
- assert(bits >= 0);
+ assert(bits < size());
// No shift?
if (bits == 0) {
@@ -272,19 +267,18 @@ public:
// Do rack shifts.
if (ref.rack > 0) {
- int i;
- for (i = m_iRacks - 1; i >= ref.rack; --i) {
+ for (size_t i = m_iRacks - 1; i >= ref.rack; --i) {
m_pcRacks[i] = m_pcRacks[i - ref.rack];
}
- for (; i >= 0; --i) {
+ for (size_t i = 0; i < ref.rack; ++i) {
m_pcRacks[i] = 0;
}
}
// Do bit shifts.
if (ref.bit > 0) {
- int bi = FBV_BITS - ref.bit;
- int i;
+ size_t bi = FBV_BITS - ref.bit;
+ size_t i;
for (i = m_iRacks - 1; i >= ref.rack + 1; --i) {
m_pcRacks[i] <<= ref.bit;
m_pcRacks[i] |= m_pcRacks[i - 1] >> bi;
@@ -295,16 +289,16 @@ public:
return *this;
}
- CBigBitVec operator<<(const int bits) const
+ CBigBitVec operator<<(const size_t bits) const
{
CBigBitVec t(*this);
t <<= bits;
return t;
}
- CBigBitVec& operator>>=(const int bits)
+ CBigBitVec& operator>>=(const size_t bits)
{
- assert(bits >= 0);
+ assert(bits < size());
// No shift?
if (bits == 0) {
@@ -321,7 +315,7 @@ public:
// Do rack shifts.
if (ref.rack > 0) {
- int i;
+ size_t i;
for (i = 0; i < m_iRacks - ref.rack; ++i) {
m_pcRacks[i] = m_pcRacks[i + ref.rack];
}
@@ -332,8 +326,8 @@ public:
// Do bit shifts.
if (ref.bit > 0) {
- int bi = FBV_BITS - ref.bit;
- int i;
+ size_t bi = FBV_BITS - ref.bit;
+ size_t i;
for (i = 0; i < m_iRacks - ref.rack - 1; ++i) {
m_pcRacks[i] >>= ref.bit;
m_pcRacks[i] |= m_pcRacks[i + 1] << bi;
@@ -344,7 +338,7 @@ public:
return *this;
}
- CBigBitVec operator>>(const int bits) const
+ CBigBitVec operator>>(const size_t bits) const
{
CBigBitVec t(*this);
t >>= bits;
@@ -352,10 +346,8 @@ public:
}
/// Right-rotate the least significant `width` bits by `bits` positions
- CBigBitVec& rotr(const int bits, int width)
+ CBigBitVec& rotr(const size_t bits, size_t width)
{
- assert(bits >= 0);
-
// Fill in the width, if necessary.
if (width <= 0) {
width = size();
@@ -381,10 +373,8 @@ public:
}
/// Left-rotate the least significant `width` bits by `bits` positions
- CBigBitVec& rotl(const int bits, int width)
+ CBigBitVec& rotl(const size_t bits, size_t width)
{
- assert(bits >= 0);
-
// Fill in the width, if necessary.
if (width <= 0) {
width = size();
@@ -412,7 +402,7 @@ public:
/// Return true iff all bits are zero
bool none() const
{
- for (int i = 0; i < m_iRacks; ++i) {
+ for (size_t i = 0; i < m_iRacks; ++i) {
if (m_pcRacks[i]) {
return false;
}
@@ -421,12 +411,12 @@ public:
}
/// Return 1 + the index of the first set bit, or 0 if there are none
- int find_first() const
+ size_t find_first() const
{
- for (int i = 0; i < m_iRacks; ++i) {
+ for (size_t i = 0; i < m_iRacks; ++i) {
const int j = ffs(m_pcRacks[i]);
if (j) {
- return (i * FBV_BITS) + j;
+ return (i * FBV_BITS) + static_cast<size_t>(j);
}
}
return 0;
@@ -435,7 +425,7 @@ public:
/// Flip all bits (one's complement)
CBigBitVec& flip()
{
- for (int i = 0; i < m_iRacks; ++i) {
+ for (size_t i = 0; i < m_iRacks; ++i) {
m_pcRacks[i] = ~m_pcRacks[i];
}
return *this;
@@ -450,19 +440,19 @@ public:
const FBV_UINT* racks() const { return m_pcRacks.get(); }
/// Return the number of racks
- int rackCount() const { return m_iRacks; }
+ size_t rackCount() const { return m_iRacks; }
private:
struct Ref
{
- Ref(const int bits)
+ Ref(const size_t bits)
: rack{bits / FBV_BITS}
, bit{bits - rack * FBV_BITS}
{
}
- int rack;
- int bit;
+ size_t rack;
+ size_t bit;
};
struct RacksDeleter
@@ -472,24 +462,25 @@ private:
using RacksPtr = std::unique_ptr<FBV_UINT[], RacksDeleter>;
- static RacksPtr make_racks(const int n)
+ static RacksPtr make_racks(const size_t n)
{
return RacksPtr{static_cast<FBV_UINT*>(calloc(n, sizeof(FBV_UINT)))};
}
// Right rotates entire racks (in place).
- void rackRotr(int k)
+ void rackRotr(const size_t k)
{
- assert(0 <= k && k < m_iRacks);
+ assert(k < m_iRacks);
if (k == 0) {
return;
}
- int c = 0;
- for (int v = 0; c < m_iRacks; ++v) {
- int t = v, tp = v + k;
- const int tmp = m_pcRacks[v];
+ size_t c = 0;
+ for (size_t v = 0; c < m_iRacks; ++v) {
+ size_t t = v;
+ size_t tp = v + k;
+ const FBV_UINT tmp = m_pcRacks[v];
c++;
while (tp != v) {
m_pcRacks[t] = m_pcRacks[tp];
@@ -505,7 +496,7 @@ private:
}
RacksPtr m_pcRacks;
- int m_iRacks;
+ size_t m_iRacks;
};
template <>
@@ -514,7 +505,8 @@ grayCode(CBigBitVec& value)
{
FBV_UINT s = 0;
- for (int i = value.rackCount() - 1; i >= 0; --i) {
+ for (intptr_t i = static_cast<intptr_t>(value.rackCount() - 1); i >= 0;
+ --i) {
const FBV_UINT t = value.racks()[i] & 1;
grayCode(value.racks()[i]);
value.racks()[i] ^= (s << (FBV_BITS - 1));
@@ -528,7 +520,8 @@ grayCodeInv(CBigBitVec& value)
{
FBV_UINT s = 0;
- for (int i = value.rackCount() - 1; i >= 0; --i) {
+ for (intptr_t i = static_cast<intptr_t>(value.rackCount() - 1); i >= 0;
+ --i) {
FBV_UINT& rack = value.racks()[i];
grayCodeInv(rack);
if (s) {
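A side note on the loop rewrites above (a sketch under assumptions, not code from the commit): once counters become size_t, descending loops can no longer test `i >= 0`, which is why the hunks above either cast through intptr_t or restructure the loop. Two equivalent patterns:

#include <cstddef>
#include <cstdint>

void descend(const size_t m)
{
    // A variant of the cast pattern used above: run the counter as a signed
    // type wide enough for the value, casting back where an index is needed.
    for (intptr_t i = static_cast<intptr_t>(m) - 1; i >= 0; --i) {
        const size_t index = static_cast<size_t>(i);
        (void)index; // use `index` here
    }

    // Unsigned-only alternative (an assumption, not what the commit uses):
    // decrement before the body so the loop terminates cleanly at zero.
    for (size_t i = m; i-- > 0;) {
        (void)i; // use `i` here
    }
}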
diff --git a/chilbert/FixBitVec.hpp b/chilbert/FixBitVec.hpp
index 9df2ada..e47b868 100644
--- a/chilbert/FixBitVec.hpp
+++ b/chilbert/FixBitVec.hpp
@@ -22,6 +22,8 @@
#include "chilbert/Operations.hpp"
#include <cassert>
+#include <climits>
+#include <cstddef>
#include <cstdint>
namespace chilbert {
@@ -33,21 +35,21 @@ typedef uint64_t FBV_UINT;
#define FBV_BITS 64
-#define FBV1 ((FBV_UINT)1)
-#define FBV1S (~(FBV_UINT)0)
+#define FBV1 (FBV_UINT{1})
+#define FBV1S (~FBV_UINT{0})
#define FBVN1S(n) (n == FBV_BITS ? FBV1S : (FBV1 << n) - 1)
class CFixBitVec
{
public:
- CFixBitVec(int bits = FBV_BITS)
+ CFixBitVec(const size_t bits = FBV_BITS)
: m_rack{0}
{
assert(bits <= FBV_BITS);
}
/// Return the size in bits
- int size() const { return FBV_BITS; }
+ size_t size() const { return FBV_BITS; }
/// Set all bits to one
CFixBitVec& set()
@@ -64,40 +66,40 @@ public:
}
/// Return the value of the `index`th bit
- bool test(const int index) const
+ bool test(const size_t index) const
{
- assert(0 <= index && index < FBV_BITS);
+ assert(index < FBV_BITS);
return ((m_rack & (FBV1 << index)) > 0);
}
/// Set the `index`th bit to 1
- CFixBitVec& set(const int index)
+ CFixBitVec& set(const size_t index)
{
- assert(0 <= index && index < FBV_BITS);
- m_rack |= ((FBV_UINT)1 << index);
+ assert(index < FBV_BITS);
+ m_rack |= (FBV_UINT{1} << index);
return *this;
}
/// Reset the `index`th bit to 0
- CFixBitVec& reset(const int index)
+ CFixBitVec& reset(const size_t index)
{
- assert(0 <= index && index < FBV_BITS);
- m_rack &= ~((FBV_UINT)1 << index);
+ assert(index < FBV_BITS);
+ m_rack &= ~(FBV_UINT{1} << index);
return *this;
}
/// Set the `index`th bit to `value`
- CFixBitVec& set(const int index, const bool value)
+ CFixBitVec& set(const size_t index, const bool value)
{
- assert(0 <= index && index < FBV_BITS);
- m_rack ^= (-value ^ m_rack) & ((FBV_UINT)1 << index);
+ assert(index < FBV_BITS);
+ m_rack ^= (-FBV_UINT{value} ^ m_rack) & (FBV_UINT{1U} << index);
return *this;
}
/// Flip the value of the `index`th bit
- CFixBitVec& flip(const int index)
+ CFixBitVec& flip(const size_t index)
{
- assert(0 <= index && index < FBV_BITS);
+ assert(index < FBV_BITS);
m_rack ^= (FBV1 << index);
return *this;
}
@@ -163,26 +165,28 @@ public:
return t;
}
- CFixBitVec& operator<<=(const int bits)
+ CFixBitVec& operator<<=(const size_t bits)
{
+ assert(bits < size());
m_rack <<= bits;
return *this;
}
- CFixBitVec operator<<(const int bits) const
+ CFixBitVec operator<<(const size_t bits) const
{
CFixBitVec t(*this);
t <<= bits;
return t;
}
- CFixBitVec& operator>>=(const int bits)
+ CFixBitVec& operator>>=(const size_t bits)
{
+ assert(bits < size());
m_rack >>= bits;
return *this;
}
- CFixBitVec operator>>(const int bits) const
+ CFixBitVec operator>>(const size_t bits) const
{
CFixBitVec t(*this);
t >>= bits;
@@ -190,9 +194,8 @@ public:
}
/// Right-rotate the least significant `width` bits by `bits` positions
- CFixBitVec& rotr(const int bits, const int width)
+ CFixBitVec& rotr(const size_t bits, const size_t width)
{
- assert(bits >= 0);
assert(width > 0);
assert(bits < width);
m_rack &= FBVN1S(width);
@@ -202,9 +205,8 @@ public:
}
/// Left-rotate the least significant `width` bits by `bits` positions
- CFixBitVec& rotl(int bits, int width)
+ CFixBitVec& rotl(const size_t bits, const size_t width)
{
- assert(bits >= 0);
assert(width > 0);
assert(bits < width);
m_rack &= FBVN1S(width);
@@ -217,7 +219,10 @@ public:
bool none() const { return m_rack == 0; }
/// Return 1 + the index of the first set bit, or 0 if there are none
- int find_first() const { return chilbert::ffs(m_rack); }
+ size_t find_first() const
+ {
+ return static_cast<size_t>(chilbert::ffs(m_rack));
+ }
/// Flip all bits (one's complement)
CFixBitVec& flip()
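The value-taking set() above relies on a branchless conditional bit write; a standalone sketch of the same idiom (an illustration, not part of the library):

#include <cstdint>

// Set or clear bit `index` of `field` according to `value`, without a branch:
// -uint64_t{value} is all zeros or all ones, so the mask picks the new bit.
inline void set_bit(uint64_t& field, const unsigned index, const bool value)
{
    field ^= (-uint64_t{value} ^ field) & (uint64_t{1} << index);
}

int main()
{
    uint64_t f = 0;
    set_bit(f, 3, true);  // f == 0x8
    set_bit(f, 3, false); // f == 0x0
    return f == 0 ? 0 : 1;
}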
diff --git a/chilbert/GetBits.hpp b/chilbert/GetBits.hpp
index 66f6f32..288fd25 100644
--- a/chilbert/GetBits.hpp
+++ b/chilbert/GetBits.hpp
@@ -32,9 +32,9 @@ namespace chilbert {
*/
template <class H, class I>
inline void
-getBits(const H& h, const int n, const int i, I& w)
+getBits(const H& h, const size_t n, const size_t i, I& w)
{
- for (int j = 0; j < n; j++) {
+ for (size_t j = 0; j < n; j++) {
setBit(w, j, testBit(h, i + j));
}
}
diff --git a/chilbert/GetLocation.hpp b/chilbert/GetLocation.hpp
index 06e4b64..816b7b7 100644
--- a/chilbert/GetLocation.hpp
+++ b/chilbert/GetLocation.hpp
@@ -25,9 +25,9 @@ namespace chilbert {
template <class P, class I>
inline void
-getLocation(const P* const p, const int n, const int i, I& l)
+getLocation(const P* const p, const size_t n, const size_t i, I& l)
{
- for (int j = 0; j < n; ++j) {
+ for (size_t j = 0; j < n; ++j) {
setBit(l, j, testBit(p[j], i));
}
}
diff --git a/chilbert/GrayCodeRank.hpp b/chilbert/GrayCodeRank.hpp
index 3083489..7bbfa69 100644
--- a/chilbert/GrayCodeRank.hpp
+++ b/chilbert/GrayCodeRank.hpp
@@ -24,6 +24,13 @@
#include <cassert>
+#define MODSPLIT(r, b, k) \
+ { \
+ b = (k); \
+ r = b / FBV_BITS; \
+ b -= r * FBV_BITS; \
+ }
+
namespace chilbert {
// This is the bulk of the cost in calculating
@@ -32,17 +39,22 @@ namespace chilbert {
// at each level of precision.
template <class H, class HC>
inline void
-compactIndex(const int* ms, const int* ds, int n, int m, H& h, HC& hc)
+compactIndex(const size_t* const ms,
+ const size_t* const ds,
+ const size_t n,
+ const size_t m,
+ H& h,
+ HC& hc)
{
hc = 0;
- int hi = 0;
- int hci = 0;
+ size_t hi = 0;
+ size_t hci = 0;
// Run through the levels of precision
- for (int i = 0; i < m; i++) {
+ for (size_t i = 0; i < m; i++) {
// Run through the dimensions
- int j = ds[i];
+ size_t j = ds[i];
do {
// This dimension contributes a bit?
if (ms[j] > i) {
@@ -62,7 +74,7 @@ compactIndex(const int* ms, const int* ds, int n, int m, H& h, HC& hc)
template <class I>
inline void
-grayCodeRank(const I& mask, const I& gi, int n, I& r)
+grayCodeRank(const I& mask, const I& gi, const size_t n, I& r)
{
r.reset();
@@ -70,7 +82,7 @@ grayCodeRank(const I& mask, const I& gi, int n, I& r)
FBV_UINT jm = 1;
int ir = 0;
FBV_UINT im = 1;
- for (int i = 0; i < n; ++i) {
+ for (size_t i = 0; i < n; ++i) {
if (mask.racks()[ir] & im) {
if (gi.racks()[ir] & im) {
r.racks()[jr] |= jm;
@@ -92,26 +104,26 @@ grayCodeRank(const I& mask, const I& gi, int n, I& r)
template <class I>
inline void
-grayCodeRankInv(const I& mask,
- const I& ptrn,
- const I& r,
- int n,
- int b,
- I& g,
- I& gi)
+grayCodeRankInv(const I& mask,
+ const I& ptrn,
+ const I& r,
+ const size_t n,
+ const size_t b,
+ I& g,
+ I& gi)
{
g.reset();
gi.reset();
- int ir, jr;
+ size_t ir, jr;
FBV_UINT im, jm;
- int i = n - 1;
- BBV_MODSPLIT(ir, im, i);
+ intptr_t i = static_cast<intptr_t>(n - 1);
+ MODSPLIT(ir, im, (n - 1));
im = (FBV1 << im);
- int j = b - 1;
- BBV_MODSPLIT(jr, jm, j);
+ size_t j = b - 1;
+ MODSPLIT(jr, jm, j);
jm = (FBV1 << jm);
FBV_UINT gi0, gi1, g0;
@@ -131,7 +143,7 @@ grayCodeRankInv(const I& mask,
}
jm >>= 1;
if (jm == 0) {
- jm = ((FBV_UINT)1) << (FBV_BITS - 1);
+ jm = (FBV_UINT{1}) << (FBV_BITS - 1);
--jr;
}
} else {
@@ -148,7 +160,7 @@ grayCodeRankInv(const I& mask,
im >>= 1;
if (im == 0) {
- im = ((FBV_UINT)1) << (FBV_BITS - 1);
+ im = (FBV_UINT{1}) << (FBV_BITS - 1);
--ir;
}
}
@@ -156,7 +168,12 @@ grayCodeRankInv(const I& mask,
template <class I>
inline void
-extractMask(const int* ms, int n, int d, int i, I& mask, int& b)
+extractMask(const size_t* const ms,
+ const size_t n,
+ const size_t d,
+ const size_t i,
+ I& mask,
+ size_t& b)
{
assert(0 <= d && d < n);
@@ -164,8 +181,8 @@ extractMask(const int* ms, int n, int d, int i, I& mask, int& b)
b = 0;
FBV_UINT jm = 1;
- int jr = 0;
- int j = d; // #D j = (d==n-1) ? 0 : d+1;
+ size_t jr = 0;
+ size_t j = d; // #D j = (d==n-1) ? 0 : d+1;
do {
if (ms[j] > i) {
mask.racks()[jr] |= jm;
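For reference, MODSPLIT above (moved here from BigBitVec.hpp's old BBV_MODSPLIT) is a div/mod split of a bit index into a rack index and a bit offset; an equivalent standalone sketch (an illustration, not from the commit):

#include <cstddef>

constexpr size_t FBV_BITS = 64; // as defined in FixBitVec.hpp

constexpr size_t rack_of(const size_t k) { return k / FBV_BITS; }
constexpr size_t bit_of(const size_t k)  { return k % FBV_BITS; }

static_assert(rack_of(130) == 2, "bit 130 lives in the third rack");
static_assert(bit_of(130) == 2, "at bit offset 2 within that rack");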
diff --git a/chilbert/Hilbert.hpp b/chilbert/Hilbert.hpp
index 34279ae..a6b48fb 100644
--- a/chilbert/Hilbert.hpp
+++ b/chilbert/Hilbert.hpp
@@ -62,35 +62,41 @@ namespace chilbert {
// fix -> fix
inline void
-coordsToIndex(const CFixBitVec* p, int m, int n, CFixBitVec& h)
+coordsToIndex(const CFixBitVec* const p,
+ const size_t m,
+ const size_t n,
+ CFixBitVec& h)
{
coordsToIndex<CFixBitVec, CFixBitVec>(p, m, n, h);
}
inline void
-indexToCoords(CFixBitVec* p, int m, int n, const CFixBitVec& h)
+indexToCoords(CFixBitVec* const p,
+ const size_t m,
+ const size_t n,
+ const CFixBitVec& h)
{
indexToCoords<CFixBitVec, CFixBitVec>(p, m, n, h);
}
inline void
-coordsToCompactIndex(const CFixBitVec* p,
- const int* ms,
- int n,
- CFixBitVec& hc,
- int M,
- int m)
+coordsToCompactIndex(const CFixBitVec* const p,
+ const size_t* const ms,
+ const size_t n,
+ CFixBitVec& hc,
+ const size_t M,
+ const size_t m)
{
coordsToCompactIndex<CFixBitVec, CFixBitVec>(p, ms, n, hc, M, m);
}
inline void
-compactIndexToCoords(CFixBitVec* p,
- const int* ms,
- int n,
- const CFixBitVec& hc,
- int M,
- int m)
+compactIndexToCoords(CFixBitVec* const p,
+ const size_t* const ms,
+ const size_t n,
+ const CFixBitVec& hc,
+ const size_t M,
+ const size_t m)
{
compactIndexToCoords<CFixBitVec, CFixBitVec>(p, ms, n, hc, M, m);
}
@@ -98,35 +104,41 @@ compactIndexToCoords(CFixBitVec* p,
// fix -> big
inline void
-coordsToIndex(const CFixBitVec* p, int m, int n, CBigBitVec& h)
+coordsToIndex(const CFixBitVec* const p,
+ const size_t m,
+ const size_t n,
+ CBigBitVec& h)
{
coordsToIndex<CFixBitVec, CBigBitVec>(p, m, n, h);
}
inline void
-indexToCoords(CFixBitVec* p, int m, int n, const CBigBitVec& h)
+indexToCoords(CFixBitVec* const p,
+ const size_t m,
+ const size_t n,
+ const CBigBitVec& h)
{
indexToCoords<CFixBitVec, CBigBitVec>(p, m, n, h);
}
inline void
-coordsToCompactIndex(const CFixBitVec* p,
- const int* ms,
- int n,
- CBigBitVec& hc,
- int M,
- int m)
+coordsToCompactIndex(const CFixBitVec* const p,
+ const size_t* const ms,
+ const size_t n,
+ CBigBitVec& hc,
+ const size_t M,
+ const size_t m)
{
coordsToCompactIndex<CFixBitVec, CBigBitVec>(p, ms, n, hc, M, m);
}
inline void
-compactIndexToCoords(CFixBitVec* p,
- const int* ms,
- int n,
- const CBigBitVec& hc,
- int M,
- int m)
+compactIndexToCoords(CFixBitVec* const p,
+ const size_t* const ms,
+ const size_t n,
+ const CBigBitVec& hc,
+ const size_t M,
+ const size_t m)
{
compactIndexToCoords<CFixBitVec, CBigBitVec>(p, ms, n, hc, M, m);
}
@@ -134,35 +146,41 @@ compactIndexToCoords(CFixBitVec* p,
// big -> big
inline void
-coordsToIndex(const CBigBitVec* p, int m, int n, CBigBitVec& h)
+coordsToIndex(const CBigBitVec* p,
+ const size_t m,
+ const size_t n,
+ CBigBitVec& h)
{
coordsToIndex<CBigBitVec, CBigBitVec>(p, m, n, h);
}
inline void
-indexToCoords(CBigBitVec* p, int m, int n, const CBigBitVec& h)
+indexToCoords(CBigBitVec* const p,
+ const size_t m,
+ const size_t n,
+ const CBigBitVec& h)
{
indexToCoords<CBigBitVec, CBigBitVec>(p, m, n, h);
}
inline void
-coordsToCompactIndex(const CBigBitVec* p,
- const int* ms,
- int n,
- CBigBitVec& hc,
- int M,
- int m)
+coordsToCompactIndex(const CBigBitVec* const p,
+ const size_t* const ms,
+ const size_t n,
+ CBigBitVec& hc,
+ const size_t M,
+ const size_t m)
{
coordsToCompactIndex<CBigBitVec, CBigBitVec>(p, ms, n, hc, M, m);
}
inline void
-compactIndexToCoords(CBigBitVec* p,
- const int* ms,
- int n,
- const CBigBitVec& hc,
- int M,
- int m)
+compactIndexToCoords(CBigBitVec* const p,
+ const size_t* const ms,
+ const size_t n,
+ const CBigBitVec& hc,
+ const size_t M,
+ const size_t m)
{
compactIndexToCoords<CBigBitVec, CBigBitVec>(p, ms, n, hc, M, m);
}
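A usage sketch of the compact-index overloads above with mixed per-dimension precisions (assumed for illustration; M and m are passed as 0 so the implementation derives them from ms, as the templates in Algorithm.hpp do):

#include "chilbert/Hilbert.hpp"

#include <cstddef>

int main()
{
    const size_t n     = 3;
    const size_t ms[3] = {4, 8, 12}; // per-dimension precisions: 24 bits total

    chilbert::CFixBitVec p[3]; // point
    chilbert::CFixBitVec hc;   // compact Hilbert index: needs only 24 bits

    p[0].set(1);
    p[1].set(3);
    p[2].set(5);

    chilbert::coordsToCompactIndex(p, ms, n, hc, 0, 0); // 0, 0: derive M and m
    chilbert::compactIndexToCoords(p, ms, n, hc, 0, 0); // round trip
    return 0;
}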
diff --git a/chilbert/Operations.hpp b/chilbert/Operations.hpp
index 06a06fa..b2e4344 100644
--- a/chilbert/Operations.hpp
+++ b/chilbert/Operations.hpp
@@ -29,11 +29,11 @@ namespace chilbert {
/// IntegralIndex<T> only exists if T is integral
template <typename T>
-using IntegralIndex = std::enable_if_t<std::is_integral<T>::value, int>;
+using IntegralIndex = std::enable_if_t<std::is_integral<T>::value, size_t>;
/// BitsetIndex<T> only exists if T is not integral (must be a bitset)
template <typename T>
-using BitsetIndex = std::enable_if_t<!std::is_integral<T>::value, int>;
+using BitsetIndex = std::enable_if_t<!std::is_integral<T>::value, size_t>;
/// Return the `index`th bit in `field`
template <typename T>
@@ -41,7 +41,7 @@ bool
testBit(const T& field, const IntegralIndex<T> index)
{
assert(size_t(index) < sizeof(field) * CHAR_BIT);
- return field & (((T)1) << index);
+ return field & (T{1} << index);
}
/// Return the `index`th bit in `field`
@@ -58,7 +58,7 @@ void
setBit(T& field, const IntegralIndex<T> index)
{
assert(size_t(index) < sizeof(field) * CHAR_BIT);
- field |= ((T)1 << index);
+ field |= (T{1} << index);
assert(testBit(field, index));
}
@@ -68,7 +68,7 @@ void
setBit(T& field, const IntegralIndex<T> index, const bool value)
{
assert(size_t(index) < sizeof(field) * CHAR_BIT);
- field ^= (-value ^ field) & ((T)1 << index);
+ field ^= (-T{value} ^ field) & (T{1U} << index);
assert(testBit(field, index) == value);
}
@@ -96,14 +96,14 @@ template <>
int
ffs<unsigned long>(const unsigned long field)
{
- return __builtin_ffsl(field);
+ return __builtin_ffsl(static_cast<long>(field));
}
template <>
int
ffs<unsigned long long>(const unsigned long long field)
{
- return __builtin_ffsll(field);
+ return __builtin_ffsll(static_cast<long long>(field));
}
/// Calculates the Gray Code of `value` in place
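For context on the grayCode()/grayCodeInv() specializations touched above, the scalar forms they extend to multi-rack vectors look like this (a standard formulation, included as an illustration rather than code from the library):

#include <cstdint>

inline uint64_t gray_code(const uint64_t v)
{
    return v ^ (v >> 1);
}

inline uint64_t gray_code_inv(uint64_t g)
{
    // Undo the prefix XOR by folding in log2(64) = 6 doubling steps.
    g ^= g >> 1;
    g ^= g >> 2;
    g ^= g >> 4;
    g ^= g >> 8;
    g ^= g >> 16;
    g ^= g >> 32;
    return g;
}

int main()
{
    const uint64_t x = 0xCAFEBABEULL;
    return gray_code_inv(gray_code(x)) == x ? 0 : 1;
}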
diff --git a/chilbert/SetBits.hpp b/chilbert/SetBits.hpp
index 1a4874b..0adbbf8 100644
--- a/chilbert/SetBits.hpp
+++ b/chilbert/SetBits.hpp
@@ -32,9 +32,9 @@ namespace chilbert {
*/
template <class H, class I>
inline void
-setBits(H& h, const int n, const int i, const I& w)
+setBits(H& h, const size_t n, const size_t i, const I& w)
{
- for (int j = 0; j < n; j++) {
+ for (size_t j = 0; j < n; j++) {
setBit(h, i + j, testBit(w, j));
}
}
diff --git a/chilbert/SetLocation.hpp b/chilbert/SetLocation.hpp
index 644f53b..47aa969 100644
--- a/chilbert/SetLocation.hpp
+++ b/chilbert/SetLocation.hpp
@@ -25,9 +25,9 @@ namespace chilbert {
template <class P, class I>
inline void
-setLocation(P* const p, const int n, const int i, const I& l)
+setLocation(P* const p, const size_t n, const size_t i, const I& l)
{
- for (int j = 0; j < n; j++) {
+ for (size_t j = 0; j < n; j++) {
setBit(p[j], i, testBit(l, j));
}
}
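Finally, a scalar sketch of what getLocation()/setLocation() compute for integer coordinates (an illustration under assumptions, not library code): bit i of every coordinate is gathered into an n-bit sub-cube label, and scattered back on the inverse path.

#include <cstddef>
#include <cstdint>

// Gather: bit j of the result is bit i of p[j].
inline uint64_t get_location(const uint64_t* p, const size_t n, const size_t i)
{
    uint64_t l = 0;
    for (size_t j = 0; j < n; ++j) {
        l |= ((p[j] >> i) & 1U) << j;
    }
    return l;
}

// Scatter: bit i of p[j] is set to bit j of l.
inline void set_location(uint64_t* p, const size_t n, const size_t i, const uint64_t l)
{
    for (size_t j = 0; j < n; ++j) {
        p[j] = (p[j] & ~(uint64_t{1} << i)) | (((l >> j) & 1U) << i);
    }
}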