author     David Robillard <d@drobilla.net>    2018-08-11 22:40:03 +0200
committer  David Robillard <d@drobilla.net>    2018-09-29 14:46:25 +0200
commit     50145430b5fb08802bc22a6ae06351a11a091c60 (patch)
tree       4ba0d1c8c5b76e7608fbd8ffbc985ba8a9f09622 /chilbert/Algorithm.hpp
parent     df0d52f4bd78c2197a88a805a1dd402978df3290 (diff)
Clean up types and fix every even remotely reasonable warning
Diffstat (limited to 'chilbert/Algorithm.hpp')
-rw-r--r--  chilbert/Algorithm.hpp | 140
1 file changed, 71 insertions, 69 deletions
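Most of the hunks below change int size, precision, and direction parameters to size_t, which is what resolves the sign-conversion and sign-comparison warnings referred to in the commit message. As a rough standalone illustration of that warning class and of the fix, assuming nothing beyond standard C++ (the function and variable names here are made up for the example, not taken from chilbert):

#include <cstddef>
#include <vector>

// A count taken as int forces a sign conversion at call sites that hold sizes
// as size_t (for example std::vector::size()), which -Wsign-conversion flags.
// Declared only for contrast with the unsigned version below.
int sumFirstSigned(const int* values, int count);

// Taking size_t end to end, as the signatures in this commit now do, needs no
// conversion at the call site and no cast in the loop condition.
int sumFirst(const int* const values, const size_t count)
{
    int sum = 0;
    for (size_t i = 0; i < count; ++i) {
        sum += values[i];
    }
    return sum;
}

int sumAll(const std::vector<int>& v)
{
    return sumFirst(v.data(), v.size()); // no cast, no warning
}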
diff --git a/chilbert/Algorithm.hpp b/chilbert/Algorithm.hpp
index d43a42b..7d40f2d 100644
--- a/chilbert/Algorithm.hpp
+++ b/chilbert/Algorithm.hpp
@@ -54,7 +54,7 @@ namespace chilbert {
// 'Transforms' a point.
template <class I>
inline void
-transform(const I& e, int d, int n, I& a)
+transform(const I& e, const size_t d, const size_t n, I& a)
{
a ^= e;
a.rotr(d, n); //#D d+1, n );
@@ -63,7 +63,7 @@ transform(const I& e, int d, int n, I& a)
// Inverse 'transforms' a point.
template <class I>
inline void
-transformInv(const I& e, int d, int n, I& a)
+transformInv(const I& e, const size_t d, const size_t n, I& a)
{
a.rotl(d, n); //#D d+1, n );
a ^= e;
@@ -72,7 +72,7 @@ transformInv(const I& e, int d, int n, I& a)
// Update for method 1 (GrayCodeInv in the loop)
template <class I>
inline void
-update1(const I& l, const I& t, const I& w, int n, I& e, int& d)
+update1(const I& l, const I& t, const I& w, const size_t n, I& e, size_t& d)
{
assert(0 <= d && d < n);
e = l;
@@ -96,7 +96,7 @@ update1(const I& l, const I& t, const I& w, int n, I& e, int& d)
// Update for method 2 (GrayCodeInv out of loop)
template <class I>
inline void
-update2(const I& l, const I& t, const I& w, int n, I& e, int& d)
+update2(const I& l, const I& t, const size_t n, I& e, size_t& d)
{
assert(0 <= d && d < n);
e = l;
@@ -115,13 +115,13 @@ update2(const I& l, const I& t, const I& w, int n, I& e, int& d)
template <class P, class H, class I>
inline void
-_coordsToIndex(const P* p,
- int m,
- int n,
- H& h,
- I&& scratch,
- int* ds = nullptr // #HACK
-)
+_coordsToIndex(const P* const p,
+ const size_t m,
+ const size_t n,
+ H& h,
+ I&& scratch,
+ size_t* const ds = nullptr // #HACK
+ )
{
I e{std::move(scratch)};
I l{e};
@@ -134,16 +134,16 @@ _coordsToIndex(const P* p,
h = 0U;
// Work from MSB to LSB
- int d = D0;
- int ho = m * n;
- for (int i = m - 1; i >= 0; i--) {
+ size_t d = D0;
+ size_t ho = m * n;
+ for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
// #HACK
if (ds) {
ds[i] = d;
}
// Get corner of sub-hypercube where point lies.
- getLocation<P, I>(p, n, i, l);
+ getLocation<P, I>(p, n, static_cast<size_t>(i), l);
// Mirror and reflect the location.
// t = T_{(e,d)}(l)
@@ -151,7 +151,7 @@ _coordsToIndex(const P* p,
transform<I>(e, d, n, t);
w = t;
- if (i < m - 1) {
+ if (static_cast<size_t>(i) < m - 1) {
w.flip(n - 1);
}
@@ -160,7 +160,7 @@ _coordsToIndex(const P* p,
setBits<H, I>(h, n, ho, w);
// Update the entry point and direction.
- update2<I>(l, t, w, n, e, d);
+ update2<I>(l, t, n, e, d);
}
grayCodeInv(h);
@@ -174,11 +174,11 @@ _coordsToIndex(const P* p,
// Assumes h is big enough for the output (n*m bits!)
template <class P, class H>
inline void
-coordsToIndex(const P* p, // [in ] point
- int m, // [in ] precision of each dimension in bits
- int n, // [in ] number of dimensions
- H& h // [out] Hilbert index
-)
+coordsToIndex(const P* const p, // [in ] point
+ const size_t m, // [in ] precision of each dimension in bits
+ const size_t n, // [in ] number of dimensions
+ H& h // [out] Hilbert index
+ )
{
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width
@@ -191,7 +191,7 @@ coordsToIndex(const P* p, // [in ] point
template <class P, class H, class I>
inline void
-_indexToCoords(P* p, int m, int n, const H& h, I&& scratch)
+_indexToCoords(P* p, const size_t m, const size_t n, const H& h, I&& scratch)
{
I e{std::move(scratch)};
I l{e};
@@ -201,14 +201,14 @@ _indexToCoords(P* p, int m, int n, const H& h, I&& scratch)
// Initialize
e.reset();
l.reset();
- for (int j = 0; j < n; j++) {
+ for (size_t j = 0; j < n; j++) {
p[j] = 0U;
}
// Work from MSB to LSB
- int d = D0;
- int ho = m * n;
- for (int i = m - 1; i >= 0; i--) {
+ size_t d = D0;
+ size_t ho = m * n;
+ for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
// Get the Hilbert index bits
ho -= n;
getBits<H, I>(h, n, ho, w);
@@ -224,7 +224,7 @@ _indexToCoords(P* p, int m, int n, const H& h, I&& scratch)
// Distribute these bits
// to the coordinates.
- setLocation<P, I>(p, n, i, l);
+ setLocation<P, I>(p, n, static_cast<size_t>(i), l);
// Update the entry point and direction.
update1<I>(l, t, w, n, e, d);
@@ -240,11 +240,11 @@ _indexToCoords(P* p, int m, int n, const H& h, I&& scratch)
// appropriate variable.
template <class P, class H>
inline void
-indexToCoords(P* p, // [out] point
- int m, // [in ] precision of each dimension in bits
- int n, // [in ] number of dimensions
- const H& h // [out] Hilbert index
-)
+indexToCoords(P* const p, // [out] point
+ const size_t m, // [in ] precision of each dimension in bits
+ const size_t n, // [in ] number of dimensions
+ const H& h // [out] Hilbert index
+ )
{
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width
@@ -257,18 +257,18 @@ indexToCoords(P* p, // [out] point
template <class P, class HC, class I>
inline void
-_coordsToCompactIndex(const P* p,
- const int* ms,
- int n,
- HC& hc,
- I&& scratch,
- int M = 0,
- int m = 0)
+_coordsToCompactIndex(const P* const p,
+ const size_t* const ms,
+ const size_t n,
+ HC& hc,
+ I&& scratch,
+ size_t M = 0,
+ size_t m = 0)
{
// Get total precision and max precision if not supplied
if (M == 0 || m == 0) {
M = m = 0;
- for (int i = 0; i < n; i++) {
+ for (size_t i = 0; i < n; i++) {
if (ms[i] > m) {
m = ms[i];
}
@@ -276,12 +276,12 @@ _coordsToCompactIndex(const P* p,
}
}
- const int mn = m * n;
+ const size_t mn = m * n;
// If we could avoid allocation altogether (ie: have a
// fixed buffer allocated on the stack) then this increases
// speed by a bit (4% when n=4, m=20)
- int* const ds = new int[m];
+ size_t* const ds = new size_t[m];
if (mn > FBV_BITS) {
CBigBitVec h(mn);
@@ -304,12 +304,13 @@ _coordsToCompactIndex(const P* p,
// Assumes h is big enough for the output (n*m bits!)
template <class P, class HC>
inline void
-coordsToCompactIndex(const P* p, // [in ] point
- const int* ms, // [in ] precision of each dimension in bits
- int n, // [in ] number of dimensions
- HC& hc, // [out] Hilbert index
- int M = 0,
- int m = 0)
+coordsToCompactIndex(
+ const P* const p, // [in ] point
+ const size_t* const ms, // [in ] precision of each dimension in bits
+ size_t n, // [in ] number of dimensions
+ HC& hc, // [out] Hilbert index
+ const size_t M = 0,
+ const size_t m = 0)
{
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width?
@@ -324,13 +325,13 @@ coordsToCompactIndex(const P* p, // [in ] point
template <class P, class HC, class I>
inline void
-_compactIndexToCoords(P* p,
- const int* ms,
- int n,
- const HC& hc,
- I&& scratch,
- int M = 0,
- int m = 0)
+_compactIndexToCoords(P* const p,
+ const size_t* ms,
+ const size_t n,
+ const HC& hc,
+ I&& scratch,
+ size_t M = 0,
+ size_t m = 0)
{
I e{std::move(scratch)};
I l{e};
@@ -344,7 +345,7 @@ _compactIndexToCoords(P* p,
// if not supplied
if (M == 0 || m == 0) {
M = m = 0;
- for (int i = 0; i < n; i++) {
+ for (size_t i = 0; i < n; i++) {
if (ms[i] > m) {
m = ms[i];
}
@@ -355,17 +356,17 @@ _compactIndexToCoords(P* p,
// Initialize
e.reset();
l.reset();
- for (int j = 0; j < n; j++) {
+ for (size_t j = 0; j < n; j++) {
p[j] = 0;
}
// Work from MSB to LSB
- int d = D0;
+ size_t d = D0;
- for (int i = m - 1; i >= 0; i--) {
+ for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
// Get the mask and ptrn
- int b = 0;
- extractMask<I>(ms, n, d, i, mask, b);
+ size_t b = 0;
+ extractMask<I>(ms, n, d, static_cast<size_t>(i), mask, b);
ptrn = e;
ptrn.rotr(d, n); //#D ptrn.Rotr(d+1,n);
@@ -385,7 +386,7 @@ _compactIndexToCoords(P* p,
// Distribute these bits
// to the coordinates.
- setLocation<P, I>(p, n, i, l);
+ setLocation<P, I>(p, n, static_cast<size_t>(i), l);
// Update the entry point and direction.
update1<I>(l, t, w, n, e, d);
@@ -401,12 +402,13 @@ _compactIndexToCoords(P* p,
// appropriate variable.
template <class P, class HC>
inline void
-compactIndexToCoords(P* p, // [out] point
- const int* ms, // [in ] precision of each dimension in bits
- int n, // [in ] number of dimensions
- const HC& hc, // [out] Hilbert index
- int M = 0,
- int m = 0)
+compactIndexToCoords(
+ P* const p, // [out] point
+ const size_t* ms, // [in ] precision of each dimension in bits
+ const size_t n, // [in ] number of dimensions
+ const HC& hc, // [out] Hilbert index
+ const size_t M = 0,
+ const size_t m = 0)
{
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width
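The descending loops over m are the one spot where a signed counter is still needed: a counter that must reach -1 to terminate cannot itself be size_t, so the hunks above cast the bound to intptr_t and cast the counter back to size_t wherever it is used as an index. A small sketch of that pattern in isolation, with illustrative names rather than chilbert's own:

#include <cstddef>
#include <cstdint>

// Zero the elements of p from last to first. The test i >= 0 would always be
// true for an unsigned counter, so i is signed (intptr_t, as in the hunks
// above) and is cast back to size_t where it is used as an index.
void clearBackwards(unsigned* const p, const size_t n)
{
    for (intptr_t i = static_cast<intptr_t>(n) - 1; i >= 0; --i) {
        p[static_cast<size_t>(i)] = 0U;
    }
}

Counting upward, or writing the condition as while (i-- > 0) with an unsigned i, would avoid the casts entirely; the cast form keeps the loop shape identical to the original signed version, which appears to be the trade-off taken here.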