about summary refs log tree commit diff stats
path: root/chilbert/Hilbert.ipp
diff options
context:
space:
mode:
authorDavid Robillard <d@drobilla.net>2018-08-19 01:42:30 +0200
committerDavid Robillard <d@drobilla.net>2018-09-29 14:47:26 +0200
commit09f8d4a4b20f234dafcdf2ce667f220801b9210f (patch)
tree76e02b8e23d33c86e803193eeb2d4d906c8e4728 /chilbert/Hilbert.ipp
parent967d2ac6c2034d1374fc603e64212fc06b5f6133 (diff)
downloadchilbert-09f8d4a4b20f234dafcdf2ce667f220801b9210f.tar.gz
chilbert-09f8d4a4b20f234dafcdf2ce667f220801b9210f.tar.bz2
chilbert-09f8d4a4b20f234dafcdf2ce667f220801b9210f.zip
Make size of bit vectors precise
Diffstat (limited to 'chilbert/Hilbert.ipp')
-rw-r--r--  chilbert/Hilbert.ipp | 21
1 file changed, 12 insertions, 9 deletions
diff --git a/chilbert/Hilbert.ipp b/chilbert/Hilbert.ipp
index 523bfec..92a2f0e 100644
--- a/chilbert/Hilbert.ipp
+++ b/chilbert/Hilbert.ipp
@@ -77,8 +77,9 @@ template <class I>
inline void
transform(const I& e, const size_t d, const size_t n, I& a)
{
+ assert(a.size() == n);
a ^= e;
- a.rotr(d, n); //#D d+1, n );
+ a.rotr(d);
}
// Inverse 'transforms' a point.
@@ -86,7 +87,8 @@ template <class I>
inline void
transformInv(const I& e, const size_t d, const size_t n, I& a)
{
- a.rotl(d, n); //#D d+1, n );
+ assert(a.size() == n);
+ a.rotl(d);
a ^= e;
}
@@ -206,7 +208,7 @@ coordsToIndex(const P* const p, // [in ] point
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width
- _coordsToIndex<P, H, CFixBitVec>(p, m, n, h, CFixBitVec{});
+ _coordsToIndex<P, H, CFixBitVec>(p, m, n, h, CFixBitVec(n));
} else {
// Otherwise, they must be BigBitVecs
_coordsToIndex<P, H, CBigBitVec>(p, m, n, h, CBigBitVec(n));
@@ -277,7 +279,7 @@ indexToCoords(P* const p, // [out] point
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width
- _indexToCoords<P, H, CFixBitVec>(p, m, n, h, CFixBitVec{});
+ _indexToCoords<P, H, CFixBitVec>(p, m, n, h, CFixBitVec(n));
} else {
// Otherwise, they must be BigBitVecs
_indexToCoords<P, H, CBigBitVec>(p, m, n, h, CBigBitVec(n));
@@ -320,7 +322,7 @@ _coordsToCompactIndex(const P* const p,
_coordsToIndex<P, CBigBitVec, I>(p, m, n, h, std::move(scratch), ds);
compactIndex<CBigBitVec, HC>(ms, ds, n, m, h, hc);
} else {
- CFixBitVec h;
+ CFixBitVec h(mn);
_coordsToIndex<P, CFixBitVec, I>(p, m, n, h, std::move(scratch), ds);
compactIndex<CFixBitVec, HC>(ms, ds, n, m, h, hc);
}
@@ -347,7 +349,7 @@ coordsToCompactIndex(
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width?
_coordsToCompactIndex<P, HC, CFixBitVec>(
- p, ms, n, hc, CFixBitVec{}, M, m);
+ p, ms, n, hc, CFixBitVec(n), M, m);
} else {
// Otherwise, they must be BigBitVecs.
_coordsToCompactIndex<P, HC, CBigBitVec>(
@@ -403,7 +405,8 @@ _compactIndexToCoords(P* const p,
size_t b = 0;
extractMask<I>(ms, n, d, static_cast<size_t>(i), mask, b);
ptrn = e;
- ptrn.rotr(d, n); //#D ptrn.Rotr(d+1,n);
+ assert(ptrn.size() == n);
+ ptrn.rotr(d);
// Get the Hilbert index bits
M -= b;
@@ -447,9 +450,9 @@ compactIndexToCoords(
{
if (n <= FBV_BITS) {
// Intermediate variables will fit in fixed width
- CFixBitVec scratch;
+ CFixBitVec scratch(n);
_compactIndexToCoords<P, HC, CFixBitVec>(
- p, ms, n, hc, CFixBitVec{}, M, m);
+ p, ms, n, hc, std::move(scratch), M, m);
} else {
// Otherwise, they must be BigBitVecs
CBigBitVec scratch(n);