path: root/chilbert/Hilbert.ipp
author    David Robillard <d@drobilla.net>  2018-08-18 15:37:27 +0200
committer David Robillard <d@drobilla.net>  2018-09-29 14:46:47 +0200
commit    9be08ad2f8be8964ae72f6571d04ba38909e4388 (patch)
tree      7ce85854927a47ec7c0b3e507bd7d6f333d62bb3 /chilbert/Hilbert.ipp
parent    767265863fca8e2043622f0591281a1deb7821af (diff)
Clean up main public API header
Diffstat (limited to 'chilbert/Hilbert.ipp')
-rw-r--r--  chilbert/Hilbert.ipp  |  429
1 file changed, 429 insertions, 0 deletions
diff --git a/chilbert/Hilbert.ipp b/chilbert/Hilbert.ipp
new file mode 100644
index 0000000..b4f2b63
--- /dev/null
+++ b/chilbert/Hilbert.ipp
@@ -0,0 +1,429 @@
+/*
+ Copyright (C) 2018 David Robillard <d@drobilla.net>
+ Copyright (C) 2006-2007 Chris Hamilton <chamilton@cs.dal.ca>
+
+ This program is free software: you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free Software
+ Foundation, either version 2 of the License, or (at your option) any later
+ version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program. If not, see <https://www.gnu.org/licenses/>.
+*/
+
+#ifndef CHILBERT_HILBERT_IPP
+#define CHILBERT_HILBERT_IPP
+
+#include "chilbert/BigBitVec.hpp"
+#include "chilbert/FixBitVec.hpp"
+#include "chilbert/GetBits.hpp"
+#include "chilbert/GetLocation.hpp"
+#include "chilbert/GrayCodeRank.hpp"
+#include "chilbert/Hilbert.hpp"
+#include "chilbert/SetBits.hpp"
+#include "chilbert/SetLocation.hpp"
+
+#include <cassert>
+
+// Templated Hilbert curve functions.
+//
+// P - the class used to represent each dimension of a
+//     multidimensional point.
+// H - the class used to represent the point as a Hilbert index.
+// I - the class used to represent interim variables.
+//     Must have at least as many bits as there are dimensions.
+//
+// In general, each of P, H, and I should be a CFixBitVec if it fits in
+// that fixed precision, and a CBigBitVec otherwise.  Whichever you use,
+// they must all have consistent underlying storage types.
+
+// The dimension across which the Hilbert curve travels
+// principally.
+// D0 is d + 1, where d is the actual dimension you want to
+// walk across.
+// MUST HAVE 0 <= D0 < n
+#define D0 1
+
+namespace chilbert {
+
+// 'Transforms' a point: a = rotr(a ^ e, d) within n bits.
+template <class I>
+inline void
+transform(const I& e, const size_t d, const size_t n, I& a)
+{
+ a ^= e;
+ a.rotr(d, n); //#D d+1, n );
+}
+
+// Inverse 'transforms' a point: a = rotl(a, d) ^ e within n bits.
+template <class I>
+inline void
+transformInv(const I& e, const size_t d, const size_t n, I& a)
+{
+ a.rotl(d, n); //#D d+1, n );
+ a ^= e;
+}
+
+// Update for method 1 (GrayCodeInv in the loop)
+template <class I>
+inline void
+update1(const I& l, const I& t, const I& w, const size_t n, I& e, size_t& d)
+{
+ assert(0 <= d && d < n);
+ e = l;
+ e.flip(d); //#D d == n-1 ? 0 : d+1 );
+
+ // Update direction
+ d += 1 + t.find_first();
+ if (d >= n) {
+ d -= n;
+ }
+ if (d >= n) {
+ d -= n;
+ }
+ assert(0 <= d && d < n);
+
+ if (!(w.rack() & 1)) {
+ e.flip(d == 0 ? n - 1 : d - 1); //#D d );
+ }
+}
+
+// Update for method 2 (GrayCodeInv out of loop)
+template <class I>
+inline void
+update2(const I& l, const I& t, const size_t n, I& e, size_t& d)
+{
+ assert(0 <= d && d < n);
+ e = l;
+ e.flip(d); //#D d == n-1 ? 0 : d+1 );
+
+ // Update direction
+ d += 1 + t.find_first();
+ if (d >= n) {
+ d -= n;
+ }
+ if (d >= n) {
+ d -= n;
+ }
+ assert(0 <= d && d < n);
+}
+
+template <class P, class H, class I>
+inline void
+_coordsToIndex(const P* const p,
+ const size_t m,
+ const size_t n,
+ H& h,
+ I&& scratch,
+ size_t* const ds = nullptr // #HACK
+)
+{
+ I e{std::move(scratch)};
+ I l{e};
+ I t{e};
+ I w{e};
+
+ // Initialize
+ e.reset();
+ l.reset();
+ h.reset();
+
+ // Work from MSB to LSB
+ size_t d = D0;
+ size_t ho = m * n;
+ for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
+ // #HACK
+ if (ds) {
+ ds[i] = d;
+ }
+
+ // Get corner of sub-hypercube where point lies.
+ getLocation<P, I>(p, n, static_cast<size_t>(i), l);
+
+ // Mirror and reflect the location.
+ // t = T_{(e,d)}(l)
+ t = l;
+ transform<I>(e, d, n, t);
+
+ w = t;
+ if (static_cast<size_t>(i) < m - 1) {
+ w.flip(n - 1);
+ }
+
+ // Concatenate to the index.
+ ho -= n;
+ setBits<H, I>(h, n, ho, w);
+
+ // Update the entry point and direction.
+ update2<I>(l, t, n, e, d);
+ }
+
+ grayCodeInv(h);
+}
+
+// This is a wrapper for the basic Hilbert curve index calculation
+// function.  It supports fixed or arbitrary precision via templates.
+// Depending on the number of dimensions, it uses the most efficient
+// representation for interim variables.
+// Assumes h is big enough for the output (n*m bits!).
+template <class P, class H>
+inline void
+coordsToIndex(const P* const p, // [in ] point
+ const size_t m, // [in ] precision of each dimension in bits
+ const size_t n, // [in ] number of dimensions
+ H& h // [out] Hilbert index
+)
+{
+ if (n <= FBV_BITS) {
+ // Intermediate variables will fit in fixed width
+ _coordsToIndex<P, H, CFixBitVec>(p, m, n, h, CFixBitVec{});
+ } else {
+ // Otherwise, they must be BigBitVecs
+ _coordsToIndex<P, H, CBigBitVec>(p, m, n, h, CBigBitVec(n));
+ }
+}
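+
+// A minimal usage sketch (hypothetical values, not part of the original
+// header), assuming CFixBitVec can be assigned from an unsigned integer:
+//
+//   const size_t m = 16;  // bits per dimension
+//   const size_t n = 2;   // number of dimensions
+//   CFixBitVec   p[2];
+//   p[0] = 3;             // coordinate 0
+//   p[1] = 5;             // coordinate 1
+//   CFixBitVec   h;       // receives the n*m = 32 bit Hilbert index
+//   chilbert::coordsToIndex<CFixBitVec, CFixBitVec>(p, m, n, h);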
+
+template <class P, class H, class I>
+inline void
+_indexToCoords(P* p, const size_t m, const size_t n, const H& h, I&& scratch)
+{
+ I e{std::move(scratch)};
+ I l{e};
+ I t{e};
+ I w{e};
+
+ // Initialize
+ e.reset();
+ l.reset();
+ for (size_t j = 0; j < n; j++) {
+ p[j] = 0U;
+ }
+
+ // Work from MSB to LSB
+ size_t d = D0;
+ size_t ho = m * n;
+ for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
+ // Get the Hilbert index bits
+ ho -= n;
+ getBits<H, I>(h, n, ho, w);
+
+ // t = GrayCode(w)
+ t = w;
+ grayCode(t);
+
+ // Reverse the transform
+ // l = T^{-1}_{(e,d)}(t)
+ l = t;
+ transformInv<I>(e, d, n, l);
+
+ // Distribute these bits
+ // to the coordinates.
+ setLocation<P, I>(p, n, static_cast<size_t>(i), l);
+
+ // Update the entry point and direction.
+ update1<I>(l, t, w, n, e, d);
+ }
+}
+
+// This is a wrapper for the basic Hilbert curve inverse index function.
+// It supports fixed or arbitrary precision via templates.  Depending on
+// the number of dimensions, it uses the most efficient representation
+// for interim variables.
+// Assumes each entry of p is big enough to hold its coordinate (m bits).
+template <class P, class H>
+inline void
+indexToCoords(P* const p, // [out] point
+              const size_t m, // [in ] precision of each dimension in bits
+              const size_t n, // [in ] number of dimensions
+              const H& h // [in ] Hilbert index
+)
+{
+ if (n <= FBV_BITS) {
+ // Intermediate variables will fit in fixed width
+ _indexToCoords<P, H, CFixBitVec>(p, m, n, h, CFixBitVec{});
+ } else {
+ // Otherwise, they must be BigBitVecs
+ _indexToCoords<P, H, CBigBitVec>(p, m, n, h, CBigBitVec(n));
+ }
+}
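+
+// A minimal usage sketch of the inverse mapping (hypothetical values,
+// continuing the example above; not part of the original header):
+//
+//   CFixBitVec q[2];
+//   chilbert::indexToCoords<CFixBitVec, CFixBitVec>(q, m, n, h);
+//   // q[0] and q[1] now hold the coordinates that were encoded in h.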
+
+template <class P, class HC, class I>
+inline void
+_coordsToCompactIndex(const P* const p,
+ const size_t* const ms,
+ const size_t n,
+ HC& hc,
+ I&& scratch,
+ size_t M = 0,
+ size_t m = 0)
+{
+ // Get total precision and max precision if not supplied
+ if (M == 0 || m == 0) {
+ M = m = 0;
+ for (size_t i = 0; i < n; i++) {
+ if (ms[i] > m) {
+ m = ms[i];
+ }
+ M += ms[i];
+ }
+ }
+
+ const size_t mn = m * n;
+
+    // If we could avoid allocation altogether (i.e. have a fixed buffer
+    // allocated on the stack), then this increases speed by a bit
+    // (4% when n=4, m=20).
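+    // A possible variant (only a sketch, not in the original code): use a
+    // small stack buffer in the common case and heap-allocate only when m
+    // is large, e.g.
+    //
+    //   size_t  fixed[64];
+    //   size_t* ds = (m <= 64) ? fixed : new size_t[m];
+    //   ...
+    //   if (ds != fixed) { delete[] ds; }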
+ size_t* const ds = new size_t[m];
+
+ if (mn > FBV_BITS) {
+ CBigBitVec h(mn);
+ _coordsToIndex<P, CBigBitVec, I>(p, m, n, h, std::move(scratch), ds);
+ compactIndex<CBigBitVec, HC>(ms, ds, n, m, h, hc);
+ } else {
+ CFixBitVec h;
+ _coordsToIndex<P, CFixBitVec, I>(p, m, n, h, std::move(scratch), ds);
+ compactIndex<CFixBitVec, HC>(ms, ds, n, m, h, hc);
+ }
+
+ delete[] ds;
+}
+
+// This is a wrapper for the basic compact Hilbert curve index calculation
+// function.  It supports fixed or arbitrary precision via templates.
+// Depending on the number of dimensions, it uses the most efficient
+// representation for interim variables.
+// Assumes hc is big enough for the output (M bits, the sum of the
+// per-dimension precisions).
+template <class P, class HC>
+inline void
+coordsToCompactIndex(
+ const P* const p, // [in ] point
+ const size_t* const ms, // [in ] precision of each dimension in bits
+ size_t n, // [in ] number of dimensions
+    HC& hc, // [out] compact Hilbert index
+    const size_t M, // [in ] total precision in bits (sum of ms); 0 = compute from ms
+    const size_t m) // [in ] maximum precision in bits; 0 = compute from ms
+{
+ if (n <= FBV_BITS) {
+        // Intermediate variables will fit in fixed width
+ _coordsToCompactIndex<P, HC, CFixBitVec>(
+ p, ms, n, hc, CFixBitVec{}, M, m);
+ } else {
+ // Otherwise, they must be BigBitVecs.
+ _coordsToCompactIndex<P, HC, CBigBitVec>(
+ p, ms, n, hc, CBigBitVec(n), M, m);
+ }
+}
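+
+// A minimal usage sketch (hypothetical mixed precisions, not part of the
+// original header), assuming FBV_BITS is at least 30.  Passing M = 0 and
+// m = 0 lets the implementation compute them from ms:
+//
+//   const size_t ms[3] = {10, 16, 4};  // per-dimension precisions
+//   CFixBitVec   p[3];                 // coordinates (assumed already set)
+//   CFixBitVec   hc;                   // receives the M = 30 bit compact index
+//   chilbert::coordsToCompactIndex<CFixBitVec, CFixBitVec>(p, ms, 3, hc, 0, 0);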
+
+template <class P, class HC, class I>
+inline void
+_compactIndexToCoords(P* const p,
+ const size_t* ms,
+ const size_t n,
+ const HC& hc,
+ I&& scratch,
+ size_t M,
+ size_t m)
+{
+ I e{std::move(scratch)};
+ I l{e};
+ I t{e};
+ I w{e};
+ I r{e};
+ I mask{e};
+ I ptrn{e};
+
+ // Get total precision and max precision
+ // if not supplied
+ if (M == 0 || m == 0) {
+ M = m = 0;
+ for (size_t i = 0; i < n; i++) {
+ if (ms[i] > m) {
+ m = ms[i];
+ }
+ M += ms[i];
+ }
+ }
+
+ // Initialize
+ e.reset();
+ l.reset();
+ for (size_t j = 0; j < n; j++) {
+ p[j] = 0;
+ }
+
+ // Work from MSB to LSB
+ size_t d = D0;
+
+ for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
+ // Get the mask and ptrn
+ size_t b = 0;
+ extractMask<I>(ms, n, d, static_cast<size_t>(i), mask, b);
+ ptrn = e;
+ ptrn.rotr(d, n); //#D ptrn.Rotr(d+1,n);
+
+ // Get the Hilbert index bits
+ M -= b;
+ r.reset(); // GetBits doesn't do this
+ getBits<HC, I>(hc, b, M, r);
+
+ // w = GrayCodeRankInv(r)
+ // t = GrayCode(w)
+ grayCodeRankInv<I>(mask, ptrn, r, n, b, t, w);
+
+ // Reverse the transform
+ // l = T^{-1}_{(e,d)}(t)
+ l = t;
+ transformInv<I>(e, d, n, l);
+
+ // Distribute these bits
+ // to the coordinates.
+ setLocation<P, I>(p, n, static_cast<size_t>(i), l);
+
+ // Update the entry point and direction.
+ update1<I>(l, t, w, n, e, d);
+ }
+}
+
+// This is a wrapper for the basic compact Hilbert curve inverse index
+// function.  It supports fixed or arbitrary precision via templates.
+// Depending on the number of dimensions, it uses the most efficient
+// representation for interim variables.
+// Assumes each entry of p is big enough to hold its coordinate.
+template <class P, class HC>
+inline void
+compactIndexToCoords(
+ P* const p, // [out] point
+ const size_t* ms, // [in ] precision of each dimension in bits
+ const size_t n, // [in ] number of dimensions
+    const HC& hc, // [in ] compact Hilbert index
+    const size_t M, // [in ] total precision in bits (sum of ms); 0 = compute from ms
+    const size_t m) // [in ] maximum precision in bits; 0 = compute from ms
+{
+ if (n <= FBV_BITS) {
+ // Intermediate variables will fit in fixed width
+        _compactIndexToCoords<P, HC, CFixBitVec>(
+            p, ms, n, hc, CFixBitVec{}, M, m);
+    } else {
+        // Otherwise, they must be BigBitVecs
+        _compactIndexToCoords<P, HC, CBigBitVec>(
+            p, ms, n, hc, CBigBitVec(n), M, m);
+ }
+}
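+
+// A minimal usage sketch of the inverse (hypothetical values, continuing
+// the example above; not part of the original header):
+//
+//   CFixBitVec q[3];
+//   chilbert::compactIndexToCoords<CFixBitVec, CFixBitVec>(q, ms, 3, hc, 0, 0);
+//   // q[0], q[1], and q[2] now hold the coordinates encoded in hc.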
+
+} // namespace chilbert
+
+#endif