Diffstat (limited to 'chilbert/chilbert.ipp')
-rw-r--r-- | chilbert/chilbert.ipp | 501 |
1 file changed, 501 insertions, 0 deletions
diff --git a/chilbert/chilbert.ipp b/chilbert/chilbert.ipp
new file mode 100644
index 0000000..c4e8249
--- /dev/null
+++ b/chilbert/chilbert.ipp
@@ -0,0 +1,501 @@
+/*
+  Copyright (C) 2018 David Robillard <d@drobilla.net>
+  Copyright (C) 2006-2007 Chris Hamilton <chamilton@cs.dal.ca>
+
+  This program is free software: you can redistribute it and/or modify it under
+  the terms of the GNU General Public License as published by the Free Software
+  Foundation, either version 2 of the License, or (at your option) any later
+  version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+  FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+  details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program. If not, see <https://www.gnu.org/licenses/>.
+*/
+
+#ifndef CHILBERT_ALGORITHM_HPP
+#define CHILBERT_ALGORITHM_HPP
+
+#include "chilbert/BoundedBitVec.hpp"
+#include "chilbert/DynamicBitVec.hpp"
+#include "chilbert/SmallBitVec.hpp"
+#include "chilbert/StaticBitVec.hpp"
+#include "chilbert/chilbert.hpp"
+#include "chilbert/detail/gray_code_rank.hpp"
+
+#include <cassert>
+#include <climits>
+#include <numeric>
+#include <type_traits>
+
+namespace chilbert {
+namespace detail {
+
+/** The dimension across which the Hilbert curve travels principally.
+ *
+ * D0 is d + 1, where d is the actual dimension you want to walk across.
+ * MUST HAVE 0 <= D0 < n.
+ */
+static constexpr size_t D0 = 1;
+
+template <class T>
+size_t
+num_bits(const T&, std::enable_if_t<std::is_integral<T>::value>* = nullptr)
+{
+    return sizeof(T) * CHAR_BIT;
+}
+
+template <class T>
+size_t
+num_bits(const T& vec,
+         std::enable_if_t<std::is_same<T, SmallBitVec>::value ||
+                          std::is_same<T, DynamicBitVec>::value>* = nullptr)
+{
+    return vec.size();
+}
+
+template <size_t N>
+size_t
+num_bits(const StaticBitVec<N>&, void* = nullptr)
+{
+    return N;
+}
+
+template <size_t MaxN>
+size_t
+num_bits(const BoundedBitVec<MaxN>& vec, void* = nullptr)
+{
+    return vec.size();
+}
+
+/** Copy a range of bits from one field to the start of another.
+ *
+ * @param h Source bit field
+ * @param n Number of bits
+ * @param i Start bit index in source
+ * @param[out] w Destination bit field
+ */
+template <class H, class I>
+inline void
+get_bits(const H& h, const size_t n, const size_t i, I& w)
+{
+    for (size_t j = 0; j < n; j++) {
+        set_bit(w, j, test_bit(h, i + j));
+    }
+}
+
+/** Set a range of bits in one field from the start of another.
+ *
+ * @param[out] h Destination bit field
+ * @param n Number of bits
+ * @param i Start bit index in destination
+ * @param w Source bit field
+ */
+template <class H, class I>
+inline void
+set_bits(H& h, const size_t n, const size_t i, const I& w)
+{
+    for (size_t j = 0; j < n; j++) {
+        set_bit(h, i + j, test_bit(w, j));
+    }
+}
+
+/** Set the leading bits in `l` to bit `i` of the corresponding values in `p`.
+ *
+ * @param p Point.
+ * @param n Number of bits to set.
+ * @param i Bit position to copy from values in `p`.
+ * @param[out] l Output bits.
+ */
+template <class P, class I>
+inline void
+get_location(const P* const p, const size_t n, const size_t i, I& l)
+{
+    for (size_t j = 0; j < n; ++j) {
+        set_bit(l, j, test_bit(p[j], i));
+    }
+}
+
+/** Set bit `i` in values in `p` to the corresponding leading bits in `l`.
+ *
+ * @param[out] p Point.
+ * @param n Number of bits to set.
+ * @param i Bit position to set in values in `p`.
+ * @param l Input bits.
+ */
+template <class P, class I>
+inline void
+set_location(P* const p, const size_t n, const size_t i, const I& l)
+{
+    for (size_t j = 0; j < n; j++) {
+        set_bit(p[j], i, test_bit(l, j));
+    }
+}
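
The four helpers above are variations on one bit-shuffling step: get_location gathers bit i of each of the n coordinates into a single n-bit word, and set_location scatters such a word back. A standalone sketch of the same gather/scatter idea on plain unsigned words (illustrative only; gather_bit and scatter_bit are hypothetical names, not part of chilbert):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Gather bit `i` of each of the n coordinates into one n-bit word
// (what get_location does, with plain integers instead of bit vectors).
uint32_t gather_bit(const uint32_t* p, std::size_t n, std::size_t i)
{
    uint32_t l = 0;
    for (std::size_t j = 0; j < n; ++j) {
        l |= ((p[j] >> i) & 1u) << j;
    }
    return l;
}

// Scatter the low n bits of `l` back to bit `i` of each coordinate
// (what set_location does).
void scatter_bit(uint32_t* p, std::size_t n, std::size_t i, uint32_t l)
{
    for (std::size_t j = 0; j < n; ++j) {
        p[j] = (p[j] & ~(1u << i)) | (((l >> j) & 1u) << i);
    }
}

int main()
{
    uint32_t p[3] = {0b100, 0b110, 0b011}; // 3 coordinates
    const uint32_t l = gather_bit(p, 3, 1); // bits 0, 1, 1 -> 0b110
    std::printf("label = %u\n", (unsigned)l); // prints 6
    scatter_bit(p, 3, 1, l);                  // round-trips exactly
}
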
+// 'Transforms' a point.
+template <class I>
+inline void
+transform(const I& e, const size_t d, const size_t n, I& a)
+{
+    assert(a.size() == n);
+    a ^= e;
+    a.rotr(d);
+}
+
+// Inverse 'transforms' a point.
+template <class I>
+inline void
+transform_inv(const I& e, const size_t d, const size_t n, I& a)
+{
+    assert(a.size() == n);
+    a.rotl(d);
+    a ^= e;
+}
+
+// Update for method 1 (GrayCodeInv in the loop)
+template <class I>
+inline void
+update1(const I& l, const I& t, const I& w, const size_t n, I& e, size_t& d)
+{
+    assert(0 <= d && d < n);
+    e = l;
+    e.flip(d); //#D d == n-1 ? 0 : d+1 );
+
+    // Update direction
+    d += 1 + t.find_first();
+    if (d >= n) {
+        d -= n;
+    }
+    if (d >= n) {
+        d -= n;
+    }
+    assert(0 <= d && d < n);
+
+    if (!w.test(0)) {
+        e.flip(d == 0 ? n - 1 : d - 1); //#D d );
+    }
+}
+
+// Update for method 2 (GrayCodeInv out of loop)
+template <class I>
+inline void
+update2(const I& l, const I& t, const size_t n, I& e, size_t& d)
+{
+    assert(0 <= d && d < n);
+    e = l;
+    e.flip(d); //#D d == n-1 ? 0 : d+1 );
+
+    // Update direction
+    d += 1 + t.find_first();
+    if (d >= n) {
+        d -= n;
+    }
+    if (d >= n) {
+        d -= n;
+    }
+    assert(0 <= d && d < n);
+}
+
+template <class P, class H, class I>
+inline void
+coords_to_index(const P* const p,
+                const size_t m,
+                const size_t n,
+                H& h,
+                I&& scratch,
+                size_t* const ds = nullptr)
+{
+    I e{std::move(scratch)};
+    I l{e};
+    I t{e};
+    I w{e};
+
+    // Initialize
+    e.reset();
+    l.reset();
+    reset_bits(h);
+
+    // Work from MSB to LSB
+    size_t d = D0;
+    size_t ho = m * n;
+    for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
+        if (ds) {
+            ds[i] = d;
+        }
+
+        // Get corner of sub-hypercube where point lies.
+        get_location<P, I>(p, n, static_cast<size_t>(i), l);
+
+        // Mirror and reflect the location.
+        // t = T_{(e,d)}(l)
+        t = l;
+        transform<I>(e, d, n, t);
+
+        w = t;
+        if (static_cast<size_t>(i) < m - 1) {
+            w.flip(n - 1);
+        }
+
+        // Concatenate to the index.
+        ho -= n;
+        set_bits<H, I>(h, n, ho, w);
+
+        // Update the entry point and direction.
+        update2<I>(l, t, n, e, d);
+    }
+
+    gray_code_inv(h);
+}
+
+template <class P, class H, class I>
+inline void
+index_to_coords(P* p, const size_t m, const size_t n, const H& h, I&& scratch)
+{
+    I e{std::move(scratch)};
+    I l{e};
+    I t{e};
+    I w{e};
+
+    // Initialize
+    e.reset();
+    l.reset();
+    for (size_t j = 0; j < n; j++) {
+        reset_bits(p[j]);
+    }
+
+    // Work from MSB to LSB
+    size_t d = D0;
+    size_t ho = m * n;
+    for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
+        // Get the Hilbert index bits
+        ho -= n;
+        get_bits<H, I>(h, n, ho, w);
+
+        // t = GrayCode(w)
+        t = w;
+        gray_code(t);
+
+        // Reverse the transform
+        // l = T^{-1}_{(e,d)}(t)
+        l = t;
+        transform_inv<I>(e, d, n, l);
+
+        // Distribute these bits to the coordinates.
+        set_location<P, I>(p, n, static_cast<size_t>(i), l);
+
+        // Update the entry point and direction.
+        update1<I>(l, t, w, n, e, d);
+    }
+}
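
The main loops above hinge on transform and transform_inv being exact inverses: XOR with the entry point e, then rotate right by d, and the reverse in the opposite order. A standalone sketch with plain 32-bit words and hypothetical rotr_n/rotl_n helpers (assuming 0 < n <= 32), checking the round trip:

#include <cassert>
#include <cstdint>

// Rotate the low n bits of `a` right by d; bits above n stay zero.
uint32_t rotr_n(uint32_t a, unsigned d, unsigned n)
{
    const uint32_t mask = (n < 32) ? ((1u << n) - 1) : ~0u;
    a &= mask;
    d %= n;
    if (d == 0) {
        return a;
    }
    return ((a >> d) | (a << (n - d))) & mask;
}

uint32_t rotl_n(uint32_t a, unsigned d, unsigned n)
{
    return rotr_n(a, n - (d % n), n);
}

// t = T_{(e,d)}(l): XOR with the entry point, then rotate right by d.
uint32_t transform(uint32_t e, unsigned d, unsigned n, uint32_t a)
{
    return rotr_n(a ^ e, d, n);
}

// l = T^{-1}_{(e,d)}(t): rotate left by d, then XOR with the entry point.
uint32_t transform_inv(uint32_t e, unsigned d, unsigned n, uint32_t a)
{
    return rotl_n(a, d, n) ^ e;
}

int main()
{
    const uint32_t e = 0b0101; // entry point in a 4-dimensional cube
    for (uint32_t l = 0; l < 16; ++l) {
        assert(transform_inv(e, 2, 4, transform(e, 2, 4, l)) == l);
    }
}
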
+template <class P, class HC, class I>
+inline void
+coords_to_compact_index(const P* const p,
+                        const size_t* const ms,
+                        const size_t n,
+                        HC& hc,
+                        I&& scratch,
+                        size_t M = 0,
+                        size_t m = 0)
+{
+    // Get total precision and max precision if not supplied
+    if (M == 0 || m == 0) {
+        M = m = 0;
+        for (size_t i = 0; i < n; i++) {
+            assert(num_bits(p[i]) >= ms[i]);
+            if (ms[i] > m) {
+                m = ms[i];
+            }
+            M += ms[i];
+        }
+    }
+
+    const size_t mn = m * n;
+
+    assert(num_bits(hc) >= M);
+
+    // If we could avoid allocation altogether (i.e. have a fixed buffer
+    // allocated on the stack), this would increase speed by a bit
+    // (4% when n=4, m=20)
+    size_t* const ds = new size_t[m];
+
+    if (mn > SmallBitVec::bits_per_rack) {
+        DynamicBitVec h(mn);
+        detail::coords_to_index<P, DynamicBitVec, I>(
+            p, m, n, h, std::move(scratch), ds);
+        compact_index<DynamicBitVec, HC>(ms, ds, n, m, h, hc);
+    } else {
+        SmallBitVec h(mn);
+        detail::coords_to_index<P, SmallBitVec, I>(
+            p, m, n, h, std::move(scratch), ds);
+        compact_index<SmallBitVec, HC>(ms, ds, n, m, h, hc);
+    }
+
+    delete[] ds;
+}
+
+template <class P, class HC, class I>
+inline void
+compact_index_to_coords(P* const p,
+                        const size_t* ms,
+                        const size_t n,
+                        const HC& hc,
+                        I&& scratch,
+                        size_t M,
+                        size_t m)
+{
+    I e{std::move(scratch)};
+    I l{e};
+    I t{e};
+    I w{e};
+    I r{e};
+    I mask{e};
+    I ptrn{e};
+
+    // Get total precision and max precision if not supplied
+    if (M == 0 || m == 0) {
+        M = m = 0;
+        for (size_t i = 0; i < n; i++) {
+            if (ms[i] > m) {
+                m = ms[i];
+            }
+            M += ms[i];
+        }
+    }
+
+    assert(num_bits(hc) >= M);
+    assert(num_bits(p[0]) >= m);
+
+    // Initialize
+    e.reset();
+    l.reset();
+    for (size_t j = 0; j < n; j++) {
+        reset_bits(p[j]);
+    }
+
+    // Work from MSB to LSB
+    size_t d = D0;
+
+    for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; i--) {
+        // Get the mask and ptrn
+        size_t b = 0;
+        extract_mask<I>(ms, n, d, static_cast<size_t>(i), mask, b);
+        ptrn = e;
+        assert(ptrn.size() == n);
+        ptrn.rotr(d);
+
+        // Get the Hilbert index bits
+        M -= b;
+        r.reset(); // GetBits doesn't do this
+        get_bits<HC, I>(hc, b, M, r);
+
+        // w = GrayCodeRankInv(r)
+        // t = GrayCode(w)
+        gray_code_rank_inv<I>(mask, ptrn, r, n, b, t, w);
+
+        // Reverse the transform
+        // l = T^{-1}_{(e,d)}(t)
+        l = t;
+        transform_inv<I>(e, d, n, l);
+
+        // Distribute these bits to the coordinates.
+        set_location<P, I>(p, n, static_cast<size_t>(i), l);
+
+        // Update the entry point and direction.
+        update1<I>(l, t, w, n, e, d);
+    }
+}
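
The compact variants let each dimension carry its own precision ms[i], so the index needs only M = sum(ms) bits rather than m * n. A quick standalone illustration of that bookkeeping, the same M/m computation the functions above perform when the caller passes 0:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <numeric>

int main()
{
    // Per-dimension precisions, e.g. a point in a 16 x 1024 x 64 grid.
    const std::size_t ms[] = {4, 10, 6};
    const std::size_t n    = 3;

    const std::size_t M = std::accumulate(ms, ms + n, std::size_t{0}); // 20
    const std::size_t m = *std::max_element(ms, ms + n);               // 10

    // A plain Hilbert index over this space would need m * n bits.
    std::printf("compact: %zu bits, plain: %zu bits\n", M, m * n); // 20 vs 30
}
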
+} // namespace detail
+
+template <class P, class H>
+inline void
+coords_to_index(const P* const p, const size_t m, const size_t n, H& h)
+{
+    assert(detail::num_bits(h) >= n * m);
+    assert(detail::num_bits(p[0]) >= m);
+
+    if (n <= SmallBitVec::bits_per_rack) {
+        // Intermediate variables will fit in fixed width
+        detail::coords_to_index<P, H, SmallBitVec>(p, m, n, h, SmallBitVec(n));
+    } else {
+        // Otherwise, they must be DynamicBitVecs
+        detail::coords_to_index<P, H, DynamicBitVec>(
+            p, m, n, h, DynamicBitVec(n));
+    }
+}
+
+template <class P, class H>
+inline void
+index_to_coords(P* const p, const size_t m, const size_t n, const H& h)
+{
+    assert(m > 0);
+    assert(n > 0);
+    assert(detail::num_bits(h) >= n * m);
+    assert(detail::num_bits(p[0]) >= m);
+
+    if (n <= SmallBitVec::bits_per_rack) {
+        // Intermediate variables will fit in fixed width
+        detail::index_to_coords<P, H, SmallBitVec>(p, m, n, h, SmallBitVec(n));
+    } else {
+        // Otherwise, they must be DynamicBitVecs
+        detail::index_to_coords<P, H, DynamicBitVec>(
+            p, m, n, h, DynamicBitVec(n));
+    }
+}
+
+template <class P, class HC>
+inline void
+coords_to_compact_index(const P* const p,
+                        const size_t* const ms,
+                        size_t n,
+                        HC& hc,
+                        const size_t M,
+                        const size_t m)
+{
+    assert(hc.size() >= std::accumulate(ms, ms + n, size_t(0)));
+
+    if (n <= SmallBitVec::bits_per_rack) {
+        // Intermediate variables will fit in fixed width
+        detail::coords_to_compact_index<P, HC, SmallBitVec>(
+            p, ms, n, hc, SmallBitVec(n), M, m);
+    } else {
+        // Otherwise, they must be DynamicBitVecs
+        detail::coords_to_compact_index<P, HC, DynamicBitVec>(
+            p, ms, n, hc, DynamicBitVec(n), M, m);
+    }
+}
+
+template <class P, class HC>
+inline void
+compact_index_to_coords(P* const p,
+                        const size_t* ms,
+                        const size_t n,
+                        const HC& hc,
+                        const size_t M,
+                        const size_t m)
+{
+    assert(hc.size() >= std::accumulate(ms, ms + n, size_t(0)));
+
+    if (n <= SmallBitVec::bits_per_rack) {
+        // Intermediate variables will fit in fixed width
+        SmallBitVec scratch(n);
+        detail::compact_index_to_coords<P, HC, SmallBitVec>(
+            p, ms, n, hc, std::move(scratch), M, m);
+    } else {
+        // Otherwise, they must be DynamicBitVecs
+        DynamicBitVec scratch(n);
+        detail::compact_index_to_coords<P, HC, DynamicBitVec>(
+            p, ms, n, hc, std::move(scratch), M, m);
+    }
+}
+
+} // namespace chilbert
+
+#endif
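
A minimal round-trip sketch of the public API defined above, assuming plain integral coordinate and index types are accepted (which the num_bits() overloads for integral types suggest): 3 dimensions at 10 bits of precision each, so the index needs m * n = 30 bits.

#include "chilbert/chilbert.hpp"

#include <cassert>
#include <cstddef>
#include <cstdint>

int main()
{
    const uint16_t p_in[3] = {123, 456, 789}; // point, each value < 2^10
    uint32_t h = 0;                           // index, at least 30 bits wide

    chilbert::coords_to_index(p_in, 10, 3, h);

    uint16_t p_out[3] = {0, 0, 0};
    chilbert::index_to_coords(p_out, 10, 3, h);

    for (std::size_t i = 0; i < 3; ++i) {
        assert(p_out[i] == p_in[i]); // the mapping round-trips exactly
    }
}
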