/*
  Copyright (C) 2018 David Robillard
  Copyright (C) 2006-2007 Chris Hamilton

  This program is free software: you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
  Software Foundation, either version 2 of the License, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along
  with this program.  If not, see <https://www.gnu.org/licenses/>.
*/

#ifndef CHILBERT_ALGORITHM_HPP
#define CHILBERT_ALGORITHM_HPP

#include "chilbert/BoundedBitVec.hpp"
#include "chilbert/DynamicBitVec.hpp"
#include "chilbert/SmallBitVec.hpp"
#include "chilbert/StaticBitVec.hpp"
#include "chilbert/chilbert.hpp"
#include "chilbert/detail/gray_code_rank.hpp"

#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <type_traits>
#include <utility>

namespace chilbert {
namespace detail {

/** The dimension across which the Hilbert curve travels principally.
 *
 * D0 is d + 1, where d is the actual dimension you want to walk across.
 * MUST HAVE 0 <= D0 < n.
 */
static constexpr size_t D0 = 1;

template <typename T>
size_t
num_bits(const T&, std::enable_if_t<std::is_integral<T>::value>* = nullptr)
{
    return sizeof(T) * CHAR_BIT;
}

template <typename T>
size_t
num_bits(const T& vec,
         std::enable_if_t<std::is_same<T, SmallBitVec>::value ||
                          std::is_same<T, DynamicBitVec>::value>* = nullptr)
{
    return vec.size();
}

template <size_t N>
size_t
num_bits(const StaticBitVec<N>&, void* = nullptr)
{
    return N;
}

template <size_t N>
size_t
num_bits(const BoundedBitVec<N>& vec, void* = nullptr)
{
    return vec.size();
}

/** Copy a range of bits from one field to the start of another.
 *
 * @param h Source bit field
 * @param n Number of bits
 * @param i Start bit index in source
 * @param[out] w Destination bit field
 */
template <typename H, typename I>
inline void
get_bits(const H& h, const size_t n, const size_t i, I& w)
{
    for (size_t j = 0; j < n; ++j) {
        set_bit(w, j, test_bit(h, i + j));
    }
}

/** Set a range of bits in one field from the start of another.
 *
 * @param[out] h Destination bit field
 * @param n Number of bits
 * @param i Start bit index in destination
 * @param w Source bit field
 */
template <typename H, typename I>
inline void
set_bits(H& h, const size_t n, const size_t i, const I& w)
{
    for (size_t j = 0; j < n; ++j) {
        set_bit(h, i + j, test_bit(w, j));
    }
}

/** Set the leading bits in `l` to bit `i` of the corresponding value in `p`.
 *
 * @param p Point.
 * @param n Number of bits to set.
 * @param i Bit position to copy from values in `p`.
 * @param[out] l Output bits.
 */
template <typename P, typename I>
inline void
get_location(const P* const p, const size_t n, const size_t i, I& l)
{
    for (size_t j = 0; j < n; ++j) {
        set_bit(l, j, test_bit(p[j], i));
    }
}

/** Set bit `i` of each value in `p` to the corresponding leading bit in `l`.
 *
 * @param[out] p Point.
 * @param n Number of bits to set.
 * @param i Bit position to set in values in `p`.
 * @param l Input bits.
 */
template <typename P, typename I>
inline void
set_location(P* const p, const size_t n, const size_t i, const I& l)
{
    for (size_t j = 0; j < n; ++j) {
        set_bit(p[j], i, test_bit(l, j));
    }
}

// 'Transforms' a point: XOR with the entry point `e`, then rotate right by `d`.
template <typename I>
inline void
transform(const I& e, const size_t d, const size_t n, I& a)
{
    assert(a.size() == n);
    a ^= e;
    a.rotr(d);
}
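/* Illustrative example of transform() above and transform_inv() below, using
 * arbitrary values (an assumption for illustration, not from the library):
 * with n = 3, e = 011, d = 1, and bits written as b2 b1 b0,
 *
 *   transform of a = 101:      a ^= e  -> 110,  rotr(1) -> 011
 *   transform_inv of a = 011:  rotl(1) -> 110,  a ^= e  -> 101
 *
 * so transform_inv() undoes transform() for the same entry point `e` and
 * direction `d`.
 */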
// Inverse 'transforms' a point: rotate left by `d`, then XOR with `e`.
template <typename I>
inline void
transform_inv(const I& e, const size_t d, const size_t n, I& a)
{
    assert(a.size() == n);
    a.rotl(d);
    a ^= e;
}

// Update for method 1 (GrayCodeInv in the loop)
template <typename I>
inline void
update1(const I& l, const I& t, const I& w, const size_t n, I& e, size_t& d)
{
    assert(0 <= d && d < n);
    e = l;
    e.flip(d); //#D d == n-1 ? 0 : d+1 );

    // Update direction
    d += 1 + t.find_first();
    if (d >= n) {
        d -= n;
    }
    if (d >= n) {
        d -= n;
    }
    assert(0 <= d && d < n);

    if (!w.test(0)) {
        e.flip(d == 0 ? n - 1 : d - 1); //#D d );
    }
}

// Update for method 2 (GrayCodeInv out of loop)
template <typename I>
inline void
update2(const I& l, const I& t, const size_t n, I& e, size_t& d)
{
    assert(0 <= d && d < n);
    e = l;
    e.flip(d); //#D d == n-1 ? 0 : d+1 );

    // Update direction
    d += 1 + t.find_first();
    if (d >= n) {
        d -= n;
    }
    if (d >= n) {
        d -= n;
    }
    assert(0 <= d && d < n);
}

template <typename P, typename H, typename I>
inline void
coords_to_index(const P* const p,
                const size_t m,
                const size_t n,
                H& h,
                I&& scratch,
                size_t* const ds = nullptr)
{
    I e{std::move(scratch)};
    I l{e};
    I t{e};
    I w{e};

    // Initialize
    e.reset();
    l.reset();
    reset_bits(h);

    // Work from MSB to LSB
    size_t d = D0;
    size_t ho = m * n;
    for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; --i) {
        if (ds) {
            ds[i] = d;
        }

        // Get corner of sub-hypercube where point lies.
        get_location(p, n, static_cast<size_t>(i), l);

        // Mirror and reflect the location.
        // t = T_{(e,d)}(l)
        t = l;
        transform(e, d, n, t);

        w = t;
        if (static_cast<size_t>(i) < m - 1) {
            w.flip(n - 1);
        }

        // Concatenate to the index.
        ho -= n;
        set_bits(h, n, ho, w);

        // Update the entry point and direction.
        update2(l, t, n, e, d);
    }

    gray_code_inv(h);
}
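/* Note on the index layout built above: `h` is filled n bits at a time from
 * the most significant end (the write offset `ho` starts at m * n and
 * decreases by n each iteration), so the coarsest level i = m - 1 occupies
 * the top n bits.  This is "method 2": the Gray code inverse is applied once
 * to the whole index after the loop, rather than per level inside it as
 * update1() does for the inverse mapping below.
 */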
template <typename P, typename H, typename I>
inline void
index_to_coords(P* p, const size_t m, const size_t n, const H& h, I&& scratch)
{
    I e{std::move(scratch)};
    I l{e};
    I t{e};
    I w{e};

    // Initialize
    e.reset();
    l.reset();
    for (size_t j = 0; j < n; ++j) {
        reset_bits(p[j]);
    }

    // Work from MSB to LSB
    size_t d = D0;
    size_t ho = m * n;
    for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; --i) {
        // Get the Hilbert index bits
        ho -= n;
        get_bits(h, n, ho, w);

        // t = GrayCode(w)
        t = w;
        gray_code(t);

        // Reverse the transform
        // l = T^{-1}_{(e,d)}(t)
        l = t;
        transform_inv(e, d, n, l);

        // Distribute these bits to the coordinates.
        set_location(p, n, static_cast<size_t>(i), l);

        // Update the entry point and direction.
        update1(l, t, w, n, e, d);
    }
}

template <typename P, typename HC, typename I>
inline void
coords_to_compact_index(const P* const p,
                        const size_t* const ms,
                        const size_t n,
                        HC& hc,
                        I&& scratch,
                        size_t M = 0,
                        size_t m = 0)
{
    // Get total precision and max precision if not supplied
    if (M == 0 || m == 0) {
        M = m = 0;
        for (size_t i = 0; i < n; ++i) {
            assert(num_bits(p[i]) >= ms[i]);
            if (ms[i] > m) {
                m = ms[i];
            }
            M += ms[i];
        }
    }

    const size_t mn = m * n;

    assert(num_bits(hc) >= M);

    // If we could avoid allocation altogether (ie: have a fixed buffer
    // allocated on the stack) then this increases speed by a bit
    // (4% when n=4, m=20)
    size_t* const ds = new size_t[m];

    if (mn > SmallBitVec::bits_per_rack) {
        DynamicBitVec h(mn);
        detail::coords_to_index(p, m, n, h, std::move(scratch), ds);
        compact_index(ms, ds, n, m, h, hc);
    } else {
        SmallBitVec h(mn);
        detail::coords_to_index(p, m, n, h, std::move(scratch), ds);
        compact_index(ms, ds, n, m, h, hc);
    }

    delete[] ds;
}

template <typename P, typename HC, typename I>
inline void
compact_index_to_coords(P* const p,
                        const size_t* ms,
                        const size_t n,
                        const HC& hc,
                        I&& scratch,
                        size_t M,
                        size_t m)
{
    I e{std::move(scratch)};
    I l{e};
    I t{e};
    I w{e};
    I r{e};
    I mask{e};
    I ptrn{e};

    // Get total precision and max precision if not supplied
    if (M == 0 || m == 0) {
        M = m = 0;
        for (size_t i = 0; i < n; ++i) {
            if (ms[i] > m) {
                m = ms[i];
            }
            M += ms[i];
        }
    }

    assert(num_bits(hc) >= M);
    assert(num_bits(p[0]) >= m);

    // Initialize
    e.reset();
    l.reset();
    for (size_t j = 0; j < n; ++j) {
        reset_bits(p[j]);
    }

    // Work from MSB to LSB
    size_t d = D0;

    for (intptr_t i = static_cast<intptr_t>(m - 1); i >= 0; --i) {
        // Get the mask and ptrn
        size_t b = 0;
        extract_mask(ms, n, d, static_cast<size_t>(i), mask, b);
        ptrn = e;
        assert(ptrn.size() == n);
        ptrn.rotr(d);

        // Get the Hilbert index bits
        M -= b;
        r.reset(); // GetBits doesn't do this
        get_bits(hc, b, M, r);

        // w = GrayCodeRankInv(r)
        // t = GrayCode(w)
        gray_code_rank_inv(mask, ptrn, r, n, b, t, w);

        // Reverse the transform
        // l = T^{-1}_{(e,d)}(t)
        l = t;
        transform_inv(e, d, n, l);

        // Distribute these bits to the coordinates.
        set_location(p, n, static_cast<size_t>(i), l);

        // Update the entry point and direction.
        update1(l, t, w, n, e, d);
    }
}

} // namespace detail
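/* Usage sketch for the public functions below (illustrative only; the value
 * types and precisions are assumptions, not requirements).  An n-dimensional
 * point with m bits of precision per dimension maps to an index of at least
 * n * m bits, and index_to_coords() inverts coords_to_index():
 *
 *   uint32_t p[2] = {3, 5};  // n = 2 dimensions, m = 16 bits each
 *   uint64_t h = 0;          // index needs at least n * m = 32 bits
 *   chilbert::coords_to_index(p, 16, 2, h);
 *   chilbert::index_to_coords(p, 16, 2, h);  // recovers {3, 5}
 *
 * The compact variants take per-dimension precisions ms[] and produce an
 * index of sum(ms) bits instead of n * max(ms) bits.
 */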
template <typename P, typename H>
inline void
coords_to_index(const P* const p, const size_t m, const size_t n, H& h)
{
    assert(detail::num_bits(h) >= n * m);
    assert(detail::num_bits(p[0]) >= m);

    if (n <= SmallBitVec::bits_per_rack) {
        // Intermediate variables will fit in fixed width
        detail::coords_to_index(p, m, n, h, SmallBitVec(n));
    } else {
        // Otherwise, they must be DynamicBitVecs
        detail::coords_to_index(p, m, n, h, DynamicBitVec(n));
    }
}

template <typename P, typename H>
inline void
index_to_coords(P* const p, const size_t m, const size_t n, const H& h)
{
    assert(m > 0);
    assert(n > 0);
    assert(detail::num_bits(h) >= n * m);
    assert(detail::num_bits(p[0]) >= m);

    if (n <= SmallBitVec::bits_per_rack) {
        // Intermediate variables will fit in fixed width
        detail::index_to_coords(p, m, n, h, SmallBitVec(n));
    } else {
        // Otherwise, they must be DynamicBitVecs
        detail::index_to_coords(p, m, n, h, DynamicBitVec(n));
    }
}

template <typename P, typename HC>
inline void
coords_to_compact_index(const P* const p,
                        const size_t* const ms,
                        size_t n,
                        HC& hc,
                        const size_t M,
                        const size_t m)
{
    assert(hc.size() >= std::accumulate(ms, ms + n, size_t(0)));

    if (n <= SmallBitVec::bits_per_rack) {
        // Intermediate variables will fit in fixed width
        detail::coords_to_compact_index(p, ms, n, hc, SmallBitVec(n), M, m);
    } else {
        // Otherwise, they must be DynamicBitVecs
        detail::coords_to_compact_index(p, ms, n, hc, DynamicBitVec(n), M, m);
    }
}

template <typename P, typename HC>
inline void
compact_index_to_coords(P* const p,
                        const size_t* ms,
                        const size_t n,
                        const HC& hc,
                        const size_t M,
                        const size_t m)
{
    assert(hc.size() >= std::accumulate(ms, ms + n, size_t(0)));

    if (n <= SmallBitVec::bits_per_rack) {
        // Intermediate variables will fit in fixed width
        SmallBitVec scratch(n);
        detail::compact_index_to_coords(p, ms, n, hc, std::move(scratch), M, m);
    } else {
        // Otherwise, they must be DynamicBitVecs
        DynamicBitVec scratch(n);
        detail::compact_index_to_coords(p, ms, n, hc, std::move(scratch), M, m);
    }
}

} // namespace chilbert

#endif
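/* Illustrative sketch of the compact-index variants (the types, values, and
 * precisions here are assumptions for the example): with per-dimension
 * precisions ms = {4, 8}, the compact index needs only 4 + 8 = 12 bits
 * rather than n * max(ms) = 16:
 *
 *   uint16_t p[2]  = {9, 200};   // 9 fits in 4 bits, 200 fits in 8 bits
 *   size_t   ms[2] = {4, 8};
 *   chilbert::SmallBitVec hc(12);                         // sum(ms) bits
 *   chilbert::coords_to_compact_index(p, ms, 2, hc, 12, 8);
 *   chilbert::compact_index_to_coords(p, ms, 2, hc, 12, 8);  // round-trips
 */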