/* Copyright (c) 2019 The Khronos Group Inc.
 * Copyright (c) 2019 Valve Corporation
 * Copyright (c) 2019 LunarG, Inc.
 * Copyright (C) 2019 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * John Zulauf <jzulauf@lunarg.com>
 *
 */
#pragma once

#ifndef RANGE_VECTOR_H_
#define RANGE_VECTOR_H_

#include <algorithm>
#include <cassert>
#include <map>
#include <type_traits>
#include <utility>

#define RANGE_ASSERT(b) assert(b)

namespace sparse_container {
// range_map
//
// Implements an ordered map of non-overlapping, non-empty ranges
//
template <typename Index>
struct range {
    using index_type = Index;
    index_type begin;  // Inclusive lower bound of range
    index_type end;    // Exclusive upper bound of range

    inline bool empty() const { return begin == end; }
    inline bool valid() const { return begin <= end; }
    inline bool invalid() const { return !valid(); }
    inline bool non_empty() const { return begin < end; }  // valid and !empty

    inline bool is_prior_to(const range &other) const { return end == other.begin; }
    inline bool is_subsequent_to(const range &other) const { return begin == other.end; }
    inline bool includes(const index_type &index) const { return (begin <= index) && (index < end); }
    inline bool includes(const range &other) const { return (begin <= other.begin) && (other.end <= end); }
    inline bool excludes(const index_type &index) const { return (index < begin) || (end <= index); }
    inline bool excludes(const range &other) const { return (other.end <= begin) || (end <= other.begin); }
    inline bool intersects(const range &other) const { return includes(other.begin) || other.includes(begin); }
    inline index_type distance() const { return end - begin; }

    inline bool operator==(const range &rhs) const { return (begin == rhs.begin) && (end == rhs.end); }
    inline bool operator!=(const range &rhs) const { return (begin != rhs.begin) || (end != rhs.end); }

    inline range &operator-=(const index_type &offset) {
        begin = begin - offset;
        end = end - offset;
        return *this;
    }

    inline range &operator+=(const index_type &offset) {
        begin = begin + offset;
        end = end + offset;
        return *this;
    }

    // For a reversible/transitive < operator, compare first on begin and then on end:
    // less-than holds iff begin is less, or iff end is less when begin is equal.
    bool operator<(const range &rhs) const {
        bool result = false;
        if (invalid()) {
            // all invalid < valid, allows map/set validity check by looking at begin()->first
            // all invalid are equal, thus only less-than if this is invalid and rhs is valid
            result = rhs.valid();
        } else if (begin < rhs.begin) {
            result = true;
        } else if ((begin == rhs.begin) && (end < rhs.end)) {
            result = true;  // Simple common case -- boundary cases require the equality check for correctness.
        }
        return result;
    }

    // use as "strictly less/greater than" to check for non-overlapping ranges
    bool strictly_less(const range &rhs) const { return end <= rhs.begin; }
    bool strictly_less(const index_type &index) const { return end <= index; }
    bool strictly_greater(const range &rhs) const { return rhs.end <= begin; }
    bool strictly_greater(const index_type &index) const { return index < begin; }

    range &operator=(const range &rhs) {
        begin = rhs.begin;
        end = rhs.end;
        return *this;
    }

    range operator&(const range &rhs) const {
        if (includes(rhs.begin)) {
            return range(rhs.begin, std::min(end, rhs.end));
        } else if (rhs.includes(begin)) {
            return range(begin, std::min(end, rhs.end));
        }
        return range();  // Empty default range on non-intersection
    }

    range() : begin(), end() {}
    range(const index_type &begin_, const index_type &end_) : begin(begin_), end(end_) {}
};
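
// Usage sketch (illustrative only, not part of the API surface): range models the half-open
// interval [begin, end). Assuming a 32-bit index type:
//
//     sparse_container::range<uint32_t> a(0, 10);  // [0, 10)
//     sparse_container::range<uint32_t> b(5, 15);  // [5, 15)
//     assert(a.intersects(b));                     // overlap on [5, 10)
//     assert((a & b) == sparse_container::range<uint32_t>(5, 10));
//     assert(a.includes(9) && a.excludes(10));     // end is exclusive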

template <typename Container>
using const_correct_iterator = decltype(std::declval<Container>().begin());

template <typename Key, typename T>
class range_map {
  public:
  protected:
    using RangeKey = range<Key>;
    using MapKey = RangeKey;
    using ImplMap = std::map<MapKey, T>;
    ImplMap impl_map_;
    using ImplIterator = typename ImplMap::iterator;
    using ImplConstIterator = typename ImplMap::const_iterator;

  public:
    using mapped_type = typename ImplMap::mapped_type;
    using value_type = typename ImplMap::value_type;
    using key_type = typename ImplMap::key_type;
    using index_type = typename key_type::index_type;

    struct split_op_keep_both {
        static constexpr bool keep_lower() { return true; }
        static constexpr bool keep_upper() { return true; }
    };

    struct split_op_keep_lower {
        static constexpr bool keep_lower() { return true; }
        static constexpr bool keep_upper() { return false; }
    };

    struct split_op_keep_upper {
        static constexpr bool keep_lower() { return false; }
        static constexpr bool keep_upper() { return true; }
    };
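
    // Illustrative note (not additional API): the split_op_* policies above select which
    // half-ranges survive a split. For example, splitting an entry keyed [0, 10) at index 4:
    //   split_op_keep_both  -> entries [0, 4) and [4, 10), the value copied to both
    //   split_op_keep_lower -> entry [0, 4) only
    //   split_op_keep_upper -> entry [4, 10) only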

  protected:
    template <typename ThisType>
    using ConstCorrectImplIterator = decltype(std::declval<ThisType>().impl_begin());

    template <typename ThisType, typename WrappedIterator = ConstCorrectImplIterator<ThisType>>
    static WrappedIterator lower_bound_impl(ThisType &that, const key_type &key) {
        if (key.valid()) {
            // ImplMap doesn't give us what we want with a direct query: it would return the first entry contained (if any)
            // in key, not the first entry intersecting key. So, first look for the first entry that starts at or after
            // key.begin; with range's operator < we can safely use an empty range for the comparison.
            auto lower = that.impl_map_.lower_bound(key_type(key.begin, key.begin));

            // If there is a preceding entry it's possible that it includes begin, as all we know is that
            // lower.begin >= key.begin, or that lower is at end
            if (!that.at_impl_begin(lower)) {
                auto prev = lower;
                --prev;
                // If the previous entry includes begin (and we know key.begin > prev.begin) then prev is actually lower
                if (key.begin < prev->first.end) {
                    lower = prev;
                }
            }
            return lower;
        }
        // Key is ill-formed
        return that.impl_end();  // Point safely to nothing.
    }

    ImplIterator lower_bound_impl(const key_type &key) { return lower_bound_impl(*this, key); }

    ImplConstIterator lower_bound_impl(const key_type &key) const { return lower_bound_impl(*this, key); }

    template <typename ThisType, typename WrappedIterator = ConstCorrectImplIterator<ThisType>>
    static WrappedIterator upper_bound_impl(ThisType &that, const key_type &key) {
        if (key.valid()) {
            // The upper bound is the first range that is fully greater (upper.begin >= key.end).
            // We can get close by looking for the first entry to exclude key.end, then adjust for the fact that key.end
            // is exclusive; ImplMap::upper_bound may thus be off by one here, i.e. the previous entry may be the upper bound.
            auto upper = that.impl_map_.upper_bound(key_type(key.end, key.end));
            if (!that.at_impl_end(upper) && (upper != that.impl_begin())) {
                auto prev = upper;
                --prev;
                // We know key.end is >= prev.begin, the only question is whether it's ==
                if (prev->first.begin == key.end) {
                    upper = prev;
                }
            }
            return upper;
        }
        return that.impl_end();  // Point safely to nothing.
    }

    ImplIterator upper_bound_impl(const key_type &key) { return upper_bound_impl(*this, key); }

    ImplConstIterator upper_bound_impl(const key_type &key) const { return upper_bound_impl(*this, key); }

    ImplIterator impl_find(const key_type &key) { return impl_map_.find(key); }
    ImplConstIterator impl_find(const key_type &key) const { return impl_map_.find(key); }
    bool impl_not_found(const key_type &key) const { return impl_end() == impl_find(key); }

    ImplIterator impl_end() { return impl_map_.end(); }
    ImplConstIterator impl_end() const { return impl_map_.end(); }

    ImplIterator impl_begin() { return impl_map_.begin(); }
    ImplConstIterator impl_begin() const { return impl_map_.begin(); }

    inline bool at_impl_end(const ImplIterator &pos) { return pos == impl_end(); }
    inline bool at_impl_end(const ImplConstIterator &pos) const { return pos == impl_end(); }

    inline bool at_impl_begin(const ImplIterator &pos) { return pos == impl_begin(); }
    inline bool at_impl_begin(const ImplConstIterator &pos) const { return pos == impl_begin(); }

    ImplIterator impl_erase(const ImplIterator &pos) { return impl_map_.erase(pos); }

    template <typename Value>
    ImplIterator impl_insert(const ImplIterator &hint, Value &&value) {
        RANGE_ASSERT(impl_not_found(value.first));
        RANGE_ASSERT(value.first.non_empty());
        return impl_map_.emplace_hint(hint, std::forward<Value>(value));
    }
    ImplIterator impl_insert(const ImplIterator &hint, const key_type &key, const mapped_type &value) {
        return impl_insert(hint, std::make_pair(key, value));
    }

    ImplIterator impl_insert(const ImplIterator &hint, const index_type &begin, const index_type &end, const mapped_type &value) {
        return impl_insert(hint, key_type(begin, end), value);
    }

    template <typename SplitOp>
    ImplIterator split_impl(const ImplIterator &split_it, const index_type &index, const SplitOp &) {
        // Make sure the entry contains the split point
        if (!split_it->first.includes(index)) return split_it;  // If we don't have a valid split point, just return the iterator

        const auto range = split_it->first;
        key_type lower_range(range.begin, index);
        if (lower_range.empty() && SplitOp::keep_upper()) {
            return split_it;  // this is a noop: we're keeping the upper half, which is the same as split_it
        }
        // Save the contents of the entry and erase it
        auto value = std::move(split_it->second);
        auto next_it = impl_map_.erase(split_it);  // Keep this, just in case the split point results in an empty "keep" set

        if (lower_range.empty() && !SplitOp::keep_upper()) {
            // This is effectively an erase...
            return next_it;
        }
        // Upper range cannot be empty
        key_type upper_range(index, range.end);
        key_type move_range;
        key_type copy_range;

        // We're going to keep one or both of the split pieces. If we keep both, we'll copy value to the upper,
        // move it to the lower, and return the lower; else move to, and return, the kept one.
        if (SplitOp::keep_lower() && !lower_range.empty()) {
            move_range = lower_range;
            if (SplitOp::keep_upper()) {
                copy_range = upper_range;  // only need a valid copy range if we keep both.
            }
        } else if (SplitOp::keep_upper()) {  // We're not keeping the lower split because it's either empty or not wanted
            move_range = upper_range;        // this will be non_empty as index is included ( < end) in the original range
        }

        // we insert from upper to lower because that's what emplace_hint can do in constant time (not log time in C++11)
        if (!copy_range.empty()) {
            // We have a second range to create, so do it by copy
            RANGE_ASSERT(impl_map_.find(copy_range) == impl_map_.end());
            next_it = impl_map_.emplace_hint(next_it, std::make_pair(copy_range, value));
        }

        if (!move_range.empty()) {
            // Whether we keep one or both, the one we return gets value moved to it, as the other one already has a copy
            RANGE_ASSERT(impl_map_.find(move_range) == impl_map_.end());
            next_it = impl_map_.emplace_hint(next_it, std::make_pair(move_range, std::move(value)));
        }

        // point to the beginning of the inserted elements (or the next entry from the erase)
        return next_it;
    }

    // Do a ranged insert that splits existing ranges at the boundaries, and writes value to any uninitialized sub-ranges
    range<ImplIterator> infill_and_split(const key_type &bounds, const mapped_type &value, ImplIterator lower, bool split_bounds) {
        auto pos = lower;
        if (at_impl_end(pos)) return range<ImplIterator>(pos, pos);  // defensive...

        // Logic assumes we are starting at lower bound
        RANGE_ASSERT(lower == lower_bound_impl(bounds));

        // Trim/infill the beginning if needed
        const auto first_begin = pos->first.begin;
        if (bounds.begin > first_begin && split_bounds) {
            pos = split_impl(pos, bounds.begin, split_op_keep_both());
            lower = pos;
            ++lower;
            RANGE_ASSERT(lower == lower_bound_impl(bounds));
        } else if (bounds.begin < first_begin) {
            pos = impl_insert(pos, bounds.begin, first_begin, value);
            lower = pos;
            RANGE_ASSERT(lower == lower_bound_impl(bounds));
        }

        // in the trim case pos starts one before lower_bound, but that allows trimming a single-entry range in the loop.
        // NOTE that the loop is trimming and infilling at pos + 1
        while (!at_impl_end(pos) && pos->first.begin < bounds.end) {
            auto last_end = pos->first.end;
            // check for in-fill
            ++pos;
            if (at_impl_end(pos)) {
                if (last_end < bounds.end) {
                    // Gap after the last entry in impl_map and before end
                    pos = impl_insert(pos, last_end, bounds.end, value);
                    ++pos;  // advances to impl_end, as we're at the upper boundary
                    RANGE_ASSERT(at_impl_end(pos));
                }
            } else if (pos->first.begin != last_end) {
                // we have a gap between the last entry and the current one... fill, but not beyond bounds
                if (bounds.includes(pos->first.begin)) {
                    pos = impl_insert(pos, last_end, pos->first.begin, value);
                    // don't further advance pos, because we may need to split the next entry and thus can't skip it.
                } else if (last_end < bounds.end) {
                    // Non-zero length final gap in-bounds
                    pos = impl_insert(pos, last_end, bounds.end, value);
                    ++pos;  // advances back to the out-of-bounds entry which we inserted just before
                    RANGE_ASSERT(!bounds.includes(pos->first.begin));
                }
            } else if (pos->first.includes(bounds.end)) {
                if (split_bounds) {
                    // extends past the end of the bounds range, snip to only include the bounded section
                    // NOTE: this splits pos, but the upper half of the split should now be considered upper_bound
                    // for the range
                    pos = split_impl(pos, bounds.end, split_op_keep_both());
                }
                // advance to the upper half of the split, which will be upper_bound, or to next, which will both be out of bounds
                ++pos;
                RANGE_ASSERT(!bounds.includes(pos->first.begin));
            }
        }
        // Return the current position which should be the upper_bound for bounds
        RANGE_ASSERT(pos == upper_bound_impl(bounds));
        return range<ImplIterator>(lower, pos);
    }

    ImplIterator impl_erase_range(const key_type &bounds, ImplIterator lower) {
        // Logic assumes we are starting at a valid lower bound
        RANGE_ASSERT(!at_impl_end(lower));
        RANGE_ASSERT(lower == lower_bound_impl(bounds));

        // Trim the beginning if needed
        auto current = lower;
        const auto first_begin = current->first.begin;
        if (bounds.begin > first_begin) {
            // Preserve the portion of lower bound excluded from bounds
            current = split_impl(current, bounds.begin, split_op_keep_lower());
            // Exclude the preserved portion
            ++current;
            RANGE_ASSERT(current == lower_bound_impl(bounds));
        }

        // Loop over completely contained entries and erase them
        while (!at_impl_end(current) && (current->first.end <= bounds.end)) {
            current = impl_erase(current);
        }

        if (!at_impl_end(current) && current->first.includes(bounds.end)) {
            // the last entry extends past the end of the bounds range, snip to only erase the bounded section
            current = split_impl(current, bounds.end, split_op_keep_upper());
        }

        RANGE_ASSERT(current == upper_bound_impl(bounds));
        return current;
    }

    template <typename ValueType, typename WrappedIterator_>
    struct iterator_impl {
      public:
        friend class range_map;
        using WrappedIterator = WrappedIterator_;

      private:
        WrappedIterator pos_;

        // Create an iterator at a specific internal state -- only from the parent container
        iterator_impl(const WrappedIterator &pos) : pos_(pos) {}

      public:
        iterator_impl() : iterator_impl(WrappedIterator()) {}
        iterator_impl(const iterator_impl &other) : pos_(other.pos_) {}

        iterator_impl &operator=(const iterator_impl &rhs) {
            pos_ = rhs.pos_;
            return *this;
        }

        inline bool operator==(const iterator_impl &rhs) const { return pos_ == rhs.pos_; }

        inline bool operator!=(const iterator_impl &rhs) const { return pos_ != rhs.pos_; }

        ValueType &operator*() const { return *pos_; }
        ValueType *operator->() const { return &*pos_; }

        iterator_impl &operator++() {
            ++pos_;
            return *this;
        }

        iterator_impl &operator--() {
            --pos_;
            return *this;
        }
    };

  public:
    using iterator = iterator_impl<value_type, ImplIterator>;
    using const_iterator = iterator_impl<const value_type, ImplConstIterator>;

  protected:
    inline bool at_end(const iterator &it) { return at_impl_end(it.pos_); }
    inline bool at_end(const const_iterator &it) const { return at_impl_end(it.pos_); }
    inline bool at_begin(const iterator &it) { return at_impl_begin(it.pos_); }

    template <typename That, typename Iterator>
    static bool is_contiguous_impl(That *const that, const key_type &range, const Iterator &lower) {
        // Search range or intersection is empty
        if (lower == that->impl_end() || lower->first.excludes(range)) return false;

        if (lower->first.includes(range)) {
            return true;  // there is one entry that contains the whole key range
        }

        bool contiguous = true;
        for (auto pos = lower; contiguous && pos != that->impl_end() && range.includes(pos->first.begin); ++pos) {
            // if the current entry doesn't cover the rest of the key range, check that the next entry exists and abuts
            if (pos->first.end < range.end) {
                auto next = pos;
                ++next;
                contiguous = (next != that->impl_end()) && pos->first.is_prior_to(next->first);
            }
        }
        return contiguous;
    }

  public:
    iterator end() { return iterator(impl_map_.end()); }                          // policy and bounds don't matter for end
    const_iterator end() const { return const_iterator(impl_map_.end()); }        // policy and bounds don't matter for end
    iterator begin() { return iterator(impl_map_.begin()); }                      // with default policy, and thus no bounds
    const_iterator begin() const { return const_iterator(impl_map_.begin()); }    // with default policy, and thus no bounds
    const_iterator cbegin() const { return const_iterator(impl_map_.cbegin()); }  // with default policy, and thus no bounds
    const_iterator cend() const { return const_iterator(impl_map_.cend()); }      // with default policy, and thus no bounds

    iterator erase(const iterator &pos) {
        RANGE_ASSERT(!at_end(pos));
        return iterator(impl_erase(pos.pos_));
    }

    iterator erase(range<iterator> bounds) {
        auto current = bounds.begin.pos_;
        while (current != bounds.end.pos_) {
            RANGE_ASSERT(!at_impl_end(current));
            current = impl_map_.erase(current);
        }
        RANGE_ASSERT(current == bounds.end.pos_);
        return current;
    }

    iterator erase(iterator first, iterator last) { return erase(range<iterator>(first, last)); }

    iterator erase_range(const key_type &bounds) {
        auto lower = lower_bound_impl(bounds);

        if (at_impl_end(lower) || !bounds.intersects(lower->first)) {
            // There is nothing in this range to erase: the lower bound is above bounds
            return iterator(lower);
        }
        auto next = impl_erase_range(bounds, lower);
        return iterator(next);
    }

    void clear() { impl_map_.clear(); }

    iterator find(const key_type &key) { return iterator(impl_map_.find(key)); }

    const_iterator find(const key_type &key) const { return const_iterator(impl_map_.find(key)); }

    iterator find(const index_type &index) {
        auto lower = lower_bound(range<index_type>(index, index + 1));
        if (!at_end(lower) && lower->first.includes(index)) {
            return lower;
        }
        return end();
    }

    const_iterator find(const index_type &index) const {
        auto lower = lower_bound(key_type(index, index + 1));
        if (!at_end(lower) && lower->first.includes(index)) {
            return lower;
        }
        return end();
    }

    struct insert_range_no_split_bounds {
        const static bool split_boundaries = false;
    };

    struct insert_range_split_bounds {
        const static bool split_boundaries = true;
    };

    // The hint for this insert range is the *lower* bound of the insert, as opposed to the upper_bound used
    // as a non-range hint
    template <typename Value, typename Split = insert_range_split_bounds>
    range<iterator> insert_range(const iterator &lower, Value &&value, const Split &split = Split()) {
        const key_type &bounds = value.first;
        // We're not robust to a bad hint, so detect it with extreme prejudice
        // TODO: Add bad hint test to make this robust...
        RANGE_ASSERT(lower == lower_bound(bounds));
        range<ImplIterator> impl_bounds(lower.pos_, lower.pos_);

        if (at_impl_end(impl_bounds.begin) || !bounds.intersects(impl_bounds.begin->first)) {
            // There is nothing in this range: the lower bound is above bounds.
            // Generate the needed range (and we're done...)
            impl_bounds.begin = impl_insert(impl_bounds.begin, std::forward<Value>(value));
        } else {
            // Splitting from an occupied range, trim and infill (with value) as needed
            RANGE_ASSERT(impl_bounds.begin->first.intersects(bounds));  // Must construct at the lower boundary of range
            impl_bounds = infill_and_split(bounds, value.second, impl_bounds.begin, Split::split_boundaries);
        }

        return range<iterator>(iterator(impl_bounds.begin), iterator(impl_bounds.end));
    }

    template <typename Value, typename Split = insert_range_split_bounds>
    range<iterator> insert_range(Value &&value, const Split &split = Split()) {
        return insert_range(lower_bound(value.first), value, split);
    }

    iterator lower_bound(const key_type &key) { return iterator(lower_bound_impl(key)); }

    const_iterator lower_bound(const key_type &key) const { return const_iterator(lower_bound_impl(key)); }

    iterator upper_bound(const key_type &key) { return iterator(upper_bound_impl(key)); }

    const_iterator upper_bound(const key_type &key) const { return const_iterator(upper_bound_impl(key)); }

    range<iterator> bounds(const key_type &key) { return {lower_bound(key), upper_bound(key)}; }
    range<const_iterator> cbounds(const key_type &key) const { return {lower_bound(key), upper_bound(key)}; }
    range<const_iterator> bounds(const key_type &key) const { return cbounds(key); }

    using insert_pair = std::pair<iterator, bool>;

    // This is a traditional no-replacement insert.
    template <typename Value>
    insert_pair insert(Value &&value) {
        const auto &key = value.first;
        if (!key.non_empty()) {
            // It's an invalid key, early bail, pointing to end
            return std::make_pair(end(), false);
        }

        // Look for range conflicts (and an insertion point, which makes the lower_bound *not* wasted work)
        // we don't have to check upper; it suffices to check that lower doesn't intersect (which it would if lower != upper)
        auto lower = lower_bound_impl(key);
        if (at_impl_end(lower) || !lower->first.intersects(key)) {
            // range is not even partially overlapped, and lower is strictly > than key
            auto impl_insert = impl_map_.emplace_hint(lower, std::forward<Value>(value));
            iterator wrap_it(impl_insert);
            return std::make_pair(wrap_it, true);
        }
        // We don't replace
        return std::make_pair(iterator(lower), false);
    }

    iterator merge_adjacent(const range<iterator> &bounds) {
        if (at_end(bounds.begin)) return bounds.begin;

        auto anchor = bounds.begin;
        while (anchor != bounds.end) {
            RANGE_ASSERT(!at_end(anchor));
            auto current = anchor;
            auto next = current;
            ++next;
            // Walk from anchor to find adjoining ranges that have the same value
            while (next != bounds.end && next->first.is_subsequent_to(current->first) && next->second == anchor->second) {
                current = next;
                ++next;
            }
            if (current != anchor) {
                // the while loop above advanced at least once, so we have something to merge
                value_type merged = std::make_pair(key_type(anchor->first.begin, current->first.end), std::move(anchor->second));
                next = erase(range<iterator>(anchor, next));
                impl_map_.emplace_hint(next.pos_, merged);
            }
            // Reset the anchor for the next merge search
            anchor = next;
        }
        RANGE_ASSERT(anchor == bounds.end);
        return anchor;
    }

    iterator merge_adjacent(iterator start) { return merge_adjacent(range<iterator>(start, end())); }

    iterator merge_adjacent() { return merge_adjacent(range<iterator>(begin(), end())); }
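
    // Illustrative note (not additional API): merge_adjacent coalesces abutting entries with equal
    // values. For example, a map holding [0, 5)->1 and [5, 9)->1 becomes the single entry [0, 9)->1
    // after merge_adjacent(); entries with differing values, or separated by a gap, are left alone.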

    template <typename SplitOp>
    iterator split(const iterator whole_it, const index_type &index, const SplitOp &split_op) {
        auto split_it = split_impl(whole_it.pos_, index, split_op);
        return iterator(split_it);
    }

    // The overwrite hint here is lower... and if it's not the correct lower bound, this fails (see assert below)
    template <typename Value>
    iterator overwrite_range(const iterator &lower, Value &&value) {
        // We're not robust to a bad hint, so detect it with extreme prejudice
        // TODO: Add bad hint test to make this robust...
        auto lower_impl = lower.pos_;
        auto insert_hint = lower_impl;
        if (!at_impl_end(lower_impl)) {
            // If we're at end (and the hint is good), there's nothing to erase
            RANGE_ASSERT(lower == lower_bound(value.first));
            insert_hint = impl_erase_range(value.first, lower_impl);
        }
        auto inserted = impl_insert(insert_hint, std::forward<Value>(value));
        return iterator(inserted);
    }

    template <typename Value>
    iterator overwrite_range(Value &&value) {
        auto lower = lower_bound(value.first);
        return overwrite_range(lower, value);
    }

    // Need const_iterator/const and iterator/non-const variants using a common implementation
    bool is_contiguous(const key_type &key, const const_iterator &lower) const {
        // We're not robust to a bad lower, so detect it with extreme prejudice
        // TODO: Add bad lower test to make this robust...
        RANGE_ASSERT(lower == lower_bound(key));
        return is_contiguous_impl(this, key, lower.pos_);
    }

    bool is_contiguous(const key_type &key, const iterator &lower) {
        // We're not robust to a bad lower, so detect it with extreme prejudice
        // TODO: Add bad lower test to make this robust...
        RANGE_ASSERT(lower == lower_bound(key));
        return is_contiguous_impl(this, key, lower.pos_);
    }

    // we don't need a non-const version of this variant
    bool is_contiguous(const key_type &key) const { return is_contiguous_impl(this, key, lower_bound(key).pos_); }

    bool empty() const { return impl_map_.empty(); }
    size_t size() const { return impl_map_.size(); }
};
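
// Usage sketch for range_map (illustrative only; the types and values here are hypothetical):
//
//     using Map = sparse_container::range_map<uint32_t, int>;
//     using Range = Map::key_type;
//     Map map;
//     map.insert(std::make_pair(Range(0, 10), 1));           // [0, 10) -> 1, no-replacement insert
//     map.overwrite_range(std::make_pair(Range(4, 6), 2));   // splits to [0, 4)->1, [4, 6)->2, [6, 10)->1
//     auto it = map.find(5);                                 // finds the entry whose range includes index 5
//     assert(it->second == 2);
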
// Forward index iterator, tracking an index value and the appropriate lower bound.
// Returns an index_type, lower_bound pair. Supports ++, offset, and seek, affecting the index, with the
// lower bound updated as needed. As the index may specify a range for which no entry exists, the dereferenced
// iterator includes a "valid" field, true IFF the lower_bound is not end() and contains [index, index + 1).
//
// Must be explicitly invalidated when the underlying map is changed.
template <typename Map>
class cached_lower_bound_impl {
    using plain_map_type = typename std::remove_const<Map>::type;  // Allow instantiation with const or non-const Map
  public:
    using iterator = const_correct_iterator<Map>;
    using key_type = typename plain_map_type::key_type;
    using mapped_type = typename plain_map_type::mapped_type;
    // Both sides of the return pair are const'd because we're returning references/pointers to the *internal* state
    // and we don't want any caller altering internal state.
    using index_type = typename Map::index_type;
    struct value_type {
        const index_type &index;
        const iterator &lower_bound;
        const bool &valid;
        value_type(const index_type &index_, const iterator &lower_bound_, bool &valid_)
            : index(index_), lower_bound(lower_bound_), valid(valid_) {}
    };

  private:
    Map *const map_;
    value_type pos_;

    index_type index_;
    iterator lower_bound_;
    bool valid_;

    bool is_valid() const { return includes(index_); }

    // Allow reuse of a type with const semantics
    void set_value(const index_type &index, const iterator &it) {
        RANGE_ASSERT(it == lower_bound(index));
        index_ = index;
        lower_bound_ = it;
        valid_ = is_valid();
    }

    void update(const index_type &index) {
        RANGE_ASSERT(lower_bound_ == lower_bound(index));
        index_ = index;
        valid_ = is_valid();
    }

    inline iterator lower_bound(const index_type &index) { return map_->lower_bound(key_type(index, index + 1)); }
    inline bool at_end(const iterator &it) const { return it == map_->end(); }
    inline bool at_end() const { return at_end(lower_bound_); }

    bool is_lower_than(const index_type &index, const iterator &it) { return at_end(it) || (index < it->first.end); }

  public:
    // includes(index) is a convenience function to test if the index would be in the currently cached lower bound
    bool includes(const index_type &index) const { return !at_end() && lower_bound_->first.includes(index); }

    // The return is const because we are sharing the internal state directly.
    const value_type &operator*() const { return pos_; }
    const value_type *operator->() const { return &pos_; }

    // Advance the cached location by 1
    cached_lower_bound_impl &operator++() {
        const index_type next = index_ + 1;
        if (is_lower_than(next, lower_bound_)) {
            update(next);
        } else {
            // if we're past pos_->second, next *must* be the new lower bound.
            // NOTE: next can't be past end, so lower_bound_ isn't end.
            auto next_it = lower_bound_;
            ++next_it;
            set_value(next, next_it);

            // However we *must* not be past next.
            RANGE_ASSERT(is_lower_than(next, next_it));
        }

        return *this;
    }

    // seek(index) moves the cached location to index, updating lower_bound_ as needed.
    cached_lower_bound_impl &seek(const index_type &seek_to) {
        // Optimize seeking forward
        if (index_ == seek_to) {
            // seek to self is a NOOP. To reset the lower bound after a map change, use invalidate
        } else if (index_ < seek_to) {
            // See if the current or next ranges are the appropriate lower_bound... should be a common use case
            if (is_lower_than(seek_to, lower_bound_)) {
                // lower_bound_ is still the correct lower bound
                update(seek_to);
            } else {
                // Look to see if the next range is the new lower_bound (and we aren't at end)
                auto next_it = lower_bound_;
                ++next_it;
                if (is_lower_than(seek_to, next_it)) {
                    // next_it is the correct new lower bound
                    set_value(seek_to, next_it);
                } else {
                    // We don't know where we are... and we aren't going to walk the tree looking for seek_to.
                    set_value(seek_to, lower_bound(seek_to));
                }
            }
        } else {
            // General case... this is += so we're not implementing optimized negative offset logic
            set_value(seek_to, lower_bound(seek_to));
        }
        return *this;
    }

    // Advance the cached location by offset.
    cached_lower_bound_impl &offset(const index_type &offset) {
        const index_type next = index_ + offset;
        return seek(next);
    }

    // invalidate() resets the lower_bound_ cache, needed after insert/erase/overwrite/split operations
    cached_lower_bound_impl &invalidate() {
        index_type index = index_;  // copy, as set modifies in place.
        set_value(index, lower_bound(index));
        return *this;
    }

    // Allow a hint for a *valid* lower bound for the current index
    // TODO: if the fail-over becomes a hot-spot, the hint logic could be far more clever (looking at previous/next...)
    cached_lower_bound_impl &invalidate(const iterator &hint) {
        if ((hint != map_->end()) && hint->first.includes(index_)) {
            auto index = index_;  // by copy, as set modifies in place
            set_value(index, hint);
        } else {
            invalidate();
        }
        return *this;
    }

    // The offset in index_type to the next change: the end of the current range, or the transition from invalid to
    // valid. If invalid and at_end, returns index_type(0).
    index_type distance_to_edge() {
        if (valid_) {
            // Distance to the edge of the current range
            return lower_bound_->first.end - index_;
        } else if (at_end()) {
            return index_type(0);
        } else {
            return lower_bound_->first.begin - index_;
        }
    }

    // Default constructed object reports valid (correctly) as false, but otherwise will fail (assert) under nearly any use.
    cached_lower_bound_impl() : map_(nullptr), pos_(index_, lower_bound_, valid_), index_(0), lower_bound_(), valid_(false) {}
    cached_lower_bound_impl(Map &map, const index_type &index)
        : map_(&map), pos_(index_, lower_bound_, valid_), index_(index), lower_bound_(lower_bound(index)), valid_(is_valid()) {}
};

template <typename CachedLowerBound, typename MappedType = typename CachedLowerBound::mapped_type>
const MappedType &evaluate(const CachedLowerBound &clb, const MappedType &default_value) {
    if (clb->valid) {
        return clb->lower_bound->second;
    }
    return default_value;
}
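
// Usage sketch for cached_lower_bound_impl and evaluate (illustrative only; names are hypothetical):
//
//     sparse_container::range_map<uint32_t, int> map;  // assume entries exist over various ranges
//     sparse_container::cached_lower_bound_impl<decltype(map)> clb(map, 0);
//     for (uint32_t i = 0; i < 16; ++i, ++clb) {
//         // evaluate returns the mapped value at the cached position, or the default in gaps
//         int value = sparse_container::evaluate(clb, -1);
//         (void)value;
//     }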

// Parallel iterator
// Traverses two range maps over the same range, but without assumptions of aligned ranges.
// ++ increments to the next point where one of the two maps changes range, giving a range over which the two
// maps do not transition ranges
template <typename MapA, typename MapB, typename KeyType = typename MapA::key_type>
class parallel_iterator {
  public:
    using key_type = KeyType;
    using index_type = typename key_type::index_type;

    // The traits keep the iterator/const_iterator consistent with the constness of the map.
    using map_type_A = MapA;
    using plain_map_type_A = typename std::remove_const<MapA>::type;  // Allow instantiation with const or non-const Map
    using key_type_A = typename plain_map_type_A::key_type;
    using index_type_A = typename plain_map_type_A::index_type;
    using iterator_A = const_correct_iterator<map_type_A>;
    using lower_bound_A = cached_lower_bound_impl<map_type_A>;

    using map_type_B = MapB;
    using plain_map_type_B = typename std::remove_const<MapB>::type;
    using key_type_B = typename plain_map_type_B::key_type;
    using index_type_B = typename plain_map_type_B::index_type;
    using iterator_B = const_correct_iterator<map_type_B>;
    using lower_bound_B = cached_lower_bound_impl<map_type_B>;

    // This is the value we'll always be returning, but the referenced object will be updated by the operations
    struct value_type {
        const key_type &range;
        const lower_bound_A &pos_A;
        const lower_bound_B &pos_B;
        value_type(const key_type &range_, const lower_bound_A &pos_A_, const lower_bound_B &pos_B_)
            : range(range_), pos_A(pos_A_), pos_B(pos_B_) {}
    };

  private:
    lower_bound_A pos_A_;
    lower_bound_B pos_B_;
    key_type range_;
    value_type pos_;
    index_type compute_delta() {
        auto delta_A = pos_A_.distance_to_edge();
        auto delta_B = pos_B_.distance_to_edge();
        index_type delta_min;

        // If either A or B is at end, its distance is *0*, so it shouldn't be considered in the "distance to edge"
        if (delta_A == 0) {  // lower bound A is at end
            delta_min = static_cast<index_type>(delta_B);
        } else if (delta_B == 0) {  // lower bound B is at end
            delta_min = static_cast<index_type>(delta_A);
        } else {
            // Neither is at end, use the nearest edge, s.t. over this range A and B are both constant
            delta_min = std::min(static_cast<index_type>(delta_A), static_cast<index_type>(delta_B));
        }
        return delta_min;
    }

  public:
    // Default constructed object will report range empty (for end checks), but otherwise is unsafe to use
    parallel_iterator() : pos_A_(), pos_B_(), range_(), pos_(range_, pos_A_, pos_B_) {}
    parallel_iterator(map_type_A &map_A, map_type_B &map_B, index_type index)
        : pos_A_(map_A, static_cast<index_type_A>(index)),
          pos_B_(map_B, static_cast<index_type_B>(index)),
          range_(index, index + compute_delta()),
          pos_(range_, pos_A_, pos_B_) {}

    // Advance to the next spot where one of the two maps changes
    parallel_iterator &operator++() {
        const auto start = range_.end;         // we computed this the last time we set range
        const auto delta = range_.distance();  // we computed this the last time we set range
        RANGE_ASSERT(delta != 0);              // Trying to increment past end

        pos_A_.offset(static_cast<index_type_A>(delta));
        pos_B_.offset(static_cast<index_type_B>(delta));

        range_ = key_type(start, start + compute_delta());  // find the next boundary (must be after offset)
        RANGE_ASSERT(pos_A_->index == start);
        RANGE_ASSERT(pos_B_->index == start);

        return *this;
    }

    // Seeks to a specific index in both maps, resetting range. Cannot guarantee range.begin is on an edge boundary,
    // but range.end will be. Lower bound objects are assumed to invalidate their cached lower bounds on seek.
    parallel_iterator &seek(const index_type &index) {
        pos_A_.seek(static_cast<index_type_A>(index));
        pos_B_.seek(static_cast<index_type_B>(index));
        range_ = key_type(index, index + compute_delta());
        RANGE_ASSERT(pos_A_->index == index);
        RANGE_ASSERT(pos_A_->index == pos_B_->index);
        return *this;
    }

    // Invalidates the lower_bound caches, resetting range. Cannot guarantee range.begin is on an edge boundary,
    // but range.end will be.
    parallel_iterator &invalidate() {
        const index_type start = range_.begin;
        seek(start);
        return *this;
    }
    parallel_iterator &invalidate_A() {
        const index_type index = range_.begin;
        pos_A_.seek(static_cast<index_type_A>(index));
        range_ = key_type(index, index + compute_delta());
        return *this;
    }
    parallel_iterator &invalidate_B() {
        const index_type index = range_.begin;
        pos_B_.seek(static_cast<index_type_B>(index));
        range_ = key_type(index, index + compute_delta());
        return *this;
    }

    // The return is const because we are sharing the internal state directly.
    const value_type &operator*() const { return pos_; }
    const value_type *operator->() const { return &pos_; }
};
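
// Usage sketch for parallel_iterator (illustrative only): walk two maps in lockstep over
// sub-ranges within which neither map transitions entries.
//
//     using Map = sparse_container::range_map<uint32_t, int>;
//     Map a, b;  // assume both have been populated
//     sparse_container::parallel_iterator<Map, const Map> par_it(a, b, 0);
//     while (par_it->range.non_empty()) {
//         // par_it->pos_A and par_it->pos_B are cached lower bounds, constant over par_it->range
//         ++par_it;  // advance to the next boundary in either map
//     }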

enum class splice_precedence { prefer_source, prefer_dest };

template <typename RangeMap, typename SourceIterator = typename RangeMap::const_iterator>
bool splice(RangeMap *to, const RangeMap &from, splice_precedence arbiter, SourceIterator begin, SourceIterator end) {
    if (from.empty() || (begin == end) || (begin == from.cend())) return false;  // nothing to merge.

    using ParallelIterator = parallel_iterator<RangeMap, const RangeMap>;
    using Key = typename RangeMap::key_type;
    using CachedLowerBound = cached_lower_bound_impl<RangeMap>;
    using ConstCachedLowerBound = cached_lower_bound_impl<const RangeMap>;
    using NoSplit = typename RangeMap::insert_range_no_split_bounds;
    ParallelIterator par_it(*to, from, begin->first.begin);
    bool updated = false;
    while (par_it->range.non_empty() && par_it->pos_B->lower_bound != end) {
        const Key &range = par_it->range;
        const CachedLowerBound &to_lb = par_it->pos_A;
        const ConstCachedLowerBound &from_lb = par_it->pos_B;
        if (from_lb->valid) {
            auto read_it = from_lb->lower_bound;
            auto write_it = to_lb->lower_bound;
            // Because of how the parallel iterator walks, "to" is valid over the whole range or it isn't (ranges don't span
            // transitions between map entries or between valid and invalid ranges)
            if (to_lb->valid) {
                // Only rewrite this range if source is preferred (and the value differs)
                // TODO: determine if equality checks are always wanted. (for example heavyweight values)
                if (arbiter == splice_precedence::prefer_source && (write_it->second != read_it->second)) {
                    // Both ranges are occupied, source is preferred, and from differs from to
                    if (write_it->first == range) {
                        // we're writing the whole destination range, so just set the value
                        write_it->second = read_it->second;
                    } else {
                        to->overwrite_range(write_it, std::make_pair(range, read_it->second));
                        par_it.invalidate_A();  // we've changed map 'to' behind to_lb's back... let it know.
                    }
                    updated = true;
                }
            } else {
                // Insert into the gap.
                to->insert_range(write_it, std::make_pair(range, read_it->second), NoSplit());
                par_it.invalidate_A();  // we've changed map 'to' behind to_lb's back... let it know.
                updated = true;
            }
        }
        ++par_it;  // next range over which both 'to' and 'from' stay constant
    }
    return updated;
}
// Shorthand for "from begin to end"
template <typename RangeMap>
bool splice(RangeMap *to, const RangeMap &from, splice_precedence arbiter) {
    return splice(to, from, arbiter, from.cbegin(), from.cend());
}
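
// Usage sketch for splice (illustrative only): copy all of 'src' into 'dst', letting existing
// 'dst' entries win on overlap.
//
//     sparse_container::range_map<uint32_t, int> dst, src;  // assume both have been populated
//     bool changed = sparse_container::splice(&dst, src, sparse_container::splice_precedence::prefer_dest);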

}  // namespace sparse_container

#endif  // RANGE_VECTOR_H_