LLVM 22.0.0git
DenseMap.h
Go to the documentation of this file.
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/ADL.h"
20#include "llvm/ADT/STLExtras.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <cstring>
32#include <initializer_list>
33#include <iterator>
34#include <new>
35#include <type_traits>
36#include <utility>
37
38namespace llvm {
39
40namespace detail {
41
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  using Base = std::pair<KeyT, ValueT>;
  using Base::pair; // Inherit all of std::pair's constructors.

  /// Named accessors used by the DenseMap internals instead of
  /// first/second, so custom bucket types can expose the same interface.
  KeyT &getFirst() { return this->first; }
  const KeyT &getFirst() const { return this->first; }
  ValueT &getSecond() { return this->second; }
  const ValueT &getSecond() const { return this->second; }
};
53
54} // end namespace detail
55
56template <typename KeyT, typename ValueT,
57 typename KeyInfoT = DenseMapInfo<KeyT>,
59 bool IsConst = false>
60class DenseMapIterator;
61
62template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
63 typename BucketT>
65 template <typename T>
66 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
67
68public:
70 using key_type = KeyT;
72 using value_type = BucketT;
73
77
/// Return an iterator to the first live bucket, or end() for an empty map.
inline iterator begin() {
  // When the map is empty, avoid the overhead of advancing/retreating past
  // empty buckets.
  if (empty())
    return end();
  // shouldReverseIterate flips iteration order (used to flush out
  // order-dependence bugs); begin then starts at the last bucket.
  if (shouldReverseIterate<KeyT>())
    return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
  return makeIterator(getBuckets(), getBucketsEnd(), *this);
}
/// Past-the-end iterator. NoAdvance=true keeps it on the sentinel position
/// instead of skipping ahead to the next live bucket.
inline iterator end() {
  return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
}
/// Const overload of begin(); same empty-map and reverse-iteration handling.
inline const_iterator begin() const {
  if (empty())
    return end();
  if (shouldReverseIterate<KeyT>())
    return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
  return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
}
/// Const overload of end().
inline const_iterator end() const {
  return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
}
100
// Return an iterator to iterate over keys in the map.
// Note: the lambda returns by value, so each key is copied.
inline auto keys() {
  return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
}

// Return an iterator to iterate over values in the map.
// Note: the lambda returns by value, so each value is copied.
inline auto values() {
  return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
}

// Const overload of keys().
inline auto keys() const {
  return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
}

// Const overload of values().
inline auto values() const {
  return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
}

/// True when the map holds no entries.
[[nodiscard]] bool empty() const { return getNumEntries() == 0; }
/// Number of (key, value) entries currently in the map.
unsigned size() const { return getNumEntries(); }
121
/// Grow the densemap so that it can contain at least \p NumEntries items
/// before resizing again.
void reserve(size_type NumEntries) {
  // Translate the entry count into the bucket count that keeps the load
  // factor under the growth threshold.
  auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
  // NOTE(review): upstream also calls incrementEpoch() here; a line appears
  // to have been dropped in this extraction -- verify against llvm/ADT/DenseMap.h.
  // reserve() only ever grows; a request below current capacity is a no-op.
  if (NumBuckets > getNumBuckets())
    grow(NumBuckets);
}
130
/// Remove all entries: destroy live values and reset every bucket's key to
/// the empty key. May shrink the bucket array when it is mostly unused.
void clear() {
  // Fast path: no live entries and no tombstones means every bucket
  // already holds the empty key.
  if (getNumEntries() == 0 && getNumTombstones() == 0)
    return;

  // If the capacity of the array is huge, and the # elements used is small,
  // shrink the array.
  if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
    shrink_and_clear();
    return;
  }

  const KeyT EmptyKey = getEmptyKey();
  if constexpr (std::is_trivially_destructible_v<ValueT>) {
    // Use a simpler loop when values don't need destruction.
    for (BucketT &B : buckets())
      B.getFirst() = EmptyKey;
  } else {
    const KeyT TombstoneKey = getTombstoneKey();
    // Count down live entries as a consistency check on the bookkeeping.
    unsigned NumEntries = getNumEntries();
    for (BucketT &B : buckets()) {
      if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey)) {
        if (!KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
          // Live bucket: destroy the value before recycling the key slot.
          B.getSecond().~ValueT();
          --NumEntries;
        }
        B.getFirst() = EmptyKey;
      }
    }
    assert(NumEntries == 0 && "Node count imbalance!");
    (void)NumEntries;
  }
  setNumEntries(0);
  setNumTombstones(0);
}
166
/// Return true if the specified key is in the map, false otherwise.
bool contains(const_arg_type_t<KeyT> Val) const {
  return doFind(Val) != nullptr;
}

/// Return 1 if the specified key is in the map, 0 otherwise.
size_type count(const_arg_type_t<KeyT> Val) const {
  return contains(Val) ? 1 : 0;
}

/// Return an iterator to the entry for \p Val, or end() if absent.
/// Both overloads forward to find_as with the key's own type.
iterator find(const_arg_type_t<KeyT> Val) { return find_as(Val); }
const_iterator find(const_arg_type_t<KeyT> Val) const { return find_as(Val); }
179
/// Alternate version of find() which allows a different, and possibly
/// less expensive, key type.
/// The DenseMapInfo is responsible for supplying methods
/// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
/// type used.
template <class LookupKeyT> iterator find_as(const LookupKeyT &Val) {
  // NoAdvance=true: position exactly on the found bucket; the end pointer
  // depends on iteration direction.
  if (BucketT *Bucket = doFind(Val))
    return makeIterator(
        Bucket, shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
        *this, true);
  return end();
}
/// Const overload of find_as().
template <class LookupKeyT>
const_iterator find_as(const LookupKeyT &Val) const {
  if (const BucketT *Bucket = doFind(Val))
    return makeConstIterator(
        Bucket, shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
        *this, true);
  return end();
}
200
/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists. Returns by value (a copy).
ValueT lookup(const_arg_type_t<KeyT> Val) const {
  if (const BucketT *Bucket = doFind(Val))
    return Bucket->getSecond();
  return ValueT();
}

// Return the entry with the specified key, or \p Default. This variant is
// useful, because `lookup` cannot be used with non-default-constructible
// values.
template <typename U = std::remove_cv_t<ValueT>>
ValueT lookup_or(const_arg_type_t<KeyT> Val, U &&Default) const {
  if (const BucketT *Bucket = doFind(Val))
    return Bucket->getSecond();
  return Default;
}

/// at - Return the entry for the specified key, or abort if no such
/// entry exists. Presence is only checked via assert (debug builds).
const ValueT &at(const_arg_type_t<KeyT> Val) const {
  auto Iter = this->find(std::move(Val));
  assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
  return Iter->second;
}
226
// Inserts key,value pair into the map if the key isn't already in the map.
// If the key is already in the map, it returns false and doesn't update the
// value.
std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
  return try_emplace_impl(KV.first, KV.second);
}

// Inserts key,value pair into the map if the key isn't already in the map.
// If the key is already in the map, it returns false and doesn't update the
// value. Rvalue overload: key and value are moved out of KV on insertion.
std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
  return try_emplace_impl(std::move(KV.first), std::move(KV.second));
}

// Inserts key,value pair into the map if the key isn't already in the map.
// The value is constructed in-place if the key is not in the map, otherwise
// it is not moved.
template <typename... Ts>
std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&...Args) {
  return try_emplace_impl(std::move(Key), std::forward<Ts>(Args)...);
}

// Inserts key,value pair into the map if the key isn't already in the map.
// The value is constructed in-place if the key is not in the map, otherwise
// it is not moved.
template <typename... Ts>
std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&...Args) {
  return try_emplace_impl(Key, std::forward<Ts>(Args)...);
}
256
/// Alternate version of insert() which allows a different, and possibly
/// less expensive, key type.
/// The DenseMapInfo is responsible for supplying methods
/// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
/// type used.
template <typename LookupKeyT>
std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                    const LookupKeyT &Val) {
  // Probe with the cheap lookup key; only touch KV if we actually insert.
  BucketT *TheBucket;
  if (LookupBucketFor(Val, TheBucket))
    return {makeInsertIterator(TheBucket), false}; // Already in map.

  // Otherwise, insert the new element.
  TheBucket = findBucketForInsertion(Val, TheBucket);
  TheBucket->getFirst() = std::move(KV.first);
  ::new (&TheBucket->getSecond()) ValueT(std::move(KV.second));
  return {makeInsertIterator(TheBucket), true};
}

/// insert - Range insertion of pairs.
template <typename InputIt> void insert(InputIt I, InputIt E) {
  for (; I != E; ++I)
    insert(*I);
}

/// Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
/// adl_begin/adl_end make this work with any range-like type.
template <typename Range> void insert_range(Range &&R) {
  insert(adl_begin(R), adl_end(R));
}
286
/// Insert \p Val for \p Key, overwriting any existing value. Returns the
/// entry's iterator and whether a fresh insertion happened. (When the key
/// already exists, try_emplace did not consume Val, so forwarding it again
/// for the assignment is safe.)
template <typename V>
std::pair<iterator, bool> insert_or_assign(const KeyT &Key, V &&Val) {
  auto Ret = try_emplace(Key, std::forward<V>(Val));
  if (!Ret.second)
    Ret.first->second = std::forward<V>(Val);
  return Ret;
}

/// Rvalue-key overload of insert_or_assign().
template <typename V>
std::pair<iterator, bool> insert_or_assign(KeyT &&Key, V &&Val) {
  auto Ret = try_emplace(std::move(Key), std::forward<V>(Val));
  if (!Ret.second)
    Ret.first->second = std::forward<V>(Val);
  return Ret;
}

/// Emplace a value for \p Key; if the key already exists, a new value is
/// constructed from \p Args and assigned over the old one.
template <typename... Ts>
std::pair<iterator, bool> emplace_or_assign(const KeyT &Key, Ts &&...Args) {
  auto Ret = try_emplace(Key, std::forward<Ts>(Args)...);
  if (!Ret.second)
    Ret.first->second = ValueT(std::forward<Ts>(Args)...);
  return Ret;
}

/// Rvalue-key overload of emplace_or_assign().
template <typename... Ts>
std::pair<iterator, bool> emplace_or_assign(KeyT &&Key, Ts &&...Args) {
  auto Ret = try_emplace(std::move(Key), std::forward<Ts>(Args)...);
  if (!Ret.second)
    Ret.first->second = ValueT(std::forward<Ts>(Args)...);
  return Ret;
}
318
/// Remove the entry for \p Val if present; returns true if one was erased.
/// The vacated bucket is marked with the tombstone key so probe chains that
/// pass through it keep working.
bool erase(const KeyT &Val) {
  BucketT *TheBucket = doFind(Val);
  if (!TheBucket)
    return false; // not in map.

  TheBucket->getSecond().~ValueT();
  TheBucket->getFirst() = getTombstoneKey();
  decrementNumEntries();
  incrementNumTombstones();
  return true;
}
331 BucketT *TheBucket = &*I;
332 TheBucket->getSecond().~ValueT();
333 TheBucket->getFirst() = getTombstoneKey();
334 decrementNumEntries();
335 incrementNumTombstones();
336 }
337
/// Return a reference to the value for \p Key, default-constructing and
/// inserting an entry if the key is not already present.
ValueT &operator[](const KeyT &Key) {
  return lookupOrInsertIntoBucket(Key).first->second;
}
341
343 return lookupOrInsertIntoBucket(std::move(Key)).first->second;
344 }
345
/// isPointerIntoBucketsArray - Return true if the specified pointer points
/// somewhere into the DenseMap's array of buckets (i.e. either to a key or
/// value in the DenseMap).
/// Note: compares raw addresses, so it is only meaningful for pointers
/// derived from this map's own storage.
bool isPointerIntoBucketsArray(const void *Ptr) const {
  return Ptr >= getBuckets() && Ptr < getBucketsEnd();
}

/// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
/// array.  In conjunction with the previous method, this can be used to
/// determine whether an insertion caused the DenseMap to reallocate.
const void *getPointerIntoBucketsArray() const { return getBuckets(); }
357
358protected:
// The base class holds no state; derived classes own the bucket storage.
DenseMapBase() = default;

/// Destroy all keys and values. Every bucket's key is destroyed (empty and
/// tombstone sentinels included); values are destroyed only for live
/// buckets. Does not free the bucket array -- the derived class does that.
void destroyAll() {
  if (getNumBuckets() == 0) // Nothing to do.
    return;

  const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
  for (BucketT &B : buckets()) {
    if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
        !KeyInfoT::isEqual(B.getFirst(), TombstoneKey))
      B.getSecond().~ValueT();
    B.getFirst().~KeyT();
  }
}
373
/// Reset the counters and placement-new the empty key into every bucket's
/// key slot. Assumes the bucket array is raw (uninitialized) memory.
void initEmpty() {
  setNumEntries(0);
  setNumTombstones(0);

  // Probing masks with (NumBuckets - 1), which requires a power of two.
  assert((getNumBuckets() & (getNumBuckets() - 1)) == 0 &&
         "# initial buckets must be a power of two!");
  const KeyT EmptyKey = getEmptyKey();
  for (BucketT &B : buckets())
    ::new (&B.getFirst()) KeyT(EmptyKey);
}
384
/// Returns the number of buckets to allocate to ensure that the DenseMap can
/// accommodate \p NumEntries without need to grow().
unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
  // Ensure that "NumEntries * 4 < NumBuckets * 3"
  if (NumEntries == 0)
    return 0;
  // +1 is required because of the strict equality.
  // For example if NumEntries is 48, then 48 * 4 / 3 == 64 exactly, which
  // would violate the strict inequality; the +1 pushes NextPowerOf2 to the
  // next size up, 128.
  return NextPowerOf2(NumEntries * 4 / 3 + 1);
}
395
397 initEmpty();
398
399 // Insert all the old elements.
400 const KeyT EmptyKey = getEmptyKey();
401 const KeyT TombstoneKey = getTombstoneKey();
402 for (BucketT &B : OldBuckets) {
403 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
404 !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
405 // Insert the key/value into the new table.
406 BucketT *DestBucket;
407 bool FoundVal = LookupBucketFor(B.getFirst(), DestBucket);
408 (void)FoundVal; // silence warning.
409 assert(!FoundVal && "Key already in new map?");
410 DestBucket->getFirst() = std::move(B.getFirst());
411 ::new (&DestBucket->getSecond()) ValueT(std::move(B.getSecond()));
412 incrementNumEntries();
413
414 // Free the value.
415 B.getSecond().~ValueT();
416 }
417 B.getFirst().~KeyT();
418 }
419 }
420
421 template <typename OtherBaseT>
424 assert(&other != this);
425 assert(getNumBuckets() == other.getNumBuckets());
426
427 setNumEntries(other.getNumEntries());
428 setNumTombstones(other.getNumTombstones());
429
430 BucketT *Buckets = getBuckets();
431 const BucketT *OtherBuckets = other.getBuckets();
432 const size_t NumBuckets = getNumBuckets();
433 if constexpr (std::is_trivially_copyable_v<KeyT> &&
434 std::is_trivially_copyable_v<ValueT>) {
435 memcpy(reinterpret_cast<void *>(Buckets), OtherBuckets,
436 NumBuckets * sizeof(BucketT));
437 } else {
438 const KeyT EmptyKey = getEmptyKey();
439 const KeyT TombstoneKey = getTombstoneKey();
440 for (size_t I = 0; I < NumBuckets; ++I) {
441 ::new (&Buckets[I].getFirst()) KeyT(OtherBuckets[I].getFirst());
442 if (!KeyInfoT::isEqual(Buckets[I].getFirst(), EmptyKey) &&
443 !KeyInfoT::isEqual(Buckets[I].getFirst(), TombstoneKey))
444 ::new (&Buckets[I].getSecond()) ValueT(OtherBuckets[I].getSecond());
445 }
446 }
447 }
448
/// Hash a key via the DenseMapInfo traits class.
static unsigned getHashValue(const KeyT &Val) {
  return KeyInfoT::getHashValue(Val);
}

/// Hash an alternate (possibly cheaper) lookup key type.
template <typename LookupKeyT>
static unsigned getHashValue(const LookupKeyT &Val) {
  return KeyInfoT::getHashValue(Val);
}

/// Sentinel key used to mark never-used buckets.
static const KeyT getEmptyKey() {
  static_assert(std::is_base_of_v<DenseMapBase, DerivedT>,
                "Must pass the derived type to this template!");
  return KeyInfoT::getEmptyKey();
}

/// Sentinel key used to mark erased buckets.
static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
465
466private:
/// Find the bucket for \p Key, inserting a new entry (value constructed
/// from \p Args) when absent. Returns the bucket and whether an insertion
/// took place.
template <typename KeyArgT, typename... Ts>
std::pair<BucketT *, bool> lookupOrInsertIntoBucket(KeyArgT &&Key,
                                                    Ts &&...Args) {
  BucketT *TheBucket = nullptr;
  if (LookupBucketFor(Key, TheBucket))
    return {TheBucket, false}; // Already in the map.

  // Otherwise, insert the new element.
  TheBucket = findBucketForInsertion(Key, TheBucket);
  TheBucket->getFirst() = std::forward<KeyArgT>(Key);
  ::new (&TheBucket->getSecond()) ValueT(std::forward<Ts>(Args)...);
  return {TheBucket, true};
}

/// Shared implementation behind insert()/try_emplace(): wraps the bucket
/// result of lookupOrInsertIntoBucket in an iterator.
template <typename KeyArgT, typename... Ts>
std::pair<iterator, bool> try_emplace_impl(KeyArgT &&Key, Ts &&...Args) {
  auto [Bucket, Inserted] = lookupOrInsertIntoBucket(
      std::forward<KeyArgT>(Key), std::forward<Ts>(Args)...);
  return {makeInsertIterator(Bucket), Inserted};
}
487
/// Build an iterator positioned at bucket \p P. Under reverse iteration the
/// position is shifted by one bucket so that the begin/end conventions of
/// the reversed traversal still hold.
iterator makeIterator(BucketT *P, BucketT *E, DebugEpochBase &Epoch,
                      bool NoAdvance = false) {
  if (shouldReverseIterate<KeyT>()) {
    // Map the end sentinel to the front; otherwise step one bucket forward.
    BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
    return iterator(B, E, Epoch, NoAdvance);
  }
  return iterator(P, E, Epoch, NoAdvance);
}

/// Const counterpart of makeIterator().
const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
                                 const DebugEpochBase &Epoch,
                                 const bool NoAdvance = false) const {
  if (shouldReverseIterate<KeyT>()) {
    const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
    return const_iterator(B, E, Epoch, NoAdvance);
  }
  return const_iterator(P, E, Epoch, NoAdvance);
}

/// Iterator for a just-inserted bucket: NoAdvance=true so it stays pinned
/// on that bucket, with the end pointer chosen per iteration direction.
iterator makeInsertIterator(BucketT *TheBucket) {
  return makeIterator(TheBucket,
                      shouldReverseIterate<KeyT>() ? getBuckets()
                                                   : getBucketsEnd(),
                      *this, true);
}
513
// All map state (entry/tombstone/bucket counts and the bucket array itself)
// lives in the derived class; the helpers below forward to it via CRTP
// static_casts so DenseMap and SmallDenseMap can store it differently.
unsigned getNumEntries() const {
  return static_cast<const DerivedT *>(this)->getNumEntries();
}

void setNumEntries(unsigned Num) {
  static_cast<DerivedT *>(this)->setNumEntries(Num);
}

void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }

void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }

unsigned getNumTombstones() const {
  return static_cast<const DerivedT *>(this)->getNumTombstones();
}

void setNumTombstones(unsigned Num) {
  static_cast<DerivedT *>(this)->setNumTombstones(Num);
}

void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }

void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }

const BucketT *getBuckets() const {
  return static_cast<const DerivedT *>(this)->getBuckets();
}

BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }

unsigned getNumBuckets() const {
  return static_cast<const DerivedT *>(this)->getNumBuckets();
}

BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }

const BucketT *getBucketsEnd() const {
  return getBuckets() + getNumBuckets();
}

// Whole bucket array (live, empty, and tombstone slots alike) as a range.
iterator_range<BucketT *> buckets() {
  return llvm::make_range(getBuckets(), getBucketsEnd());
}

void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }

void shrink_and_clear() { static_cast<DerivedT *>(this)->shrink_and_clear(); }
561
/// Given the insertion slot chosen by LookupBucketFor (\p TheBucket, an
/// empty or tombstone bucket), grow/rehash if needed and return the bucket
/// the new entry must be constructed in. Updates the entry and tombstone
/// counters; the caller writes the key and value.
template <typename LookupKeyT>
BucketT *findBucketForInsertion(const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
  // NOTE(review): upstream calls incrementEpoch() here; a line appears to
  // have been dropped in this extraction -- verify against llvm/ADT/DenseMap.h.

  // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
  // the buckets are empty (meaning that many are filled with tombstones),
  // grow the table.
  //
  // The later case is tricky.  For example, if we had one empty bucket with
  // tons of tombstones, failing lookups (e.g. for insertion) would have to
  // probe almost the entire table until it found the empty bucket.  If the
  // table completely filled with tombstones, no lookup would ever succeed,
  // causing infinite loops in lookup.
  unsigned NewNumEntries = getNumEntries() + 1;
  unsigned NumBuckets = getNumBuckets();
  if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
    this->grow(NumBuckets * 2);
    LookupBucketFor(Lookup, TheBucket);
  } else if (LLVM_UNLIKELY(NumBuckets -
                               (NewNumEntries + getNumTombstones()) <=
                           NumBuckets / 8)) {
    // grow() at the same size reallocates and rehashes, which flushes out
    // the accumulated tombstones.
    this->grow(NumBuckets);
    LookupBucketFor(Lookup, TheBucket);
  }
  assert(TheBucket);

  // Only update the state after we've grown our bucket space appropriately
  // so that when growing buckets we have self-consistent entry count.
  incrementNumEntries();

  // If we are writing over a tombstone, remember this.
  const KeyT EmptyKey = getEmptyKey();
  if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
    decrementNumTombstones();

  return TheBucket;
}
600
/// Probe for \p Val. Returns the bucket holding it, or null when the key is
/// absent. Uses quadratic probing and stops at the first empty bucket
/// (tombstones keep the probe chain alive).
template <typename LookupKeyT>
const BucketT *doFind(const LookupKeyT &Val) const {
  const BucketT *BucketsPtr = getBuckets();
  const unsigned NumBuckets = getNumBuckets();
  if (NumBuckets == 0)
    return nullptr;

  const KeyT EmptyKey = getEmptyKey();
  // NumBuckets is a power of two, so masking is equivalent to a modulo.
  unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
  unsigned ProbeAmt = 1;
  while (true) {
    const BucketT *Bucket = BucketsPtr + BucketNo;
    if (LLVM_LIKELY(KeyInfoT::isEqual(Val, Bucket->getFirst())))
      return Bucket;
    if (LLVM_LIKELY(KeyInfoT::isEqual(Bucket->getFirst(), EmptyKey)))
      return nullptr;

    // Otherwise, it's a hash collision or a tombstone, continue quadratic
    // probing.
    BucketNo += ProbeAmt++;
    BucketNo &= NumBuckets - 1;
  }
}

/// Non-const overload, implemented in terms of the const one.
template <typename LookupKeyT> BucketT *doFind(const LookupKeyT &Val) {
  return const_cast<BucketT *>(
      static_cast<const DenseMapBase *>(this)->doFind(Val));
}
629
/// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
/// FoundBucket.  If the bucket contains the key and a value, this returns
/// true, otherwise it returns a bucket with an empty marker or tombstone and
/// returns false.
template <typename LookupKeyT>
bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
  BucketT *BucketsPtr = getBuckets();
  const unsigned NumBuckets = getNumBuckets();

  if (NumBuckets == 0) {
    FoundBucket = nullptr;
    return false;
  }

  // FoundTombstone - Keep track of whether we find a tombstone while probing.
  BucketT *FoundTombstone = nullptr;
  const KeyT EmptyKey = getEmptyKey();
  const KeyT TombstoneKey = getTombstoneKey();
  assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
         !KeyInfoT::isEqual(Val, TombstoneKey) &&
         "Empty/Tombstone value shouldn't be inserted into map!");

  // NumBuckets is a power of two, so masking is equivalent to a modulo.
  unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
  unsigned ProbeAmt = 1;
  while (true) {
    BucketT *ThisBucket = BucketsPtr + BucketNo;
    // Found Val's bucket?  If so, return it.
    if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
      FoundBucket = ThisBucket;
      return true;
    }

    // If we found an empty bucket, the key doesn't exist in the set.
    // Insert it and return the default value.
    if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
      // If we've already seen a tombstone while probing, fill it in instead
      // of the empty bucket we eventually probed to.
      FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
      return false;
    }

    // If this is a tombstone, remember it.  If Val ends up not in the map, we
    // prefer to return it than something that would require more probing.
    if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
        !FoundTombstone)
      FoundTombstone = ThisBucket; // Remember the first tombstone found.

    // Otherwise, it's a hash collision or a tombstone, continue quadratic
    // probing.
    BucketNo += ProbeAmt++;
    BucketNo &= (NumBuckets - 1);
  }
}
683
684public:
/// Return the approximate size (in bytes) of the actual map.
/// This is just the raw memory used by DenseMap.
/// If entries are pointers to objects, the size of the referenced objects
/// are not included.
/// Counts every allocated bucket, including empty and tombstone slots.
size_t getMemorySize() const { return getNumBuckets() * sizeof(BucketT); }
690};
691
692/// Equality comparison for DenseMap.
693///
694/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
695/// is also in RHS, and that no additional pairs are in RHS.
696/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
697/// complexity is linear, worst case is O(N^2) (if every hash collides).
698template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
699 typename BucketT>
703 if (LHS.size() != RHS.size())
704 return false;
705
706 for (auto &KV : LHS) {
707 auto I = RHS.find(KV.first);
708 if (I == RHS.end() || I->second != KV.second)
709 return false;
710 }
711
712 return true;
713}
714
715/// Inequality comparison for DenseMap.
716///
717/// Equivalent to !(LHS == RHS). See operator== for performance notes.
718template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
719 typename BucketT>
723 return !(LHS == RHS);
724}
725
726template <typename KeyT, typename ValueT,
727 typename KeyInfoT = DenseMapInfo<KeyT>,
729class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
730 KeyT, ValueT, KeyInfoT, BucketT> {
731 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
732
733 // Lift some types from the dependent base class into this class for
734 // simplicity of referring to them.
736
737 BucketT *Buckets;
738 unsigned NumEntries;
739 unsigned NumTombstones;
740 unsigned NumBuckets;
741
742public:
/// Create a DenseMap with an optional \p InitialReserve that guarantee that
/// this number of elements can be inserted in the map without grow()
explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }

/// Copy-construct: allocate matching storage, then deep-copy the buckets.
DenseMap(const DenseMap &other) : BaseT() {
  init(0);
  copyFrom(other);
}

/// Move-construct: start empty, then steal other's storage via swap.
DenseMap(DenseMap &&other) : BaseT() {
  init(0);
  swap(other);
}

/// Construct from an iterator range of key/value pairs; reserves up front
/// based on the range's length.
template <typename InputIt> DenseMap(const InputIt &I, const InputIt &E) {
  init(std::distance(I, E));
  this->insert(I, E);
}
761
762 template <typename RangeT>
765
/// Construct from a braced list of key/value pairs.
DenseMap(std::initializer_list<typename BaseT::value_type> Vals)
    : DenseMap(Vals.begin(), Vals.end()) {}
768
770 this->destroyAll();
771 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
772 }
773
775 this->incrementEpoch();
776 RHS.incrementEpoch();
777 std::swap(Buckets, RHS.Buckets);
778 std::swap(NumEntries, RHS.NumEntries);
779 std::swap(NumTombstones, RHS.NumTombstones);
780 std::swap(NumBuckets, RHS.NumBuckets);
781 }
782
/// Copy assignment; self-assignment is a no-op.
DenseMap &operator=(const DenseMap &other) {
  if (&other != this)
    copyFrom(other);
  return *this;
}
788
790 this->destroyAll();
791 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
792 init(0);
793 swap(other);
794 return *this;
795 }
796
/// Replace this map's contents with a copy of \p other. Reallocates so the
/// bucket counts match, which the base-class copy requires; a zero-bucket
/// source just leaves this map empty.
void copyFrom(const DenseMap &other) {
  this->destroyAll();
  deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  if (allocateBuckets(other.NumBuckets)) {
    this->BaseT::copyFrom(other);
  } else {
    NumEntries = 0;
    NumTombstones = 0;
  }
}
807
/// Reallocate to at least \p AtLeast buckets (minimum 64, rounded to a
/// power of two) and rehash all live entries into the new array.
void grow(unsigned AtLeast) {
  unsigned OldNumBuckets = NumBuckets;
  BucketT *OldBuckets = Buckets;

  allocateBuckets(std::max<unsigned>(
      64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
  assert(Buckets);
  if (!OldBuckets) {
    // First allocation: nothing to move over.
    this->BaseT::initEmpty();
    return;
  }

  this->moveFromOldBuckets(
      llvm::make_range(OldBuckets, OldBuckets + OldNumBuckets));

  // Free the old table.
  deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
                    alignof(BucketT));
}
827
829 unsigned OldNumBuckets = NumBuckets;
830 unsigned OldNumEntries = NumEntries;
831 this->destroyAll();
832
833 // Reduce the number of buckets.
834 unsigned NewNumBuckets = 0;
835 if (OldNumEntries)
836 NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
837 if (NewNumBuckets == NumBuckets) {
838 this->BaseT::initEmpty();
839 return;
840 }
841
842 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
843 alignof(BucketT));
844 init(NewNumBuckets);
845 }
846
847private:
// CRTP hooks consumed by DenseMapBase: trivial accessors over the flat
// member fields of this representation.
unsigned getNumEntries() const { return NumEntries; }

void setNumEntries(unsigned Num) { NumEntries = Num; }

unsigned getNumTombstones() const { return NumTombstones; }

void setNumTombstones(unsigned Num) { NumTombstones = Num; }

BucketT *getBuckets() const { return Buckets; }

unsigned getNumBuckets() const { return NumBuckets; }

/// Allocate raw (uninitialized) storage for \p Num buckets. Returns false
/// and leaves Buckets null when Num is zero.
bool allocateBuckets(unsigned Num) {
  NumBuckets = Num;
  if (NumBuckets == 0) {
    Buckets = nullptr;
    return false;
  }

  Buckets = static_cast<BucketT *>(
      allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
  return true;
}

/// Allocate enough buckets for \p InitNumEntries entries and mark them all
/// empty; with zero buckets just reset the counters.
void init(unsigned InitNumEntries) {
  auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
  if (allocateBuckets(InitBuckets)) {
    this->BaseT::initEmpty();
  } else {
    NumEntries = 0;
    NumTombstones = 0;
  }
}
881};
882
883template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
884 typename KeyInfoT = DenseMapInfo<KeyT>,
887 : public DenseMapBase<
888 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
889 ValueT, KeyInfoT, BucketT> {
890 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
891
892 // Lift some types from the dependent base class into this class for
893 // simplicity of referring to them.
895
896 static_assert(isPowerOf2_64(InlineBuckets),
897 "InlineBuckets must be a power of 2.");
898
899 unsigned Small : 1;
900 unsigned NumEntries : 31;
901 unsigned NumTombstones;
902
903 struct LargeRep {
904 BucketT *Buckets;
905 unsigned NumBuckets;
907 return llvm::make_range(Buckets, Buckets + NumBuckets);
908 }
909 };
910
911 /// A "union" of an inline bucket array and the struct representing
912 /// a large bucket. This union will be discriminated by the 'Small' bit.
913 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
914
915public:
/// Construct with room for \p NumInitBuckets buckets. Counts above the
/// inline capacity are rounded up to a power of two, as the probing
/// scheme requires.
explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
  if (NumInitBuckets > InlineBuckets)
    NumInitBuckets = llvm::bit_ceil(NumInitBuckets);
  init(NumInitBuckets);
}
921
923 init(0);
924 copyFrom(other);
925 }
926
928 init(0);
929 swap(other);
930 }
931
932 template <typename InputIt>
933 SmallDenseMap(const InputIt &I, const InputIt &E) {
934 init(NextPowerOf2(std::distance(I, E)));
935 this->insert(I, E);
936 }
937
938 template <typename RangeT>
941
942 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
943 : SmallDenseMap(Vals.begin(), Vals.end()) {}
944
946 this->destroyAll();
947 deallocateBuckets();
948 }
949
    // Swap the entry counts by hand; NumEntries is presumably a 31-bit
    // bitfield (see setNumEntries) and so can't bind to std::swap's
    // references -- confirm against the member declarations.
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          // Only one side holds a live value: move it across and end its
          // lifetime in the source bucket.
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      // Both heap-allocated: swapping the LargeRep structs (buffer pointer
      // plus bucket count) swaps the whole tables.
      std::swap(*getLargeRep(), *RHS.getLargeRep());
      return;
    }

    // Mixed case: exactly one side is small.
    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      // Keys are moved unconditionally: empty/tombstone buckets still hold a
      // constructed key. Values exist only in occupied buckets.
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }
1020
    // Self-assignment guard: copyFrom tears down *this first, which would
    // otherwise destroy 'other' as well.
    if (&other != this)
      copyFrom(other);
    return *this;
  }
1026
    // Reset *this to an empty small map, then steal 'other's contents via
    // swap; 'other' is left valid (holding our former empty state).
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }
1034
1035 void copyFrom(const SmallDenseMap &other) {
1036 this->destroyAll();
1037 deallocateBuckets();
1038 Small = true;
1039 if (other.getNumBuckets() > InlineBuckets) {
1040 Small = false;
1041 new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1042 }
1043 this->BaseT::copyFrom(other);
1044 }
1045
1046 void init(unsigned InitBuckets) {
1047 Small = true;
1048 if (InitBuckets > InlineBuckets) {
1049 Small = false;
1050 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1051 }
1052 this->BaseT::initEmpty();
1053 }
1054
  // Grow (or shrink, when purging tombstones) the table to hold at least
  // AtLeast buckets, re-inserting every live entry.
  void grow(unsigned AtLeast) {
    // Round heap-bound requests up to a power of two, floor 64.
    if (AtLeast > InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));

    if (Small) {
      // First move the inline buckets into a temporary storage.
      // NOTE(review): TmpStorage is presumably an aligned char buffer sized
      // for InlineBuckets BucketT objects, declared just above this cast --
      // confirm in the full source.
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT &B : inlineBuckets()) {
        if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
          ++TmpEnd;
          B.getSecond().~ValueT();
        }
        // Keys are destroyed unconditionally: even empty/tombstone buckets
        // hold a constructed key.
        B.getFirst().~KeyT();
      }

      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them. Usually we always switch to the
      // large rep here.
      if (AtLeast > InlineBuckets) {
        Small = false;
        new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      }
      this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
      return;
    }

    // Currently large: stash the old rep, build the new representation, then
    // re-insert the old entries into it.
    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      // Shrinking back into the inline buffer.
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.buckets());

    // Free the old table.
    deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
                      alignof(BucketT));
  }
1107
    // Drop all entries and shrink the table to a size appropriate for the
    // element count it held before clearing.
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      // At least 2x headroom over the old element count; heap-allocated
      // tables never go below 64 buckets.
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    // If the representation wouldn't change, just reset in place.
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }
1128
1129private:
  // Number of live (non-empty, non-tombstone) entries currently in the map.
  unsigned getNumEntries() const { return NumEntries; }
1131
  // Record a new live-entry count.
  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }
1137
  // Number of tombstone buckets left behind by erasures.
  unsigned getNumTombstones() const { return NumTombstones; }
1139
  // Overwrite the tombstone count.
  void setNumTombstones(unsigned Num) { NumTombstones = Num; }
1141
  // Small-mode access to the inline bucket array living inside 'storage'.
  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage' is a POD containing a char buffer.
    return reinterpret_cast<const BucketT *>(&storage);
  }
1149
  // Non-const overload, implemented on top of the const version so the
  // aliasing reasoning lives in one place.
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
1154
  // Large-mode access to the heap representation stored inside 'storage'.
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(&storage);
  }
1160
  // Non-const overload forwarding to the const version.
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }
1165
  // Pointer to the active bucket array, regardless of representation.
  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }
1169
  // Non-const overload forwarding to the const version.
  BucketT *getBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getBuckets());
  }
1174
  // Bucket capacity of the active representation.
  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }
1178
1179 iterator_range<BucketT *> inlineBuckets() {
1180 BucketT *Begin = getInlineBuckets();
1181 return llvm::make_range(Begin, Begin + InlineBuckets);
1182 }
1183
1184 void deallocateBuckets() {
1185 if (Small)
1186 return;
1187
1188 deallocate_buffer(getLargeRep()->Buckets,
1189 sizeof(BucketT) * getLargeRep()->NumBuckets,
1190 alignof(BucketT));
1191 getLargeRep()->~LargeRep();
1192 }
1193
1194 LargeRep allocateBuckets(unsigned Num) {
1195 assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1196 LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1197 sizeof(BucketT) * Num, alignof(BucketT))),
1198 Num};
1199 return Rep;
1200 }
1201};
1202
1203template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1204 bool IsConst>
1206 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1207 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1208
1209public:
1211 using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
1214 using iterator_category = std::forward_iterator_tag;
1215
1216private:
1217 pointer Ptr = nullptr;
1218 pointer End = nullptr;
1219
1220public:
1221 DenseMapIterator() = default;
1222
1224 bool NoAdvance = false)
1225 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1226 assert(isHandleInSync() && "invalid construction!");
1227
1228 if (NoAdvance)
1229 return;
1230 if (shouldReverseIterate<KeyT>()) {
1231 RetreatPastEmptyBuckets();
1232 return;
1233 }
1234 AdvancePastEmptyBuckets();
1235 }
1236
1237 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1238 // for const iterator destinations so it doesn't end up as a user defined copy
1239 // constructor.
1240 template <bool IsConstSrc,
1241 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1245
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    // In reverse-iteration mode Ptr points one past the current bucket.
    if (shouldReverseIterate<KeyT>())
      return Ptr[-1];
    return *Ptr;
  }
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    // In reverse-iteration mode Ptr points one past the current bucket.
    if (shouldReverseIterate<KeyT>())
      return &(Ptr[-1]);
    return Ptr;
  }
1260
  // Iterators are equal iff they reference the same bucket. The epoch
  // asserts flag comparisons between different containers, or after the
  // container has been mutated.
  friend bool operator==(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return LHS.Ptr == RHS.Ptr;
  }
1269
  // Defined in terms of operator== so both share the same validity checks.
  friend bool operator!=(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    return !(LHS == RHS);
  }
1274
1275 inline DenseMapIterator &operator++() { // Preincrement
1276 assert(isHandleInSync() && "invalid iterator access!");
1277 assert(Ptr != End && "incrementing end() iterator");
1278 if (shouldReverseIterate<KeyT>()) {
1279 --Ptr;
1280 RetreatPastEmptyBuckets();
1281 return *this;
1282 }
1283 ++Ptr;
1284 AdvancePastEmptyBuckets();
1285 return *this;
1286 }
1287 DenseMapIterator operator++(int) { // Postincrement
1288 assert(isHandleInSync() && "invalid iterator access!");
1289 DenseMapIterator tmp = *this;
1290 ++*this;
1291 return tmp;
1292 }
1293
1294private:
1295 void AdvancePastEmptyBuckets() {
1296 assert(Ptr <= End);
1297 const KeyT Empty = KeyInfoT::getEmptyKey();
1298 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1299
1300 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1301 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1302 ++Ptr;
1303 }
1304
1305 void RetreatPastEmptyBuckets() {
1306 assert(Ptr >= End);
1307 const KeyT Empty = KeyInfoT::getEmptyKey();
1308 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1309
1310 while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1311 KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1312 --Ptr;
1313 }
1314};
1315
1316template <typename KeyT, typename ValueT, typename KeyInfoT>
1318 return X.getMemorySize();
1319}
1320
1321} // end namespace llvm
1322
1323#endif // LLVM_ADT_DENSEMAP_H
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define LLVM_UNLIKELY(EXPR)
Definition: Compiler.h:336
#define LLVM_LIKELY(EXPR)
Definition: Compiler.h:335
This file defines DenseMapInfo traits for DenseMap.
bool End
Definition: ELF_riscv.cpp:480
This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
#define I(x, y, z)
Definition: MD5.cpp:58
This file defines counterparts of C library allocation functions defined in the namespace 'std'.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
#define P(N)
This file contains some templates that are useful if you are working with the STL at all.
This file contains library features backported from future STL versions.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
Value * RHS
Value * LHS
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
Definition: DenseMap.h:203
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:177
static unsigned getHashValue(const KeyT &Val)
Definition: DenseMap.h:449
static const KeyT getEmptyKey()
Definition: DenseMap.h:458
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
Definition: DenseMap.h:245
std::pair< iterator, bool > insert(std::pair< KeyT, ValueT > &&KV)
Definition: DenseMap.h:237
bool erase(const KeyT &Val)
Definition: DenseMap.h:319
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
Definition: DenseMap.h:74
std::pair< iterator, bool > insert_as(std::pair< KeyT, ValueT > &&KV, const LookupKeyT &Val)
Alternate version of insert() which allows a different, and possibly less expensive,...
Definition: DenseMap.h:263
DenseMapBase()=default
const_iterator find_as(const LookupKeyT &Val) const
Definition: DenseMap.h:193
const_iterator end() const
Definition: DenseMap.h:97
void moveFromOldBuckets(iterator_range< BucketT * > OldBuckets)
Definition: DenseMap.h:396
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
Definition: DenseMap.h:185
unsigned size() const
Definition: DenseMap.h:120
const_iterator find(const_arg_type_t< KeyT > Val) const
Definition: DenseMap.h:178
bool empty() const
Definition: DenseMap.h:119
std::pair< iterator, bool > emplace_or_assign(const KeyT &Key, Ts &&...Args)
Definition: DenseMap.h:304
void insert(InputIt I, InputIt E)
insert - Range insertion of pairs.
Definition: DenseMap.h:277
iterator begin()
Definition: DenseMap.h:78
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:173
iterator end()
Definition: DenseMap.h:87
static const KeyT getTombstoneKey()
Definition: DenseMap.h:464
const ValueT & at(const_arg_type_t< KeyT > Val) const
at - Return the entry for the specified key, or abort if no such entry exists.
Definition: DenseMap.h:221
bool isPointerIntoBucketsArray(const void *Ptr) const
isPointerIntoBucketsArray - Return true if the specified pointer points somewhere into the DenseMap's...
Definition: DenseMap.h:349
void copyFrom(const DenseMapBase< OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT > &other)
Definition: DenseMap.h:422
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Definition: DenseMap.h:168
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Definition: DenseMap.h:253
const_iterator begin() const
Definition: DenseMap.h:90
std::pair< iterator, bool > emplace_or_assign(KeyT &&Key, Ts &&...Args)
Definition: DenseMap.h:312
void insert_range(Range &&R)
Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
Definition: DenseMap.h:283
const void * getPointerIntoBucketsArray() const
getPointerIntoBucketsArray() - Return an opaque pointer into the buckets array.
Definition: DenseMap.h:356
std::pair< iterator, bool > insert_or_assign(KeyT &&Key, V &&Val)
Definition: DenseMap.h:296
ValueT lookup_or(const_arg_type_t< KeyT > Val, U &&Default) const
Definition: DenseMap.h:213
unsigned getMinBucketToReserveForEntries(unsigned NumEntries)
Returns the number of buckets to allocate to ensure that the DenseMap can accommodate NumEntries with...
Definition: DenseMap.h:387
static unsigned getHashValue(const LookupKeyT &Val)
Definition: DenseMap.h:454
ValueT & operator[](const KeyT &Key)
Definition: DenseMap.h:338
BucketT value_type
Definition: DenseMap.h:72
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition: DenseMap.h:76
auto keys() const
Definition: DenseMap.h:111
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:230
void erase(iterator I)
Definition: DenseMap.h:330
std::pair< iterator, bool > insert_or_assign(const KeyT &Key, V &&Val)
Definition: DenseMap.h:288
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Definition: DenseMap.h:124
ValueT & operator[](KeyT &&Key)
Definition: DenseMap.h:342
auto values() const
Definition: DenseMap.h:115
size_t getMemorySize() const
Return the approximate size (in bytes) of the actual map.
Definition: DenseMap.h:689
std::conditional_t< IsConst, const Bucket, Bucket > value_type
Definition: DenseMap.h:1211
friend bool operator!=(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition: DenseMap.h:1270
value_type * pointer
Definition: DenseMap.h:1212
DenseMapIterator & operator++()
Definition: DenseMap.h:1275
pointer operator->() const
Definition: DenseMap.h:1253
reference operator*() const
Definition: DenseMap.h:1246
DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch, bool NoAdvance=false)
Definition: DenseMap.h:1223
DenseMapIterator operator++(int)
Definition: DenseMap.h:1287
DenseMapIterator(const DenseMapIterator< KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc > &I)
Definition: DenseMap.h:1242
friend bool operator==(const DenseMapIterator &LHS, const DenseMapIterator &RHS)
Definition: DenseMap.h:1261
std::forward_iterator_tag iterator_category
Definition: DenseMap.h:1214
value_type & reference
Definition: DenseMap.h:1213
DenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition: DenseMap.h:766
void copyFrom(const DenseMap &other)
Definition: DenseMap.h:797
void shrink_and_clear()
Definition: DenseMap.h:828
DenseMap & operator=(DenseMap &&other)
Definition: DenseMap.h:789
DenseMap(unsigned InitialReserve=0)
Create a DenseMap with an optional InitialReserve that guarantee that this number of elements can be ...
Definition: DenseMap.h:745
void grow(unsigned AtLeast)
Definition: DenseMap.h:808
DenseMap(llvm::from_range_t, const RangeT &Range)
Definition: DenseMap.h:763
DenseMap(const DenseMap &other)
Definition: DenseMap.h:747
void swap(DenseMap &RHS)
Definition: DenseMap.h:774
DenseMap(const InputIt &I, const InputIt &E)
Definition: DenseMap.h:757
DenseMap(DenseMap &&other)
Definition: DenseMap.h:752
DenseMap & operator=(const DenseMap &other)
Definition: DenseMap.h:783
void grow(unsigned AtLeast)
Definition: DenseMap.h:1055
SmallDenseMap(const InputIt &I, const InputIt &E)
Definition: DenseMap.h:933
void swap(SmallDenseMap &RHS)
Definition: DenseMap.h:950
void init(unsigned InitBuckets)
Definition: DenseMap.h:1046
SmallDenseMap & operator=(SmallDenseMap &&other)
Definition: DenseMap.h:1027
SmallDenseMap & operator=(const SmallDenseMap &other)
Definition: DenseMap.h:1021
SmallDenseMap(unsigned NumInitBuckets=0)
Definition: DenseMap.h:916
SmallDenseMap(std::initializer_list< typename BaseT::value_type > Vals)
Definition: DenseMap.h:942
SmallDenseMap(SmallDenseMap &&other)
Definition: DenseMap.h:927
SmallDenseMap(const SmallDenseMap &other)
Definition: DenseMap.h:922
void copyFrom(const SmallDenseMap &other)
Definition: DenseMap.h:1035
SmallDenseMap(llvm::from_range_t, const RangeT &Range)
Definition: DenseMap.h:939
void shrink_and_clear()
Definition: DenseMap.h:1108
A range adaptor for a pair of iterators.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition: MathExtras.h:349
constexpr auto adl_begin(RangeT &&range) -> decltype(adl_detail::begin_impl(std::forward< RangeT >(range)))
Returns the begin iterator to range using std::begin and function found through Argument-Dependent Lo...
Definition: ADL.h:78
BitVector::size_type capacity_in_bytes(const BitVector &X)
Definition: BitVector.h:833
bool operator!=(uint64_t V1, const APInt &V2)
Definition: APInt.h:2113
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
Definition: MathExtras.h:293
constexpr auto adl_end(RangeT &&range) -> decltype(adl_detail::end_impl(std::forward< RangeT >(range)))
Returns the end iterator to range using std::end and functions found through Argument-Dependent Looku...
Definition: ADL.h:86
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
Definition: bit.h:295
bool operator==(const AddressRangeValuePair &LHS, const AddressRangeValuePair &RHS)
auto map_range(ContainerTy &&C, FuncTy F)
Definition: STLExtras.h:386
LLVM_ABI LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void * allocate_buffer(size_t Size, size_t Alignment)
Allocate a buffer of memory with the given size and alignment.
Definition: MemAlloc.cpp:15
LLVM_ABI void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment)
Deallocate a buffer of memory with the given size and alignment.
Definition: MemAlloc.cpp:27
@ Default
The result values are uniform if and only if all operands are uniform.
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition: MathExtras.h:378
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:856
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:858
A suitably aligned and sized character array member which can hold elements of any type.
Definition: AlignOf.h:22
const ValueT & getSecond() const
Definition: DenseMap.h:51
const KeyT & getFirst() const
Definition: DenseMap.h:49