DenseMap.h
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/ADL.h"
18#include "llvm/ADT/DenseMapInfo.h"
19#include "llvm/ADT/EpochTracker.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/STLForwardCompat.h"
22#include "llvm/Support/AlignOf.h"
23#include "llvm/Support/Compiler.h"
24#include "llvm/Support/MathExtras.h"
25#include "llvm/Support/MemAlloc.h"
26#include "llvm/Support/ReverseIteration.h"
27#include "llvm/Support/type_traits.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <cstring>
32#include <initializer_list>
33#include <iterator>
34#include <new>
35#include <type_traits>
36#include <utility>
37
38namespace llvm {
39
40namespace detail {
41
42// We extend a pair to allow users to override the bucket type with their own
43// implementation without requiring two members.
44template <typename KeyT, typename ValueT>
45struct DenseMapPair : public std::pair<KeyT, ValueT> {
46 using std::pair<KeyT, ValueT>::pair;
47
48 KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
49 const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
50 ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
51 const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
52};
53
54} // end namespace detail
55
56template <typename KeyT, typename ValueT,
57 typename KeyInfoT = DenseMapInfo<KeyT>,
58 typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
59 bool IsConst = false>
60class DenseMapIterator;
61
62template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
63 typename BucketT>
64class DenseMapBase : public DebugEpochBase {
65 template <typename T>
66 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
67
68public:
69 using size_type = unsigned;
70 using key_type = KeyT;
71 using mapped_type = ValueT;
72 using value_type = BucketT;
73
74 using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
75 using const_iterator =
76 DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
77
78 inline iterator begin() {
79 return iterator::makeBegin(buckets(), empty(), *this);
80 }
81 inline iterator end() { return iterator::makeEnd(buckets(), *this); }
82 inline const_iterator begin() const {
83 return const_iterator::makeBegin(buckets(), empty(), *this);
84 }
85 inline const_iterator end() const {
86 return const_iterator::makeEnd(buckets(), *this);
87 }
88
89 // Return a range to iterate over the keys in the map.
90 inline auto keys() {
91 return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
92 }
93
94 // Return a range to iterate over the values in the map.
95 inline auto values() {
96 return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
97 }
98
99 inline auto keys() const {
100 return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
101 }
102
103 inline auto values() const {
104 return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
105 }
106
107 [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
108 unsigned size() const { return getNumEntries(); }
109
110 /// Grow the densemap so that it can contain at least \p NumEntries items
111 /// before resizing again.
112 void reserve(size_type NumEntries) {
113 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
114 incrementEpoch();
115 if (NumBuckets > getNumBuckets())
116 grow(NumBuckets);
117 }
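  // Example (sketch): reserving capacity up front avoids repeated rehashing
  // while a known number of entries is inserted. The map and loop below are
  // hypothetical.
  //
  // \code
  //   llvm::DenseMap<unsigned, unsigned> Squares;
  //   Squares.reserve(1000); // No grow() during the loop below.
  //   for (unsigned I = 0; I != 1000; ++I)
  //     Squares.try_emplace(I, I * I);
  // \endcode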
118
119 void clear() {
120 incrementEpoch();
121 if (getNumEntries() == 0 && getNumTombstones() == 0)
122 return;
123
124 // If the capacity of the array is huge, and the # elements used is small,
125 // shrink the array.
126 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
127 shrink_and_clear();
128 return;
129 }
130
131 const KeyT EmptyKey = getEmptyKey();
132 if constexpr (std::is_trivially_destructible_v<ValueT>) {
133 // Use a simpler loop when values don't need destruction.
134 for (BucketT &B : buckets())
135 B.getFirst() = EmptyKey;
136 } else {
137 const KeyT TombstoneKey = getTombstoneKey();
138 unsigned NumEntries = getNumEntries();
139 for (BucketT &B : buckets()) {
140 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey)) {
141 if (!KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
142 B.getSecond().~ValueT();
143 --NumEntries;
144 }
145 B.getFirst() = EmptyKey;
146 }
147 }
148 assert(NumEntries == 0 && "Node count imbalance!");
149 (void)NumEntries;
150 }
151 setNumEntries(0);
152 setNumTombstones(0);
153 }
154
155 /// Return true if the specified key is in the map, false otherwise.
156 bool contains(const_arg_type_t<KeyT> Val) const {
157 return doFind(Val) != nullptr;
158 }
159
160 /// Return 1 if the specified key is in the map, 0 otherwise.
161 size_type count(const_arg_type_t<KeyT> Val) const {
162 return contains(Val) ? 1 : 0;
163 }
164
165 iterator find(const_arg_type_t<KeyT> Val) { return find_as(Val); }
166 const_iterator find(const_arg_type_t<KeyT> Val) const { return find_as(Val); }
167
168 /// Alternate version of find() which allows a different, and possibly
169 /// less expensive, key type.
170 /// The DenseMapInfo is responsible for supplying methods
171 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
172 /// type used.
173 template <class LookupKeyT> iterator find_as(const LookupKeyT &Val) {
174 if (BucketT *Bucket = doFind(Val))
175 return makeIterator(Bucket);
176 return end();
177 }
178 template <class LookupKeyT>
179 const_iterator find_as(const LookupKeyT &Val) const {
180 if (const BucketT *Bucket = doFind(Val))
181 return makeConstIterator(Bucket);
182 return end();
183 }
184
185 /// lookup - Return the entry for the specified key, or a default
186 /// constructed value if no such entry exists.
187 ValueT lookup(const_arg_type_t<KeyT> Val) const {
188 if (const BucketT *Bucket = doFind(Val))
189 return Bucket->getSecond();
190 return ValueT();
191 }
192
193 // Return the entry with the specified key, or \p Default. This variant is
194 // useful, because `lookup` cannot be used with non-default-constructible
195 // values.
196 template <typename U = std::remove_cv_t<ValueT>>
197 ValueT lookup_or(const_arg_type_t<KeyT> Val, U &&Default) const {
198 if (const BucketT *Bucket = doFind(Val))
199 return Bucket->getSecond();
200 return Default;
201 }
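  // Example (sketch) contrasting the read-only accessors above on a
  // hypothetical map:
  //
  // \code
  //   llvm::DenseMap<int, int> Counts;
  //   Counts.try_emplace(1, 3);
  //   int A = Counts.lookup(1);        // 3
  //   int B = Counts.lookup(2);        // 0 (value-initialized ValueT)
  //   int C = Counts.lookup_or(2, -1); // -1 (caller-supplied default)
  //   bool Has = Counts.contains(2);   // false
  // \endcode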
202
203 /// at - Return the entry for the specified key, or abort if no such
204 /// entry exists.
205 const ValueT &at(const_arg_type_t<KeyT> Val) const {
206 auto Iter = this->find(std::move(Val));
207 assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
208 return Iter->second;
209 }
210
211 // Inserts key,value pair into the map if the key isn't already in the map.
212 // If the key is already in the map, it returns false and doesn't update the
213 // value.
214 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
215 return try_emplace_impl(KV.first, KV.second);
216 }
217
218 // Inserts key,value pair into the map if the key isn't already in the map.
219 // If the key is already in the map, it returns false and doesn't update the
220 // value.
221 std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
222 return try_emplace_impl(std::move(KV.first), std::move(KV.second));
223 }
224
225 // Inserts key,value pair into the map if the key isn't already in the map.
226 // The value is constructed in-place if the key is not in the map, otherwise
227 // it is not moved.
228 template <typename... Ts>
229 std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&...Args) {
230 return try_emplace_impl(std::move(Key), std::forward<Ts>(Args)...);
231 }
232
233 // Inserts key,value pair into the map if the key isn't already in the map.
234 // The value is constructed in-place if the key is not in the map, otherwise
235 // it is not moved.
236 template <typename... Ts>
237 std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&...Args) {
238 return try_emplace_impl(Key, std::forward<Ts>(Args)...);
239 }
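  // Example (sketch) contrasting insert() and try_emplace() on a hypothetical
  // map; neither overwrites an existing mapping:
  //
  // \code
  //   llvm::DenseMap<int, std::string> M;
  //   M.insert({1, "one"});     // {iterator, true}: newly inserted.
  //   M.insert({1, "uno"});     // {iterator, false}: value stays "one".
  //   M.try_emplace(2, 3, 'x'); // Constructs the value "xxx" in place.
  // \endcode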
240
241 /// Alternate version of insert() which allows a different, and possibly
242 /// less expensive, key type.
243 /// The DenseMapInfo is responsible for supplying methods
244 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
245 /// type used.
246 template <typename LookupKeyT>
247 std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
248 const LookupKeyT &Val) {
249 BucketT *TheBucket;
250 if (LookupBucketFor(Val, TheBucket))
251 return {makeIterator(TheBucket), false}; // Already in map.
252
253 // Otherwise, insert the new element.
254 TheBucket = findBucketForInsertion(Val, TheBucket);
255 TheBucket->getFirst() = std::move(KV.first);
256 ::new (&TheBucket->getSecond()) ValueT(std::move(KV.second));
257 return {makeIterator(TheBucket), true};
258 }
259
260 /// insert - Range insertion of pairs.
261 template <typename InputIt> void insert(InputIt I, InputIt E) {
262 for (; I != E; ++I)
263 insert(*I);
264 }
265
266 /// Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
267 template <typename Range> void insert_range(Range &&R) {
268 insert(adl_begin(R), adl_end(R));
269 }
270
271 template <typename V>
272 std::pair<iterator, bool> insert_or_assign(const KeyT &Key, V &&Val) {
273 auto Ret = try_emplace(Key, std::forward<V>(Val));
274 if (!Ret.second)
275 Ret.first->second = std::forward<V>(Val);
276 return Ret;
277 }
278
279 template <typename V>
280 std::pair<iterator, bool> insert_or_assign(KeyT &&Key, V &&Val) {
281 auto Ret = try_emplace(std::move(Key), std::forward<V>(Val));
282 if (!Ret.second)
283 Ret.first->second = std::forward<V>(Val);
284 return Ret;
285 }
286
287 template <typename... Ts>
288 std::pair<iterator, bool> emplace_or_assign(const KeyT &Key, Ts &&...Args) {
289 auto Ret = try_emplace(Key, std::forward<Ts>(Args)...);
290 if (!Ret.second)
291 Ret.first->second = ValueT(std::forward<Ts>(Args)...);
292 return Ret;
293 }
294
295 template <typename... Ts>
296 std::pair<iterator, bool> emplace_or_assign(KeyT &&Key, Ts &&...Args) {
297 auto Ret = try_emplace(std::move(Key), std::forward<Ts>(Args)...);
298 if (!Ret.second)
299 Ret.first->second = ValueT(std::forward<Ts>(Args)...);
300 return Ret;
301 }
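  // Example (sketch): unlike try_emplace(), insert_or_assign() overwrites the
  // mapped value when the key is already present (hypothetical map):
  //
  // \code
  //   llvm::DenseMap<int, std::string> M;
  //   M.insert_or_assign(1, "one"); // Inserts {1, "one"}.
  //   M.insert_or_assign(1, "uno"); // Assigns; M[1] is now "uno".
  // \endcode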
302
303 bool erase(const KeyT &Val) {
304 BucketT *TheBucket = doFind(Val);
305 if (!TheBucket)
306 return false; // not in map.
307
308 TheBucket->getSecond().~ValueT();
309 TheBucket->getFirst() = getTombstoneKey();
310 decrementNumEntries();
311 incrementNumTombstones();
312 return true;
313 }
314 void erase(iterator I) {
315 BucketT *TheBucket = &*I;
316 TheBucket->getSecond().~ValueT();
317 TheBucket->getFirst() = getTombstoneKey();
318 decrementNumEntries();
319 incrementNumTombstones();
320 }
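  // Example (sketch): erasing by key reports whether anything was removed,
  // while erasing by iterator assumes the element exists (hypothetical map):
  //
  // \code
  //   llvm::DenseMap<int, int> M = {{1, 10}};
  //   bool Removed = M.erase(1); // true
  //   Removed = M.erase(1);      // false: the key is no longer present.
  // \endcode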
321
322 ValueT &operator[](const KeyT &Key) {
323 return lookupOrInsertIntoBucket(Key).first->second;
324 }
325
326 ValueT &operator[](KeyT &&Key) {
327 return lookupOrInsertIntoBucket(std::move(Key)).first->second;
328 }
329
330 /// isPointerIntoBucketsArray - Return true if the specified pointer points
331 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
332 /// value in the DenseMap).
333 bool isPointerIntoBucketsArray(const void *Ptr) const {
334 return Ptr >= getBuckets() && Ptr < getBucketsEnd();
335 }
336
337 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
338 /// array. In conjunction with the previous method, this can be used to
339 /// determine whether an insertion caused the DenseMap to reallocate.
340 const void *getPointerIntoBucketsArray() const { return getBuckets(); }
341
342protected:
343 DenseMapBase() = default;
344
345 void destroyAll() {
346 if (getNumBuckets() == 0) // Nothing to do.
347 return;
348
349 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
350 for (BucketT &B : buckets()) {
351 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
352 !KeyInfoT::isEqual(B.getFirst(), TombstoneKey))
353 B.getSecond().~ValueT();
354 B.getFirst().~KeyT();
355 }
356 }
357
358 void initEmpty() {
359 setNumEntries(0);
360 setNumTombstones(0);
361
362 assert((getNumBuckets() & (getNumBuckets() - 1)) == 0 &&
363 "# initial buckets must be a power of two!");
364 const KeyT EmptyKey = getEmptyKey();
365 for (BucketT &B : buckets())
366 ::new (&B.getFirst()) KeyT(EmptyKey);
367 }
368
369 /// Returns the number of buckets to allocate to ensure that the DenseMap can
370 /// accommodate \p NumEntries without need to grow().
371 unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
372 // Ensure that "NumEntries * 4 < NumBuckets * 3"
373 if (NumEntries == 0)
374 return 0;
375 // +1 is required because of the strict equality.
376 // For example, if NumEntries is 48, we need to return 128.
377 return NextPowerOf2(NumEntries * 4 / 3 + 1);
378 }
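  // Worked example for the formula above, assuming NumEntries == 48:
  // 48 * 4 / 3 + 1 == 65 and NextPowerOf2(65) == 128, which satisfies the
  // required invariant 48 * 4 < 128 * 3.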
379
380 void moveFromOldBuckets(iterator_range<BucketT *> OldBuckets) {
381 initEmpty();
382
383 // Insert all the old elements.
384 const KeyT EmptyKey = getEmptyKey();
385 const KeyT TombstoneKey = getTombstoneKey();
386 for (BucketT &B : OldBuckets) {
387 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
388 !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
389 // Insert the key/value into the new table.
390 BucketT *DestBucket;
391 bool FoundVal = LookupBucketFor(B.getFirst(), DestBucket);
392 (void)FoundVal; // silence warning.
393 assert(!FoundVal && "Key already in new map?");
394 DestBucket->getFirst() = std::move(B.getFirst());
395 ::new (&DestBucket->getSecond()) ValueT(std::move(B.getSecond()));
396 incrementNumEntries();
397
398 // Free the value.
399 B.getSecond().~ValueT();
400 }
401 B.getFirst().~KeyT();
402 }
403 }
404
405 template <typename OtherBaseT>
406 void copyFrom(
407 const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
408 assert(&other != this);
409 assert(getNumBuckets() == other.getNumBuckets());
410
411 setNumEntries(other.getNumEntries());
412 setNumTombstones(other.getNumTombstones());
413
414 BucketT *Buckets = getBuckets();
415 const BucketT *OtherBuckets = other.getBuckets();
416 const size_t NumBuckets = getNumBuckets();
417 if constexpr (std::is_trivially_copyable_v<KeyT> &&
418 std::is_trivially_copyable_v<ValueT>) {
419 memcpy(reinterpret_cast<void *>(Buckets), OtherBuckets,
420 NumBuckets * sizeof(BucketT));
421 } else {
422 const KeyT EmptyKey = getEmptyKey();
423 const KeyT TombstoneKey = getTombstoneKey();
424 for (size_t I = 0; I < NumBuckets; ++I) {
425 ::new (&Buckets[I].getFirst()) KeyT(OtherBuckets[I].getFirst());
426 if (!KeyInfoT::isEqual(Buckets[I].getFirst(), EmptyKey) &&
427 !KeyInfoT::isEqual(Buckets[I].getFirst(), TombstoneKey))
428 ::new (&Buckets[I].getSecond()) ValueT(OtherBuckets[I].getSecond());
429 }
430 }
431 }
432
433 static unsigned getHashValue(const KeyT &Val) {
434 return KeyInfoT::getHashValue(Val);
435 }
436
437 template <typename LookupKeyT>
438 static unsigned getHashValue(const LookupKeyT &Val) {
439 return KeyInfoT::getHashValue(Val);
440 }
441
442 static const KeyT getEmptyKey() {
443 static_assert(std::is_base_of_v<DenseMapBase, DerivedT>,
444 "Must pass the derived type to this template!");
445 return KeyInfoT::getEmptyKey();
446 }
447
448 static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
449
450private:
451 template <typename KeyArgT, typename... Ts>
452 std::pair<BucketT *, bool> lookupOrInsertIntoBucket(KeyArgT &&Key,
453 Ts &&...Args) {
454 BucketT *TheBucket = nullptr;
455 if (LookupBucketFor(Key, TheBucket))
456 return {TheBucket, false}; // Already in the map.
457
458 // Otherwise, insert the new element.
459 TheBucket = findBucketForInsertion(Key, TheBucket);
460 TheBucket->getFirst() = std::forward<KeyArgT>(Key);
461 ::new (&TheBucket->getSecond()) ValueT(std::forward<Ts>(Args)...);
462 return {TheBucket, true};
463 }
464
465 template <typename KeyArgT, typename... Ts>
466 std::pair<iterator, bool> try_emplace_impl(KeyArgT &&Key, Ts &&...Args) {
467 auto [Bucket, Inserted] = lookupOrInsertIntoBucket(
468 std::forward<KeyArgT>(Key), std::forward<Ts>(Args)...);
469 return {makeIterator(Bucket), Inserted};
470 }
471
472 iterator makeIterator(BucketT *TheBucket) {
473 return iterator::makeIterator(TheBucket, buckets(), *this);
474 }
475
476 const_iterator makeConstIterator(const BucketT *TheBucket) const {
477 return const_iterator::makeIterator(TheBucket, buckets(), *this);
478 }
479
480 unsigned getNumEntries() const {
481 return static_cast<const DerivedT *>(this)->getNumEntries();
482 }
483
484 void setNumEntries(unsigned Num) {
485 static_cast<DerivedT *>(this)->setNumEntries(Num);
486 }
487
488 void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }
489
490 void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }
491
492 unsigned getNumTombstones() const {
493 return static_cast<const DerivedT *>(this)->getNumTombstones();
494 }
495
496 void setNumTombstones(unsigned Num) {
497 static_cast<DerivedT *>(this)->setNumTombstones(Num);
498 }
499
500 void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }
501
502 void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }
503
504 const BucketT *getBuckets() const {
505 return static_cast<const DerivedT *>(this)->getBuckets();
506 }
507
508 BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }
509
510 unsigned getNumBuckets() const {
511 return static_cast<const DerivedT *>(this)->getNumBuckets();
512 }
513
514 BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }
515
516 const BucketT *getBucketsEnd() const {
517 return getBuckets() + getNumBuckets();
518 }
519
520 iterator_range<BucketT *> buckets() {
521 return llvm::make_range(getBuckets(), getBucketsEnd());
522 }
523
524 iterator_range<const BucketT *> buckets() const {
525 return llvm::make_range(getBuckets(), getBucketsEnd());
526 }
527
528 void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }
529
530 void shrink_and_clear() { static_cast<DerivedT *>(this)->shrink_and_clear(); }
531
532 template <typename LookupKeyT>
533 BucketT *findBucketForInsertion(const LookupKeyT &Lookup,
534 BucketT *TheBucket) {
535 incrementEpoch();
536
537 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
538 // the buckets are empty (meaning that many are filled with tombstones),
539 // grow the table.
540 //
541 // The latter case is tricky. For example, if we had one empty bucket with
542 // tons of tombstones, failing lookups (e.g. for insertion) would have to
543 // probe almost the entire table until they found the empty bucket. If the
544 // table were completely filled with tombstones, no lookup would ever
545 // succeed, causing infinite loops in lookup.
546 unsigned NewNumEntries = getNumEntries() + 1;
547 unsigned NumBuckets = getNumBuckets();
548 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
549 this->grow(NumBuckets * 2);
550 LookupBucketFor(Lookup, TheBucket);
551 } else if (LLVM_UNLIKELY(NumBuckets -
552 (NewNumEntries + getNumTombstones()) <=
553 NumBuckets / 8)) {
554 this->grow(NumBuckets);
555 LookupBucketFor(Lookup, TheBucket);
556 }
557 assert(TheBucket);
558
559 // Only update the state after we've grown our bucket space appropriately
560 // so that when growing buckets we have self-consistent entry count.
561 incrementNumEntries();
562
563 // If we are writing over a tombstone, remember this.
564 const KeyT EmptyKey = getEmptyKey();
565 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
566 decrementNumTombstones();
567
568 return TheBucket;
569 }
570
571 template <typename LookupKeyT>
572 const BucketT *doFind(const LookupKeyT &Val) const {
573 const BucketT *BucketsPtr = getBuckets();
574 const unsigned NumBuckets = getNumBuckets();
575 if (NumBuckets == 0)
576 return nullptr;
577
578 const KeyT EmptyKey = getEmptyKey();
579 unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
580 unsigned ProbeAmt = 1;
581 while (true) {
582 const BucketT *Bucket = BucketsPtr + BucketNo;
583 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, Bucket->getFirst())))
584 return Bucket;
585 if (LLVM_LIKELY(KeyInfoT::isEqual(Bucket->getFirst(), EmptyKey)))
586 return nullptr;
587
588 // Otherwise, it's a hash collision or a tombstone, continue quadratic
589 // probing.
590 BucketNo += ProbeAmt++;
591 BucketNo &= NumBuckets - 1;
592 }
593 }
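  // Probe-sequence sketch for the quadratic probing used by doFind() above
  // and LookupBucketFor() below: starting from H = hash & (NumBuckets - 1),
  // probes visit H, H+1, H+3, H+6, H+10, ... (mod NumBuckets), i.e. H plus the
  // triangular numbers. For example, with NumBuckets == 8 and H == 5 the
  // sequence is 5, 6, 0, 3, 7, 4, 2, 1, covering every bucket before
  // repeating, so a search terminates as long as at least one bucket is empty,
  // which the growth policy in findBucketForInsertion() maintains.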
594
595 template <typename LookupKeyT> BucketT *doFind(const LookupKeyT &Val) {
596 return const_cast<BucketT *>(
597 static_cast<const DenseMapBase *>(this)->doFind(Val));
598 }
599
600 /// LookupBucketFor - Look up the appropriate bucket for Val, returning it in
601 /// FoundBucket. If the bucket contains the key and a value, this returns
602 /// true; otherwise it sets FoundBucket to a bucket with an empty marker or
603 /// tombstone and returns false.
604 template <typename LookupKeyT>
605 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
606 BucketT *BucketsPtr = getBuckets();
607 const unsigned NumBuckets = getNumBuckets();
608
609 if (NumBuckets == 0) {
610 FoundBucket = nullptr;
611 return false;
612 }
613
614 // FoundTombstone - Keep track of whether we find a tombstone while probing.
615 BucketT *FoundTombstone = nullptr;
616 const KeyT EmptyKey = getEmptyKey();
617 const KeyT TombstoneKey = getTombstoneKey();
618 assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
619 !KeyInfoT::isEqual(Val, TombstoneKey) &&
620 "Empty/Tombstone value shouldn't be inserted into map!");
621
622 unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
623 unsigned ProbeAmt = 1;
624 while (true) {
625 BucketT *ThisBucket = BucketsPtr + BucketNo;
626 // Found Val's bucket? If so, return it.
627 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
628 FoundBucket = ThisBucket;
629 return true;
630 }
631
632 // If we found an empty bucket, the key doesn't exist in the map; return
633 // the bucket the caller should insert into.
634 if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
635 // If we've already seen a tombstone while probing, fill it in instead
636 // of the empty bucket we eventually probed to.
637 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
638 return false;
639 }
640
641 // If this is a tombstone, remember it. If Val ends up not in the map, we
642 // prefer to return it rather than something that would require more probing.
643 if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
644 !FoundTombstone)
645 FoundTombstone = ThisBucket; // Remember the first tombstone found.
646
647 // Otherwise, it's a hash collision or a tombstone, continue quadratic
648 // probing.
649 BucketNo += ProbeAmt++;
650 BucketNo &= (NumBuckets - 1);
651 }
652 }
653
654public:
655 /// Return the approximate size (in bytes) of the actual map.
656 /// This is just the raw memory used by DenseMap.
657 /// If entries are pointers to objects, the size of the referenced objects
658 /// is not included.
659 size_t getMemorySize() const { return getNumBuckets() * sizeof(BucketT); }
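  // Worked example for the method above: a map with 64 buckets of
  // DenseMapPair<int, int> (typically 8 bytes each) reports 64 * 8 == 512
  // bytes, regardless of how many of those buckets are actually occupied.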
660};
661
662/// Equality comparison for DenseMap.
663///
664/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
665/// is also in RHS, and that no additional pairs are in RHS.
666/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
667/// complexity is linear, worst case is O(N^2) (if every hash collides).
668template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
669 typename BucketT>
670bool operator==(
671 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
672 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
673 if (LHS.size() != RHS.size())
674 return false;
675
676 for (auto &KV : LHS) {
677 auto I = RHS.find(KV.first);
678 if (I == RHS.end() || I->second != KV.second)
679 return false;
680 }
681
682 return true;
683}
684
685/// Inequality comparison for DenseMap.
686///
687/// Equivalent to !(LHS == RHS). See operator== for performance notes.
688template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
689 typename BucketT>
690bool operator!=(
691 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
692 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
693 return !(LHS == RHS);
694}
695
696template <typename KeyT, typename ValueT,
697 typename KeyInfoT = DenseMapInfo<KeyT>,
698 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
699class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
700 KeyT, ValueT, KeyInfoT, BucketT> {
701 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
702
703 // Lift some types from the dependent base class into this class for
704 // simplicity of referring to them.
705 using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
706
707 BucketT *Buckets;
708 unsigned NumEntries;
709 unsigned NumTombstones;
710 unsigned NumBuckets;
711
712public:
713 /// Create a DenseMap with an optional \p InitialReserve that guarantees that
714 /// this number of elements can be inserted in the map without grow().
715 explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
716
717 DenseMap(const DenseMap &other) : BaseT() {
718 init(0);
719 copyFrom(other);
720 }
721
722 DenseMap(DenseMap &&other) : BaseT() {
723 init(0);
724 swap(other);
725 }
726
727 template <typename InputIt> DenseMap(const InputIt &I, const InputIt &E) {
728 init(std::distance(I, E));
729 this->insert(I, E);
730 }
731
732 template <typename RangeT>
733 DenseMap(llvm::from_range_t, const RangeT &Range)
734 : DenseMap(adl_begin(Range), adl_end(Range)) {}
735
736 DenseMap(std::initializer_list<typename BaseT::value_type> Vals)
737 : DenseMap(Vals.begin(), Vals.end()) {}
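  // Example (sketch) of the constructors above, with hypothetical values:
  //
  // \code
  //   llvm::DenseMap<int, int> A(/*InitialReserve=*/16);
  //   llvm::DenseMap<int, int> B = {{1, 10}, {2, 20}};
  //   std::vector<std::pair<int, int>> V = {{3, 30}, {4, 40}};
  //   llvm::DenseMap<int, int> C(V.begin(), V.end());
  // \endcode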
738
739 ~DenseMap() {
740 this->destroyAll();
741 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
742 }
743
744 void swap(DenseMap &RHS) {
745 this->incrementEpoch();
746 RHS.incrementEpoch();
747 std::swap(Buckets, RHS.Buckets);
748 std::swap(NumEntries, RHS.NumEntries);
749 std::swap(NumTombstones, RHS.NumTombstones);
750 std::swap(NumBuckets, RHS.NumBuckets);
751 }
752
753 DenseMap &operator=(const DenseMap &other) {
754 if (&other != this)
755 copyFrom(other);
756 return *this;
757 }
758
759 DenseMap &operator=(DenseMap &&other) {
760 this->destroyAll();
761 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
762 init(0);
763 swap(other);
764 return *this;
765 }
766
767 void copyFrom(const DenseMap &other) {
768 this->destroyAll();
769 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
770 if (allocateBuckets(other.NumBuckets)) {
771 this->BaseT::copyFrom(other);
772 } else {
773 NumEntries = 0;
774 NumTombstones = 0;
775 }
776 }
777
778 void grow(unsigned AtLeast) {
779 unsigned OldNumBuckets = NumBuckets;
780 BucketT *OldBuckets = Buckets;
781
782 allocateBuckets(std::max<unsigned>(
783 64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
784 assert(Buckets);
785 if (!OldBuckets) {
786 this->BaseT::initEmpty();
787 return;
788 }
789
790 this->moveFromOldBuckets(
791 llvm::make_range(OldBuckets, OldBuckets + OldNumBuckets));
792
793 // Free the old table.
794 deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
795 alignof(BucketT));
796 }
797
798 void shrink_and_clear() {
799 unsigned OldNumBuckets = NumBuckets;
800 unsigned OldNumEntries = NumEntries;
801 this->destroyAll();
802
803 // Reduce the number of buckets.
804 unsigned NewNumBuckets = 0;
805 if (OldNumEntries)
806 NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
807 if (NewNumBuckets == NumBuckets) {
808 this->BaseT::initEmpty();
809 return;
810 }
811
812 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
813 alignof(BucketT));
814 init(NewNumBuckets);
815 }
816
817private:
818 unsigned getNumEntries() const { return NumEntries; }
819
820 void setNumEntries(unsigned Num) { NumEntries = Num; }
821
822 unsigned getNumTombstones() const { return NumTombstones; }
823
824 void setNumTombstones(unsigned Num) { NumTombstones = Num; }
825
826 BucketT *getBuckets() const { return Buckets; }
827
828 unsigned getNumBuckets() const { return NumBuckets; }
829
830 bool allocateBuckets(unsigned Num) {
831 NumBuckets = Num;
832 if (NumBuckets == 0) {
833 Buckets = nullptr;
834 return false;
835 }
836
837 Buckets = static_cast<BucketT *>(
838 allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
839 return true;
840 }
841
842 void init(unsigned InitNumEntries) {
843 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
844 if (allocateBuckets(InitBuckets)) {
845 this->BaseT::initEmpty();
846 } else {
847 NumEntries = 0;
848 NumTombstones = 0;
849 }
850 }
851};
852
853template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
854 typename KeyInfoT = DenseMapInfo<KeyT>,
855 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
856class SmallDenseMap
857 : public DenseMapBase<
858 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
859 ValueT, KeyInfoT, BucketT> {
860 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
861
862 // Lift some types from the dependent base class into this class for
863 // simplicity of referring to them.
864 using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
865
866 static_assert(isPowerOf2_64(InlineBuckets),
867 "InlineBuckets must be a power of 2.");
868
869 unsigned Small : 1;
870 unsigned NumEntries : 31;
871 unsigned NumTombstones;
872
873 struct LargeRep {
874 BucketT *Buckets;
875 unsigned NumBuckets;
876 iterator_range<BucketT *> buckets() {
877 return llvm::make_range(Buckets, Buckets + NumBuckets);
878 }
879 };
880
881 /// A "union" of an inline bucket array and the struct representing
882 /// a large bucket. This union will be discriminated by the 'Small' bit.
883 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
884
885public:
886 explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
887 if (NumInitBuckets > InlineBuckets)
888 NumInitBuckets = llvm::bit_ceil(NumInitBuckets);
889 init(NumInitBuckets);
890 }
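  // Example (sketch): a SmallDenseMap keeps up to InlineBuckets buckets inside
  // the object and only heap-allocates once it has to grow beyond them
  // (hypothetical use):
  //
  // \code
  //   llvm::SmallDenseMap<int, int, 8> M; // Room for 8 buckets inline.
  //   M.try_emplace(1, 10);               // Still using the inline storage.
  // \endcode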
891
892 SmallDenseMap(const SmallDenseMap &other) : BaseT() {
893 init(0);
894 copyFrom(other);
895 }
896
897 SmallDenseMap(SmallDenseMap &&other) : BaseT() {
898 init(0);
899 swap(other);
900 }
901
902 template <typename InputIt>
903 SmallDenseMap(const InputIt &I, const InputIt &E) {
904 init(NextPowerOf2(std::distance(I, E)));
905 this->insert(I, E);
906 }
907
908 template <typename RangeT>
909 SmallDenseMap(llvm::from_range_t, const RangeT &Range)
910 : SmallDenseMap(adl_begin(Range), adl_end(Range)) {}
911
912 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
913 : SmallDenseMap(Vals.begin(), Vals.end()) {}
914
915 ~SmallDenseMap() {
916 this->destroyAll();
917 deallocateBuckets();
918 }
919
920 void swap(SmallDenseMap &RHS) {
921 unsigned TmpNumEntries = RHS.NumEntries;
922 RHS.NumEntries = NumEntries;
923 NumEntries = TmpNumEntries;
924 std::swap(NumTombstones, RHS.NumTombstones);
925
926 const KeyT EmptyKey = this->getEmptyKey();
927 const KeyT TombstoneKey = this->getTombstoneKey();
928 if (Small && RHS.Small) {
929 // If we're swapping inline bucket arrays, we have to cope with some of
930 // the tricky bits of DenseMap's storage system: the buckets are not
931 // fully initialized. Thus we swap every key, but we may have
932 // a one-directional move of the value.
933 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
934 BucketT *LHSB = &getInlineBuckets()[i],
935 *RHSB = &RHS.getInlineBuckets()[i];
936 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
937 !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
938 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
939 !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
940 if (hasLHSValue && hasRHSValue) {
941 // Swap together if we can...
942 std::swap(*LHSB, *RHSB);
943 continue;
944 }
945 // Swap separately and handle any asymmetry.
946 std::swap(LHSB->getFirst(), RHSB->getFirst());
947 if (hasLHSValue) {
948 ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
949 LHSB->getSecond().~ValueT();
950 } else if (hasRHSValue) {
951 ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
952 RHSB->getSecond().~ValueT();
953 }
954 }
955 return;
956 }
957 if (!Small && !RHS.Small) {
958 std::swap(*getLargeRep(), *RHS.getLargeRep());
959 return;
960 }
961
962 SmallDenseMap &SmallSide = Small ? *this : RHS;
963 SmallDenseMap &LargeSide = Small ? RHS : *this;
964
965 // First stash the large side's rep and move the small side across.
966 LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
967 LargeSide.getLargeRep()->~LargeRep();
968 LargeSide.Small = true;
969 // This is similar to the standard move-from-old-buckets, but the bucket
970 // count hasn't actually rotated in this case. So we have to carefully
971 // move construct the keys and values into their new locations, but there
972 // is no need to re-hash things.
973 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
974 BucketT *NewB = &LargeSide.getInlineBuckets()[i],
975 *OldB = &SmallSide.getInlineBuckets()[i];
976 ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
977 OldB->getFirst().~KeyT();
978 if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
979 !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
980 ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
981 OldB->getSecond().~ValueT();
982 }
983 }
984
985 // The hard part of moving the small buckets across is done, just move
986 // the TmpRep into its new home.
987 SmallSide.Small = false;
988 new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
989 }
990
991 SmallDenseMap &operator=(const SmallDenseMap &other) {
992 if (&other != this)
993 copyFrom(other);
994 return *this;
995 }
996
997 SmallDenseMap &operator=(SmallDenseMap &&other) {
998 this->destroyAll();
999 deallocateBuckets();
1000 init(0);
1001 swap(other);
1002 return *this;
1003 }
1004
1005 void copyFrom(const SmallDenseMap &other) {
1006 this->destroyAll();
1007 deallocateBuckets();
1008 Small = true;
1009 if (other.getNumBuckets() > InlineBuckets) {
1010 Small = false;
1011 new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1012 }
1013 this->BaseT::copyFrom(other);
1014 }
1015
1016 void init(unsigned InitBuckets) {
1017 Small = true;
1018 if (InitBuckets > InlineBuckets) {
1019 Small = false;
1020 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1021 }
1022 this->BaseT::initEmpty();
1023 }
1024
1025 void grow(unsigned AtLeast) {
1026 if (AtLeast > InlineBuckets)
1027 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));
1028
1029 if (Small) {
1030 // First move the inline buckets into a temporary storage.
1031 AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1032 BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1033 BucketT *TmpEnd = TmpBegin;
1034
1035 // Loop over the buckets, moving non-empty, non-tombstones into the
1036 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1037 const KeyT EmptyKey = this->getEmptyKey();
1038 const KeyT TombstoneKey = this->getTombstoneKey();
1039 for (BucketT &B : inlineBuckets()) {
1040 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
1041 !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
1042 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1043 "Too many inline buckets!");
1044 ::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
1045 ::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
1046 ++TmpEnd;
1047 B.getSecond().~ValueT();
1048 }
1049 B.getFirst().~KeyT();
1050 }
1051
1052 // AtLeast == InlineBuckets can happen if there are many tombstones,
1053 // and grow() is used to remove them. Normally we switch to the large
1054 // rep here.
1055 if (AtLeast > InlineBuckets) {
1056 Small = false;
1057 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1058 }
1059 this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
1060 return;
1061 }
1062
1063 LargeRep OldRep = std::move(*getLargeRep());
1064 getLargeRep()->~LargeRep();
1065 if (AtLeast <= InlineBuckets) {
1066 Small = true;
1067 } else {
1068 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1069 }
1070
1071 this->moveFromOldBuckets(OldRep.buckets());
1072
1073 // Free the old table.
1074 deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1075 alignof(BucketT));
1076 }
1077
1078 void shrink_and_clear() {
1079 unsigned OldSize = this->size();
1080 this->destroyAll();
1081
1082 // Reduce the number of buckets.
1083 unsigned NewNumBuckets = 0;
1084 if (OldSize) {
1085 NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1086 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1087 NewNumBuckets = 64;
1088 }
1089 if ((Small && NewNumBuckets <= InlineBuckets) ||
1090 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1091 this->BaseT::initEmpty();
1092 return;
1093 }
1094
1095 deallocateBuckets();
1096 init(NewNumBuckets);
1097 }
1098
1099private:
1100 unsigned getNumEntries() const { return NumEntries; }
1101
1102 void setNumEntries(unsigned Num) {
1103 // NumEntries is hardcoded to be 31 bits wide.
1104 assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1105 NumEntries = Num;
1106 }
1107
1108 unsigned getNumTombstones() const { return NumTombstones; }
1109
1110 void setNumTombstones(unsigned Num) { NumTombstones = Num; }
1111
1112 const BucketT *getInlineBuckets() const {
1113 assert(Small);
1114 // Note that this cast does not violate aliasing rules as we assert that
1115 // the memory's dynamic type is the small, inline bucket buffer, and the
1116 // 'storage' is a POD containing a char buffer.
1117 return reinterpret_cast<const BucketT *>(&storage);
1118 }
1119
1120 BucketT *getInlineBuckets() {
1121 return const_cast<BucketT *>(
1122 const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1123 }
1124
1125 const LargeRep *getLargeRep() const {
1126 assert(!Small);
1127 // Note, same rule about aliasing as with getInlineBuckets.
1128 return reinterpret_cast<const LargeRep *>(&storage);
1129 }
1130
1131 LargeRep *getLargeRep() {
1132 return const_cast<LargeRep *>(
1133 const_cast<const SmallDenseMap *>(this)->getLargeRep());
1134 }
1135
1136 const BucketT *getBuckets() const {
1137 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1138 }
1139
1140 BucketT *getBuckets() {
1141 return const_cast<BucketT *>(
1142 const_cast<const SmallDenseMap *>(this)->getBuckets());
1143 }
1144
1145 unsigned getNumBuckets() const {
1146 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1147 }
1148
1149 iterator_range<BucketT *> inlineBuckets() {
1150 BucketT *Begin = getInlineBuckets();
1151 return llvm::make_range(Begin, Begin + InlineBuckets);
1152 }
1153
1154 void deallocateBuckets() {
1155 if (Small)
1156 return;
1157
1158 deallocate_buffer(getLargeRep()->Buckets,
1159 sizeof(BucketT) * getLargeRep()->NumBuckets,
1160 alignof(BucketT));
1161 getLargeRep()->~LargeRep();
1162 }
1163
1164 LargeRep allocateBuckets(unsigned Num) {
1165 assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
1166 LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1167 sizeof(BucketT) * Num, alignof(BucketT))),
1168 Num};
1169 return Rep;
1170 }
1171};
1172
1173template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1174 bool IsConst>
1175class DenseMapIterator : DebugEpochBase::HandleBase {
1176 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1177 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1178
1179public:
1180 using difference_type = ptrdiff_t;
1181 using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
1182 using pointer = value_type *;
1183 using reference = value_type &;
1184 using iterator_category = std::forward_iterator_tag;
1185
1186private:
1187 pointer Ptr = nullptr;
1188 pointer End = nullptr;
1189
1190 DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch)
1191 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1192 assert(isHandleInSync() && "invalid construction!");
1193 }
1194
1195public:
1196 DenseMapIterator() = default;
1197
1198 static DenseMapIterator makeBegin(iterator_range<pointer> Buckets,
1199 bool IsEmpty, const DebugEpochBase &Epoch) {
1200 // When the map is empty, avoid the overhead of advancing/retreating past
1201 // empty buckets.
1202 if (IsEmpty)
1203 return makeEnd(Buckets, Epoch);
1204 if (shouldReverseIterate<KeyT>()) {
1205 DenseMapIterator Iter(Buckets.end(), Buckets.begin(), Epoch);
1206 Iter.RetreatPastEmptyBuckets();
1207 return Iter;
1208 }
1209 DenseMapIterator Iter(Buckets.begin(), Buckets.end(), Epoch);
1210 Iter.AdvancePastEmptyBuckets();
1211 return Iter;
1212 }
1213
1214 static DenseMapIterator makeEnd(iterator_range<pointer> Buckets,
1215 const DebugEpochBase &Epoch) {
1216 if (shouldReverseIterate<KeyT>())
1217 return DenseMapIterator(Buckets.begin(), Buckets.begin(), Epoch);
1218 return DenseMapIterator(Buckets.end(), Buckets.end(), Epoch);
1219 }
1220
1221 static DenseMapIterator makeIterator(pointer P,
1222 iterator_range<pointer> Buckets,
1223 const DebugEpochBase &Epoch) {
1224 if (shouldReverseIterate<KeyT>())
1225 return DenseMapIterator(P + 1, Buckets.begin(), Epoch);
1226 return DenseMapIterator(P, Buckets.end(), Epoch);
1227 }
1228
1229 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1230 // for const iterator destinations so it doesn't end up as a user defined copy
1231 // constructor.
1232 template <bool IsConstSrc,
1233 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1234 DenseMapIterator(
1235 const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1236 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1237
1238 reference operator*() const {
1239 assert(isHandleInSync() && "invalid iterator access!");
1240 assert(Ptr != End && "dereferencing end() iterator");
1241 if (shouldReverseIterate<KeyT>())
1242 return Ptr[-1];
1243 return *Ptr;
1244 }
1245 pointer operator->() const {
1246 assert(isHandleInSync() && "invalid iterator access!");
1247 assert(Ptr != End && "dereferencing end() iterator");
1248 if (shouldReverseIterate<KeyT>())
1249 return &(Ptr[-1]);
1250 return Ptr;
1251 }
1252
1253 friend bool operator==(const DenseMapIterator &LHS,
1254 const DenseMapIterator &RHS) {
1255 assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
1256 assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
1257 assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1258 "comparing incomparable iterators!");
1259 return LHS.Ptr == RHS.Ptr;
1260 }
1261
1262 friend bool operator!=(const DenseMapIterator &LHS,
1263 const DenseMapIterator &RHS) {
1264 return !(LHS == RHS);
1265 }
1266
1267 inline DenseMapIterator &operator++() { // Preincrement
1268 assert(isHandleInSync() && "invalid iterator access!");
1269 assert(Ptr != End && "incrementing end() iterator");
1270 if (shouldReverseIterate<KeyT>()) {
1271 --Ptr;
1272 RetreatPastEmptyBuckets();
1273 return *this;
1274 }
1275 ++Ptr;
1276 AdvancePastEmptyBuckets();
1277 return *this;
1278 }
1279 DenseMapIterator operator++(int) { // Postincrement
1280 assert(isHandleInSync() && "invalid iterator access!");
1281 DenseMapIterator tmp = *this;
1282 ++*this;
1283 return tmp;
1284 }
1285
1286private:
1287 void AdvancePastEmptyBuckets() {
1288 assert(Ptr <= End);
1289 const KeyT Empty = KeyInfoT::getEmptyKey();
1290 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1291
1292 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1293 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1294 ++Ptr;
1295 }
1296
1297 void RetreatPastEmptyBuckets() {
1298 assert(Ptr >= End);
1299 const KeyT Empty = KeyInfoT::getEmptyKey();
1300 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1301
1302 while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1303 KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1304 --Ptr;
1305 }
1306};
1307
1308template <typename KeyT, typename ValueT, typename KeyInfoT>
1309inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1310 return X.getMemorySize();
1311}
1312
1313} // end namespace llvm
1314
1315#endif // LLVM_ADT_DENSEMAP_H
Definition: DenseMap.h:49