DenseMap.h
1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/ADL.h"
18#include "llvm/ADT/DenseMapInfo.h"
19#include "llvm/ADT/EpochTracker.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/STLForwardCompat.h"
22#include "llvm/Support/AlignOf.h"
23#include "llvm/Support/Compiler.h"
24#include "llvm/Support/MathExtras.h"
25#include "llvm/Support/MemAlloc.h"
26#include "llvm/Support/ReverseIteration.h"
27#include "llvm/Support/type_traits.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <cstring>
32#include <initializer_list>
33#include <iterator>
34#include <new>
35#include <type_traits>
36#include <utility>
37
38namespace llvm {
39
40namespace detail {
41
42// We extend a pair to allow users to override the bucket type with their own
43// implementation without requiring two members.
44template <typename KeyT, typename ValueT>
45struct DenseMapPair : public std::pair<KeyT, ValueT> {
46 using std::pair<KeyT, ValueT>::pair;
47
48 KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
49 const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
50 ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
51 const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
52};
53
54} // end namespace detail
55
56template <typename KeyT, typename ValueT,
57 typename KeyInfoT = DenseMapInfo<KeyT>,
58 typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
59 bool IsConst = false>
60class DenseMapIterator;
61
62template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
63 typename BucketT>
64class DenseMapBase : public DebugEpochBase {
65 template <typename T>
66 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
67
68public:
69 using size_type = unsigned;
70 using key_type = KeyT;
71 using mapped_type = ValueT;
72 using value_type = BucketT;
73
74 using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
75 using const_iterator =
76 DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
77
78 [[nodiscard]] inline iterator begin() {
79 return iterator::makeBegin(buckets(), empty(), *this);
80 }
81 [[nodiscard]] inline iterator end() {
82 return iterator::makeEnd(buckets(), *this);
83 }
84 [[nodiscard]] inline const_iterator begin() const {
85 return const_iterator::makeBegin(buckets(), empty(), *this);
86 }
87 [[nodiscard]] inline const_iterator end() const {
88 return const_iterator::makeEnd(buckets(), *this);
89 }
90
91 // Return a range to iterate over the keys in the map.
92 [[nodiscard]] inline auto keys() {
93 return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
94 }
95
96 // Return a range to iterate over the values in the map.
97 [[nodiscard]] inline auto values() {
98 return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
99 }
100
101 [[nodiscard]] inline auto keys() const {
102 return map_range(*this, [](const BucketT &P) { return P.getFirst(); });
103 }
104
105 [[nodiscard]] inline auto values() const {
106 return map_range(*this, [](const BucketT &P) { return P.getSecond(); });
107 }
108
109 [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
110 [[nodiscard]] unsigned size() const { return getNumEntries(); }
111
112 /// Grow the DenseMap so that it can contain at least \p NumEntries items
113 /// before resizing again.
114 void reserve(size_type NumEntries) {
115 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
116 incrementEpoch();
117 if (NumBuckets > getNumBuckets())
118 grow(NumBuckets);
119 }
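 // A minimal usage sketch for reserve(): pre-sizing the table so a known
 // number of insertions will not trigger intermediate grow() calls (assumes
 // int keys and values, which have DenseMapInfo specializations):
 //
 //   llvm::DenseMap<int, int> M;
 //   M.reserve(100);               // enough buckets for 100 entries
 //   for (int I = 0; I < 100; ++I)
 //     M.try_emplace(I, I * I);    // no rehashing inside this loop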
120
121 void clear() {
122 incrementEpoch();
123 if (getNumEntries() == 0 && getNumTombstones() == 0)
124 return;
125
126 // If the capacity of the array is huge, and the # elements used is small,
127 // shrink the array.
128 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
129 shrink_and_clear();
130 return;
131 }
132
133 const KeyT EmptyKey = getEmptyKey();
134 if constexpr (std::is_trivially_destructible_v<ValueT>) {
135 // Use a simpler loop when values don't need destruction.
136 for (BucketT &B : buckets())
137 B.getFirst() = EmptyKey;
138 } else {
139 const KeyT TombstoneKey = getTombstoneKey();
140 unsigned NumEntries = getNumEntries();
141 for (BucketT &B : buckets()) {
142 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey)) {
143 if (!KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
144 B.getSecond().~ValueT();
145 --NumEntries;
146 }
147 B.getFirst() = EmptyKey;
148 }
149 }
150 assert(NumEntries == 0 && "Node count imbalance!");
151 (void)NumEntries;
152 }
153 setNumEntries(0);
154 setNumTombstones(0);
155 }
156
157 /// Return true if the specified key is in the map, false otherwise.
158 [[nodiscard]] bool contains(const_arg_type_t<KeyT> Val) const {
159 return doFind(Val) != nullptr;
160 }
161
162 /// Return 1 if the specified key is in the map, 0 otherwise.
163 [[nodiscard]] size_type count(const_arg_type_t<KeyT> Val) const {
164 return contains(Val) ? 1 : 0;
165 }
166
167 [[nodiscard]] iterator find(const_arg_type_t<KeyT> Val) {
168 return find_as(Val);
169 }
170 [[nodiscard]] const_iterator find(const_arg_type_t<KeyT> Val) const {
171 return find_as(Val);
172 }
173
174 /// Alternate version of find() which allows a different, and possibly
175 /// less expensive, key type.
176 /// The DenseMapInfo is responsible for supplying methods
177 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
178 /// type used.
179 template <class LookupKeyT>
180 [[nodiscard]] iterator find_as(const LookupKeyT &Val) {
181 if (BucketT *Bucket = doFind(Val))
182 return makeIterator(Bucket);
183 return end();
184 }
185 template <class LookupKeyT>
186 [[nodiscard]] const_iterator find_as(const LookupKeyT &Val) const {
187 if (const BucketT *Bucket = doFind(Val))
188 return makeConstIterator(Bucket);
189 return end();
190 }
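 // A usage sketch for find() and find_as(). The CheapKeyView lookup type and
 // the use() helper below are hypothetical; find_as() only requires that
 // KeyInfoT also provide getHashValue() and isEqual() for that lookup type:
 //
 //   if (auto It = M.find(Key); It != M.end())
 //     use(It->second);              // It points at the (key, value) bucket
 //
 //   if (auto It = M.find_as(CheapKeyView); It != M.end())
 //     use(It->second);              // same lookup, without materializing KeyT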
191
192 /// lookup - Return the entry for the specified key, or a default
193 /// constructed value if no such entry exists.
194 [[nodiscard]] ValueT lookup(const_arg_type_t<KeyT> Val) const {
195 if (const BucketT *Bucket = doFind(Val))
196 return Bucket->getSecond();
197 return ValueT();
198 }
199
200 // Return the entry with the specified key, or \p Default. This variant is
201 // useful, because `lookup` cannot be used with non-default-constructible
202 // values.
203 template <typename U = std::remove_cv_t<ValueT>>
204 [[nodiscard]] ValueT lookup_or(const_arg_type_t<KeyT> Val,
205 U &&Default) const {
206 if (const BucketT *Bucket = doFind(Val))
207 return Bucket->getSecond();
208 return Default;
209 }
210
211 /// at - Return the entry for the specified key, or abort if no such
212 /// entry exists.
213 [[nodiscard]] const ValueT &at(const_arg_type_t<KeyT> Val) const {
214 auto Iter = this->find(std::move(Val));
215 assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
216 return Iter->second;
217 }
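 // A sketch contrasting the non-inserting accessors above (int values
 // assumed):
 //
 //   int A = M.lookup(K);          // value, or int() == 0 if K is absent
 //   int B = M.lookup_or(K, -1);   // value, or -1; usable when ValueT is not
 //                                 // default-constructible
 //   const int &C = M.at(K);       // asserts that K is present
 //   bool Has = M.contains(K);     // presence test without reading the value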
218
219 // Inserts a (key, value) pair into the map if the key isn't already in the map.
220 // If the key is already in the map, it returns false and doesn't update the
221 // value.
222 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
223 return try_emplace_impl(KV.first, KV.second);
224 }
225
226 // Inserts a (key, value) pair into the map if the key isn't already in the map.
227 // If the key is already in the map, it returns false and doesn't update the
228 // value.
229 std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
230 return try_emplace_impl(std::move(KV.first), std::move(KV.second));
231 }
232
233 // Inserts a (key, value) pair into the map if the key isn't already in the map.
234 // The value is constructed in-place if the key is not in the map, otherwise
235 // it is not moved.
236 template <typename... Ts>
237 std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&...Args) {
238 return try_emplace_impl(std::move(Key), std::forward<Ts>(Args)...);
239 }
240
241 // Inserts a (key, value) pair into the map if the key isn't already in the map.
242 // The value is constructed in-place if the key is not in the map, otherwise
243 // it is not moved.
244 template <typename... Ts>
245 std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&...Args) {
246 return try_emplace_impl(Key, std::forward<Ts>(Args)...);
247 }
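 // A sketch of the insertion result: insert() and try_emplace() both return a
 // (iterator, bool) pair whose bool reports whether a new entry was created
 // (the value argument 42 is just an example):
 //
 //   auto [It, Inserted] = M.try_emplace(Key, 42);
 //   if (!Inserted) {
 //     // Key was already present; It refers to the existing entry and the
 //     // value arguments were not consumed.
 //   }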
248
249 /// Alternate version of insert() which allows a different, and possibly
250 /// less expensive, key type.
251 /// The DenseMapInfo is responsible for supplying methods
252 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
253 /// type used.
254 template <typename LookupKeyT>
255 std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
256 const LookupKeyT &Val) {
257 BucketT *TheBucket;
258 if (LookupBucketFor(Val, TheBucket))
259 return {makeIterator(TheBucket), false}; // Already in map.
260
261 // Otherwise, insert the new element.
262 TheBucket = findBucketForInsertion(Val, TheBucket);
263 TheBucket->getFirst() = std::move(KV.first);
264 ::new (&TheBucket->getSecond()) ValueT(std::move(KV.second));
265 return {makeIterator(TheBucket), true};
266 }
267
268 /// insert - Range insertion of pairs.
269 template <typename InputIt> void insert(InputIt I, InputIt E) {
270 for (; I != E; ++I)
271 insert(*I);
272 }
273
274 /// Inserts range of 'std::pair<KeyT, ValueT>' values into the map.
275 template <typename Range> void insert_range(Range &&R) {
276 insert(adl_begin(R), adl_end(R));
277 }
278
279 template <typename V>
280 std::pair<iterator, bool> insert_or_assign(const KeyT &Key, V &&Val) {
281 auto Ret = try_emplace(Key, std::forward<V>(Val));
282 if (!Ret.second)
283 Ret.first->second = std::forward<V>(Val);
284 return Ret;
285 }
286
287 template <typename V>
288 std::pair<iterator, bool> insert_or_assign(KeyT &&Key, V &&Val) {
289 auto Ret = try_emplace(std::move(Key), std::forward<V>(Val));
290 if (!Ret.second)
291 Ret.first->second = std::forward<V>(Val);
292 return Ret;
293 }
294
295 template <typename... Ts>
296 std::pair<iterator, bool> emplace_or_assign(const KeyT &Key, Ts &&...Args) {
297 auto Ret = try_emplace(Key, std::forward<Ts>(Args)...);
298 if (!Ret.second)
299 Ret.first->second = ValueT(std::forward<Ts>(Args)...);
300 return Ret;
301 }
302
303 template <typename... Ts>
304 std::pair<iterator, bool> emplace_or_assign(KeyT &&Key, Ts &&...Args) {
305 auto Ret = try_emplace(std::move(Key), std::forward<Ts>(Args)...);
306 if (!Ret.second)
307 Ret.first->second = ValueT(std::forward<Ts>(Args)...);
308 return Ret;
309 }
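 // A sketch of the overwriting variants above: unlike insert()/try_emplace(),
 // these update the mapped value when the key already exists (A and B stand
 // for whatever arguments ValueT's constructor takes):
 //
 //   M.insert_or_assign(Key, NewVal);   // assigns NewVal if Key was present
 //   M.emplace_or_assign(Key, A, B);    // constructs ValueT(A, B) either way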
310
311 bool erase(const KeyT &Val) {
312 BucketT *TheBucket = doFind(Val);
313 if (!TheBucket)
314 return false; // not in map.
315
316 TheBucket->getSecond().~ValueT();
317 TheBucket->getFirst() = getTombstoneKey();
318 decrementNumEntries();
319 incrementNumTombstones();
320 return true;
321 }
322 void erase(iterator I) {
323 BucketT *TheBucket = &*I;
324 TheBucket->getSecond().~ValueT();
325 TheBucket->getFirst() = getTombstoneKey();
326 decrementNumEntries();
327 incrementNumTombstones();
328 }
329
330 ValueT &operator[](const KeyT &Key) {
331 return lookupOrInsertIntoBucket(Key).first->second;
332 }
333
334 ValueT &operator[](KeyT &&Key) {
335 return lookupOrInsertIntoBucket(std::move(Key)).first->second;
336 }
337
338 /// isPointerIntoBucketsArray - Return true if the specified pointer points
339 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
340 /// value in the DenseMap).
341 [[nodiscard]] bool isPointerIntoBucketsArray(const void *Ptr) const {
342 return Ptr >= getBuckets() && Ptr < getBucketsEnd();
343 }
344
345 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
346 /// array. In conjunction with the previous method, this can be used to
347 /// determine whether an insertion caused the DenseMap to reallocate.
348 [[nodiscard]] const void *getPointerIntoBucketsArray() const {
349 return getBuckets();
350 }
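 // A sketch of the intended use of the two methods above: detecting whether
 // an insertion reallocated the bucket array:
 //
 //   const void *Before = M.getPointerIntoBucketsArray();
 //   M.try_emplace(Key, Value);
 //   bool Reallocated = Before != M.getPointerIntoBucketsArray();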
351
352protected:
353 DenseMapBase() = default;
354
355 void destroyAll() {
356 if (getNumBuckets() == 0) // Nothing to do.
357 return;
358
359 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
360 for (BucketT &B : buckets()) {
361 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
362 !KeyInfoT::isEqual(B.getFirst(), TombstoneKey))
363 B.getSecond().~ValueT();
364 B.getFirst().~KeyT();
365 }
366 }
367
368 void initEmpty() {
369 setNumEntries(0);
370 setNumTombstones(0);
371
372 assert((getNumBuckets() & (getNumBuckets() - 1)) == 0 &&
373 "# initial buckets must be a power of two!");
374 const KeyT EmptyKey = getEmptyKey();
375 for (BucketT &B : buckets())
376 ::new (&B.getFirst()) KeyT(EmptyKey);
377 }
378
379 /// Returns the number of buckets to allocate to ensure that the DenseMap can
380 /// accommodate \p NumEntries without needing to grow().
381 unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
382 // Ensure that "NumEntries * 4 < NumBuckets * 3"
383 if (NumEntries == 0)
384 return 0;
385 // +1 is required because of the strict inequality.
386 // For example, if NumEntries is 48, we need to return 128.
387 return NextPowerOf2(NumEntries * 4 / 3 + 1);
388 }
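 // Worked example for NumEntries == 48: 48 * 4 / 3 + 1 == 65 and
 // NextPowerOf2(65) == 128, which keeps the load factor strictly below 3/4
 // since 48 * 4 == 192 < 128 * 3 == 384.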
389
390 void moveFromOldBuckets(iterator_range<BucketT *> OldBuckets) {
391 initEmpty();
392
393 // Insert all the old elements.
394 const KeyT EmptyKey = getEmptyKey();
395 const KeyT TombstoneKey = getTombstoneKey();
396 for (BucketT &B : OldBuckets) {
397 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
398 !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
399 // Insert the key/value into the new table.
400 BucketT *DestBucket;
401 bool FoundVal = LookupBucketFor(B.getFirst(), DestBucket);
402 (void)FoundVal; // silence warning.
403 assert(!FoundVal && "Key already in new map?");
404 DestBucket->getFirst() = std::move(B.getFirst());
405 ::new (&DestBucket->getSecond()) ValueT(std::move(B.getSecond()));
406 incrementNumEntries();
407
408 // Free the value.
409 B.getSecond().~ValueT();
410 }
411 B.getFirst().~KeyT();
412 }
413 }
414
415 template <typename OtherBaseT>
416 void copyFrom(
417 const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
418 assert(&other != this);
419 assert(getNumBuckets() == other.getNumBuckets());
420
421 setNumEntries(other.getNumEntries());
422 setNumTombstones(other.getNumTombstones());
423
424 BucketT *Buckets = getBuckets();
425 const BucketT *OtherBuckets = other.getBuckets();
426 const size_t NumBuckets = getNumBuckets();
427 if constexpr (std::is_trivially_copyable_v<KeyT> &&
428 std::is_trivially_copyable_v<ValueT>) {
429 memcpy(reinterpret_cast<void *>(Buckets), OtherBuckets,
430 NumBuckets * sizeof(BucketT));
431 } else {
432 const KeyT EmptyKey = getEmptyKey();
433 const KeyT TombstoneKey = getTombstoneKey();
434 for (size_t I = 0; I < NumBuckets; ++I) {
435 ::new (&Buckets[I].getFirst()) KeyT(OtherBuckets[I].getFirst());
436 if (!KeyInfoT::isEqual(Buckets[I].getFirst(), EmptyKey) &&
437 !KeyInfoT::isEqual(Buckets[I].getFirst(), TombstoneKey))
438 ::new (&Buckets[I].getSecond()) ValueT(OtherBuckets[I].getSecond());
439 }
440 }
441 }
442
443 template <typename LookupKeyT>
444 static unsigned getHashValue(const LookupKeyT &Val) {
445 return KeyInfoT::getHashValue(Val);
446 }
447
448 static const KeyT getEmptyKey() {
449 static_assert(std::is_base_of_v<DenseMapBase, DerivedT>,
450 "Must pass the derived type to this template!");
451 return KeyInfoT::getEmptyKey();
452 }
453
454 static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
455
456private:
457 DerivedT &derived() { return *static_cast<DerivedT *>(this); }
458 const DerivedT &derived() const {
459 return *static_cast<const DerivedT *>(this);
460 }
461
462 template <typename KeyArgT, typename... Ts>
463 std::pair<BucketT *, bool> lookupOrInsertIntoBucket(KeyArgT &&Key,
464 Ts &&...Args) {
465 BucketT *TheBucket = nullptr;
466 if (LookupBucketFor(Key, TheBucket))
467 return {TheBucket, false}; // Already in the map.
468
469 // Otherwise, insert the new element.
470 TheBucket = findBucketForInsertion(Key, TheBucket);
471 TheBucket->getFirst() = std::forward<KeyArgT>(Key);
472 ::new (&TheBucket->getSecond()) ValueT(std::forward<Ts>(Args)...);
473 return {TheBucket, true};
474 }
475
476 template <typename KeyArgT, typename... Ts>
477 std::pair<iterator, bool> try_emplace_impl(KeyArgT &&Key, Ts &&...Args) {
478 auto [Bucket, Inserted] = lookupOrInsertIntoBucket(
479 std::forward<KeyArgT>(Key), std::forward<Ts>(Args)...);
480 return {makeIterator(Bucket), Inserted};
481 }
482
483 iterator makeIterator(BucketT *TheBucket) {
484 return iterator::makeIterator(TheBucket, buckets(), *this);
485 }
486
487 const_iterator makeConstIterator(const BucketT *TheBucket) const {
488 return const_iterator::makeIterator(TheBucket, buckets(), *this);
489 }
490
491 unsigned getNumEntries() const { return derived().getNumEntries(); }
492
493 void setNumEntries(unsigned Num) { derived().setNumEntries(Num); }
494
495 void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }
496
497 void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }
498
499 unsigned getNumTombstones() const { return derived().getNumTombstones(); }
500
501 void setNumTombstones(unsigned Num) { derived().setNumTombstones(Num); }
502
503 void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }
504
505 void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }
506
507 const BucketT *getBuckets() const { return derived().getBuckets(); }
508
509 BucketT *getBuckets() { return derived().getBuckets(); }
510
511 unsigned getNumBuckets() const { return derived().getNumBuckets(); }
512
513 BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }
514
515 const BucketT *getBucketsEnd() const {
516 return getBuckets() + getNumBuckets();
517 }
518
519 iterator_range<BucketT *> buckets() {
520 return llvm::make_range(getBuckets(), getBucketsEnd());
521 }
522
523 iterator_range<const BucketT *> buckets() const {
524 return llvm::make_range(getBuckets(), getBucketsEnd());
525 }
526
527 void grow(unsigned AtLeast) { derived().grow(AtLeast); }
528
529 void shrink_and_clear() { derived().shrink_and_clear(); }
530
531 template <typename LookupKeyT>
532 BucketT *findBucketForInsertion(const LookupKeyT &Lookup,
533 BucketT *TheBucket) {
534 incrementEpoch();
535
536 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
537 // the buckets are empty (meaning that many are filled with tombstones),
538 // grow the table.
539 //
540 // The latter case is tricky. For example, if we had one empty bucket with
541 // tons of tombstones, failing lookups (e.g. for insertion) would have to
542 // probe almost the entire table until it found the empty bucket. If the
543 // table were completely filled with tombstones, no lookup would ever succeed,
544 // causing infinite loops in lookup.
545 unsigned NewNumEntries = getNumEntries() + 1;
546 unsigned NumBuckets = getNumBuckets();
547 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
548 this->grow(NumBuckets * 2);
549 LookupBucketFor(Lookup, TheBucket);
550 } else if (LLVM_UNLIKELY(NumBuckets -
551 (NewNumEntries + getNumTombstones()) <=
552 NumBuckets / 8)) {
553 this->grow(NumBuckets);
554 LookupBucketFor(Lookup, TheBucket);
555 }
556 assert(TheBucket);
557
558 // Only update the state after we've grown our bucket space appropriately
559 // so that when growing buckets we have a self-consistent entry count.
560 incrementNumEntries();
561
562 // If we are writing over a tombstone, remember this.
563 const KeyT EmptyKey = getEmptyKey();
564 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
565 decrementNumTombstones();
566
567 return TheBucket;
568 }
569
570 template <typename LookupKeyT>
571 const BucketT *doFind(const LookupKeyT &Val) const {
572 const BucketT *BucketsPtr = getBuckets();
573 const unsigned NumBuckets = getNumBuckets();
574 if (NumBuckets == 0)
575 return nullptr;
576
577 const KeyT EmptyKey = getEmptyKey();
578 unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
579 unsigned ProbeAmt = 1;
580 while (true) {
581 const BucketT *Bucket = BucketsPtr + BucketNo;
582 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, Bucket->getFirst())))
583 return Bucket;
584 if (LLVM_LIKELY(KeyInfoT::isEqual(Bucket->getFirst(), EmptyKey)))
585 return nullptr;
586
587 // Otherwise, it's a hash collision or a tombstone, continue quadratic
588 // probing.
589 BucketNo += ProbeAmt++;
590 BucketNo &= NumBuckets - 1;
591 }
592 }
593
594 template <typename LookupKeyT> BucketT *doFind(const LookupKeyT &Val) {
595 return const_cast<BucketT *>(
596 static_cast<const DenseMapBase *>(this)->doFind(Val));
597 }
598
599 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
600 /// FoundBucket. If the bucket contains the key and a value, this returns
601 /// true; otherwise it returns false and sets FoundBucket to a bucket with an
602 /// empty marker or tombstone.
603 template <typename LookupKeyT>
604 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
605 BucketT *BucketsPtr = getBuckets();
606 const unsigned NumBuckets = getNumBuckets();
607
608 if (NumBuckets == 0) {
609 FoundBucket = nullptr;
610 return false;
611 }
612
613 // FoundTombstone - Keep track of whether we find a tombstone while probing.
614 BucketT *FoundTombstone = nullptr;
615 const KeyT EmptyKey = getEmptyKey();
616 const KeyT TombstoneKey = getTombstoneKey();
617 assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
618 !KeyInfoT::isEqual(Val, TombstoneKey) &&
619 "Empty/Tombstone value shouldn't be inserted into map!");
620
621 unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
622 unsigned ProbeAmt = 1;
623 while (true) {
624 BucketT *ThisBucket = BucketsPtr + BucketNo;
625 // Found Val's bucket? If so, return it.
626 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
627 FoundBucket = ThisBucket;
628 return true;
629 }
630
631 // If we found an empty bucket, the key doesn't exist in the map; report
632 // the bucket the caller should use for an insertion.
633 if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
634 // If we've already seen a tombstone while probing, fill it in instead
635 // of the empty bucket we eventually probed to.
636 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
637 return false;
638 }
639
640 // If this is a tombstone, remember it. If Val ends up not in the map, we
641 // prefer to return it rather than something that would require more probing.
642 if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
643 !FoundTombstone)
644 FoundTombstone = ThisBucket; // Remember the first tombstone found.
645
646 // Otherwise, it's a hash collision or a tombstone, continue quadratic
647 // probing.
648 BucketNo += ProbeAmt++;
649 BucketNo &= (NumBuckets - 1);
650 }
651 }
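 // Note on the probe sequence used by doFind() and LookupBucketFor(): with
 // ProbeAmt taking the values 1, 2, 3, ..., the k-th probe lands on
 // (H + k * (k + 1) / 2) & (NumBuckets - 1), where H is the initial
 // hash-derived index. For a power-of-two bucket count these triangular
 // offsets eventually visit every bucket, so the loop terminates whenever at
 // least one bucket holds the empty key.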
652
653public:
654 /// Return the approximate size (in bytes) of the actual map.
655 /// This is just the raw memory used by DenseMap.
656 /// If entries are pointers to objects, the size of the referenced objects
657/// is not included.
658 [[nodiscard]] size_t getMemorySize() const {
659 return getNumBuckets() * sizeof(BucketT);
660 }
661};
662
663/// Equality comparison for DenseMap.
664///
665/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
666/// is also in RHS, and that no additional pairs are in RHS.
667/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
668/// complexity is linear; the worst case is O(N^2) (if every hash collides).
669template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
670 typename BucketT>
671[[nodiscard]] bool
672operator==(const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
673 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
674 if (LHS.size() != RHS.size())
675 return false;
676
677 for (auto &KV : LHS) {
678 auto I = RHS.find(KV.first);
679 if (I == RHS.end() || I->second != KV.second)
680 return false;
681 }
682
683 return true;
684}
685
686/// Inequality comparison for DenseMap.
687///
688/// Equivalent to !(LHS == RHS). See operator== for performance notes.
689template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
690 typename BucketT>
691[[nodiscard]] bool
692operator!=(const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
693 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
694 return !(LHS == RHS);
695}
696
697template <typename KeyT, typename ValueT,
698 typename KeyInfoT = DenseMapInfo<KeyT>,
699 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
700class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
701 KeyT, ValueT, KeyInfoT, BucketT> {
702 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
703
704 // Lift some types from the dependent base class into this class for
705 // simplicity of referring to them.
706 using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
707
708 BucketT *Buckets;
709 unsigned NumEntries;
710 unsigned NumTombstones;
711 unsigned NumBuckets;
712
713public:
714 /// Create a DenseMap with an optional \p NumElementsToReserve to guarantee
715 /// that this number of elements can be inserted in the map without grow().
716 explicit DenseMap(unsigned NumElementsToReserve = 0) {
717 init(NumElementsToReserve);
718 }
719
720 DenseMap(const DenseMap &other) : BaseT() {
721 init(0);
722 copyFrom(other);
723 }
724
725 DenseMap(DenseMap &&other) : BaseT() {
726 init(0);
727 swap(other);
728 }
729
730 template <typename InputIt> DenseMap(const InputIt &I, const InputIt &E) {
731 init(std::distance(I, E));
732 this->insert(I, E);
733 }
734
735 template <typename RangeT>
736 DenseMap(llvm::from_range_t, const RangeT &Range)
737 : DenseMap(adl_begin(Range), adl_end(Range)) {}
738
739 DenseMap(std::initializer_list<typename BaseT::value_type> Vals)
740 : DenseMap(Vals.begin(), Vals.end()) {}
741
742 ~DenseMap() {
743 this->destroyAll();
744 deallocateBuckets();
745 }
746
747 void swap(DenseMap &RHS) {
748 this->incrementEpoch();
749 RHS.incrementEpoch();
750 std::swap(Buckets, RHS.Buckets);
751 std::swap(NumEntries, RHS.NumEntries);
752 std::swap(NumTombstones, RHS.NumTombstones);
753 std::swap(NumBuckets, RHS.NumBuckets);
754 }
755
756 DenseMap &operator=(const DenseMap &other) {
757 if (&other != this)
758 copyFrom(other);
759 return *this;
760 }
761
762 DenseMap &operator=(DenseMap &&other) {
763 this->destroyAll();
764 deallocateBuckets();
765 init(0);
766 swap(other);
767 return *this;
768 }
769
770 void copyFrom(const DenseMap &other) {
771 this->destroyAll();
772 deallocateBuckets();
773 if (allocateBuckets(other.NumBuckets)) {
774 this->BaseT::copyFrom(other);
775 } else {
776 NumEntries = 0;
777 NumTombstones = 0;
778 }
779 }
780
781 void grow(unsigned AtLeast) {
782 unsigned OldNumBuckets = NumBuckets;
783 BucketT *OldBuckets = Buckets;
784
785 allocateBuckets(std::max<unsigned>(
786 64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
787 assert(Buckets);
788 if (!OldBuckets) {
789 this->BaseT::initEmpty();
790 return;
791 }
792
793 this->moveFromOldBuckets(
794 llvm::make_range(OldBuckets, OldBuckets + OldNumBuckets));
795
796 // Free the old table.
797 deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
798 alignof(BucketT));
799 }
800
801 void shrink_and_clear() {
802 unsigned OldNumBuckets = NumBuckets;
803 unsigned OldNumEntries = NumEntries;
804 this->destroyAll();
805
806 // Reduce the number of buckets.
807 unsigned NewNumBuckets = 0;
808 if (OldNumEntries)
809 NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
810 if (NewNumBuckets == NumBuckets) {
811 this->BaseT::initEmpty();
812 return;
813 }
814
815 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
816 alignof(BucketT));
817 init(NewNumBuckets);
818 }
819
820private:
821 unsigned getNumEntries() const { return NumEntries; }
822
823 void setNumEntries(unsigned Num) { NumEntries = Num; }
824
825 unsigned getNumTombstones() const { return NumTombstones; }
826
827 void setNumTombstones(unsigned Num) { NumTombstones = Num; }
828
829 BucketT *getBuckets() const { return Buckets; }
830
831 unsigned getNumBuckets() const { return NumBuckets; }
832
833 void deallocateBuckets() {
834 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
835 }
836
837 bool allocateBuckets(unsigned Num) {
838 NumBuckets = Num;
839 if (NumBuckets == 0) {
840 Buckets = nullptr;
841 return false;
842 }
843
844 Buckets = static_cast<BucketT *>(
845 allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
846 return true;
847 }
848
849 void init(unsigned InitNumEntries) {
850 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
851 if (allocateBuckets(InitBuckets)) {
852 this->BaseT::initEmpty();
853 } else {
854 NumEntries = 0;
855 NumTombstones = 0;
856 }
857 }
858};
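// A minimal end-to-end sketch of DenseMap (int keys and values assumed; any
// key type with a DenseMapInfo specialization works):
//
//   llvm::DenseMap<int, int> Squares;
//   for (int I = 0; I < 16; ++I)
//     Squares[I] = I * I;            // operator[] default-constructs, then assigns
//   assert(Squares.contains(3) && Squares.lookup(3) == 9);
//   Squares.erase(3);
//   for (const auto &KV : Squares)
//     (void)KV;                      // iteration order is unspecified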
859
860template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
861 typename KeyInfoT = DenseMapInfo<KeyT>,
862 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
863class SmallDenseMap
864 : public DenseMapBase<
865 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
866 ValueT, KeyInfoT, BucketT> {
867 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
868
869 // Lift some types from the dependent base class into this class for
870 // simplicity of referring to them.
871 using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
872
873 static_assert(isPowerOf2_64(InlineBuckets),
874 "InlineBuckets must be a power of 2.");
875
876 unsigned Small : 1;
877 unsigned NumEntries : 31;
878 unsigned NumTombstones;
879
880 struct LargeRep {
881 BucketT *Buckets;
882 unsigned NumBuckets;
883 iterator_range<BucketT *> buckets() {
884 return llvm::make_range(Buckets, Buckets + NumBuckets);
885 }
886 };
887
888 /// A "union" of an inline bucket array and the struct representing
889 /// a large bucket. This union will be discriminated by the 'Small' bit.
890 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
891
892public:
893 explicit SmallDenseMap(unsigned NumElementsToReserve = 0) {
894 init(NumElementsToReserve);
895 }
896
897 SmallDenseMap(const SmallDenseMap &other) : BaseT() {
898 init(0);
899 copyFrom(other);
900 }
901
902 SmallDenseMap(SmallDenseMap &&other) : BaseT() {
903 init(0);
904 swap(other);
905 }
906
907 template <typename InputIt>
908 SmallDenseMap(const InputIt &I, const InputIt &E) {
909 init(std::distance(I, E));
910 this->insert(I, E);
911 }
912
913 template <typename RangeT>
914 SmallDenseMap(llvm::from_range_t, const RangeT &Range)
915 : SmallDenseMap(adl_begin(Range), adl_end(Range)) {}
916
917 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
918 : SmallDenseMap(Vals.begin(), Vals.end()) {}
919
920 ~SmallDenseMap() {
921 this->destroyAll();
922 deallocateBuckets();
923 }
924
925 void swap(SmallDenseMap &RHS) {
926 unsigned TmpNumEntries = RHS.NumEntries;
927 RHS.NumEntries = NumEntries;
928 NumEntries = TmpNumEntries;
929 std::swap(NumTombstones, RHS.NumTombstones);
930
931 const KeyT EmptyKey = this->getEmptyKey();
932 const KeyT TombstoneKey = this->getTombstoneKey();
933 if (Small && RHS.Small) {
934 // If we're swapping inline bucket arrays, we have to cope with some of
935 // the tricky bits of DenseMap's storage system: the buckets are not
936 // fully initialized. Thus we swap every key, but we may have
937 // a one-directional move of the value.
938 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
939 BucketT *LHSB = &getInlineBuckets()[i],
940 *RHSB = &RHS.getInlineBuckets()[i];
941 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
942 !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
943 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
944 !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
945 if (hasLHSValue && hasRHSValue) {
946 // Swap together if we can...
947 std::swap(*LHSB, *RHSB);
948 continue;
949 }
950 // Swap separately and handle any asymmetry.
951 std::swap(LHSB->getFirst(), RHSB->getFirst());
952 if (hasLHSValue) {
953 ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
954 LHSB->getSecond().~ValueT();
955 } else if (hasRHSValue) {
956 ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
957 RHSB->getSecond().~ValueT();
958 }
959 }
960 return;
961 }
962 if (!Small && !RHS.Small) {
963 std::swap(*getLargeRep(), *RHS.getLargeRep());
964 return;
965 }
966
967 SmallDenseMap &SmallSide = Small ? *this : RHS;
968 SmallDenseMap &LargeSide = Small ? RHS : *this;
969
970 // First stash the large side's rep and move the small side across.
971 LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
972 LargeSide.getLargeRep()->~LargeRep();
973 LargeSide.Small = true;
974 // This is similar to the standard move-from-old-buckets, but the bucket
975 // count hasn't actually rotated in this case. So we have to carefully
976 // move construct the keys and values into their new locations, but there
977 // is no need to re-hash things.
978 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
979 BucketT *NewB = &LargeSide.getInlineBuckets()[i],
980 *OldB = &SmallSide.getInlineBuckets()[i];
981 ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
982 OldB->getFirst().~KeyT();
983 if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
984 !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
985 ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
986 OldB->getSecond().~ValueT();
987 }
988 }
989
990 // The hard part of moving the small buckets across is done, just move
991 // the TmpRep into its new home.
992 SmallSide.Small = false;
993 new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
994 }
995
996 SmallDenseMap &operator=(const SmallDenseMap &other) {
997 if (&other != this)
998 copyFrom(other);
999 return *this;
1000 }
1001
1002 SmallDenseMap &operator=(SmallDenseMap &&other) {
1003 this->destroyAll();
1004 deallocateBuckets();
1005 init(0);
1006 swap(other);
1007 return *this;
1008 }
1009
1010 void copyFrom(const SmallDenseMap &other) {
1011 this->destroyAll();
1012 deallocateBuckets();
1013 allocateBuckets(other.getNumBuckets());
1014 this->BaseT::copyFrom(other);
1015 }
1016
1017 void init(unsigned InitNumEntries) {
1018 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
1019 allocateBuckets(InitBuckets);
1020 this->BaseT::initEmpty();
1021 }
1022
1023 void grow(unsigned AtLeast) {
1024 if (AtLeast > InlineBuckets)
1025 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));
1026
1027 if (Small) {
1028 // First move the inline buckets into a temporary storage.
1029 AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1030 BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1031 BucketT *TmpEnd = TmpBegin;
1032
1033 // Loop over the buckets, moving non-empty, non-tombstones into the
1034 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1035 const KeyT EmptyKey = this->getEmptyKey();
1036 const KeyT TombstoneKey = this->getTombstoneKey();
1037 for (BucketT &B : inlineBuckets()) {
1038 if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
1039 !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
1040 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1041 "Too many inline buckets!");
1042 ::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
1043 ::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
1044 ++TmpEnd;
1045 B.getSecond().~ValueT();
1046 }
1047 B.getFirst().~KeyT();
1048 }
1049
1050 // AtLeast == InlineBuckets can happen if there are many tombstones,
1051 // and grow() is used to remove them. Usually we switch to the
1052 // large rep here.
1053 allocateBuckets(AtLeast);
1054 this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
1055 return;
1056 }
1057
1058 LargeRep OldRep = std::move(*getLargeRep());
1059 getLargeRep()->~LargeRep();
1060 allocateBuckets(AtLeast);
1061
1062 this->moveFromOldBuckets(OldRep.buckets());
1063
1064 // Free the old table.
1065 deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1066 alignof(BucketT));
1067 }
1068
1069 void shrink_and_clear() {
1070 unsigned OldSize = this->size();
1071 this->destroyAll();
1072
1073 // Reduce the number of buckets.
1074 unsigned NewNumBuckets = 0;
1075 if (OldSize) {
1076 NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1077 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1078 NewNumBuckets = 64;
1079 }
1080 if ((Small && NewNumBuckets <= InlineBuckets) ||
1081 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1082 this->BaseT::initEmpty();
1083 return;
1084 }
1085
1086 deallocateBuckets();
1087 init(NewNumBuckets);
1088 }
1089
1090private:
1091 unsigned getNumEntries() const { return NumEntries; }
1092
1093 void setNumEntries(unsigned Num) {
1094 // NumEntries is hardcoded to be 31 bits wide.
1095 assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
1096 NumEntries = Num;
1097 }
1098
1099 unsigned getNumTombstones() const { return NumTombstones; }
1100
1101 void setNumTombstones(unsigned Num) { NumTombstones = Num; }
1102
1103 const BucketT *getInlineBuckets() const {
1104 assert(Small);
1105 // Note that this cast does not violate aliasing rules as we assert that
1106 // the memory's dynamic type is the small, inline bucket buffer, and the
1107 // 'storage' is a POD containing a char buffer.
1108 return reinterpret_cast<const BucketT *>(&storage);
1109 }
1110
1111 BucketT *getInlineBuckets() {
1112 return const_cast<BucketT *>(
1113 const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1114 }
1115
1116 const LargeRep *getLargeRep() const {
1117 assert(!Small);
1118 // Note, same rule about aliasing as with getInlineBuckets.
1119 return reinterpret_cast<const LargeRep *>(&storage);
1120 }
1121
1122 LargeRep *getLargeRep() {
1123 return const_cast<LargeRep *>(
1124 const_cast<const SmallDenseMap *>(this)->getLargeRep());
1125 }
1126
1127 const BucketT *getBuckets() const {
1128 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1129 }
1130
1131 BucketT *getBuckets() {
1132 return const_cast<BucketT *>(
1133 const_cast<const SmallDenseMap *>(this)->getBuckets());
1134 }
1135
1136 unsigned getNumBuckets() const {
1137 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1138 }
1139
1140 iterator_range<BucketT *> inlineBuckets() {
1141 BucketT *Begin = getInlineBuckets();
1142 return llvm::make_range(Begin, Begin + InlineBuckets);
1143 }
1144
1145 void deallocateBuckets() {
1146 if (Small)
1147 return;
1148
1149 deallocate_buffer(getLargeRep()->Buckets,
1150 sizeof(BucketT) * getLargeRep()->NumBuckets,
1151 alignof(BucketT));
1152 getLargeRep()->~LargeRep();
1153 }
1154
1155 void allocateBuckets(unsigned Num) {
1156 if (Num <= InlineBuckets) {
1157 Small = true;
1158 } else {
1159 Small = false;
1160 BucketT *NewBuckets = static_cast<BucketT *>(
1161 allocate_buffer(sizeof(BucketT) * Num, alignof(BucketT)));
1162 new (getLargeRep()) LargeRep{NewBuckets, Num};
1163 }
1164 }
1165};
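// A sketch of SmallDenseMap: the first InlineBuckets buckets live inside the
// object itself, so small maps avoid heap allocation entirely; once the 3/4
// load threshold is crossed, storage moves to the heap-allocated LargeRep:
//
//   llvm::SmallDenseMap<int, int, 8> M;  // 8 inline buckets
//   for (int I = 0; I < 5; ++I)
//     M.try_emplace(I, I);               // still in the inline buffer
//   M.try_emplace(5, 5);                 // reaches 3/4 load; grows onto the heap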
1166
1167template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1168 bool IsConst>
1169class DenseMapIterator : DebugEpochBase::HandleBase {
1170 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1171 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1172
1173public:
1174 using difference_type = ptrdiff_t;
1175 using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
1176 using pointer = value_type *;
1177 using reference = value_type &;
1178 using iterator_category = std::forward_iterator_tag;
1179
1180private:
1181 using BucketItTy =
1182 std::conditional_t<shouldReverseIterate<KeyT>(),
1183 std::reverse_iterator<pointer>, pointer>;
1184
1185 BucketItTy Ptr = {};
1186 BucketItTy End = {};
1187
1188 DenseMapIterator(BucketItTy Pos, BucketItTy E, const DebugEpochBase &Epoch)
1189 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1190 assert(isHandleInSync() && "invalid construction!");
1191 }
1192
1193public:
1194 DenseMapIterator() = default;
1195
1196 static DenseMapIterator makeBegin(iterator_range<pointer> Buckets,
1197 bool IsEmpty, const DebugEpochBase &Epoch) {
1198 // When the map is empty, avoid the overhead of advancing/retreating past
1199 // empty buckets.
1200 if (IsEmpty)
1201 return makeEnd(Buckets, Epoch);
1202 auto R = maybeReverse(Buckets);
1203 DenseMapIterator Iter(R.begin(), R.end(), Epoch);
1204 Iter.AdvancePastEmptyBuckets();
1205 return Iter;
1206 }
1207
1208 static DenseMapIterator makeEnd(iterator_range<pointer> Buckets,
1209 const DebugEpochBase &Epoch) {
1210 auto R = maybeReverse(Buckets);
1211 return DenseMapIterator(R.end(), R.end(), Epoch);
1212 }
1213
1214 static DenseMapIterator makeIterator(pointer P,
1215 iterator_range<pointer> Buckets,
1216 const DebugEpochBase &Epoch) {
1217 auto R = maybeReverse(Buckets);
1218 constexpr int Offset = shouldReverseIterate<KeyT>() ? 1 : 0;
1219 return DenseMapIterator(BucketItTy(P + Offset), R.end(), Epoch);
1220 }
1221
1222 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1223 // for const iterator destinations so it doesn't end up as a user defined copy
1224 // constructor.
1225 template <bool IsConstSrc,
1226 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1227 DenseMapIterator(
1228 const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1229 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1230
1231 [[nodiscard]] reference operator*() const {
1232 assert(isHandleInSync() && "invalid iterator access!");
1233 assert(Ptr != End && "dereferencing end() iterator");
1234 return *Ptr;
1235 }
1236 [[nodiscard]] pointer operator->() const { return &operator*(); }
1237
1238 [[nodiscard]] friend bool operator==(const DenseMapIterator &LHS,
1239 const DenseMapIterator &RHS) {
1240 assert((!LHS.getEpochAddress() || LHS.isHandleInSync()) &&
1241 "handle not in sync!");
1242 assert((!RHS.getEpochAddress() || RHS.isHandleInSync()) &&
1243 "handle not in sync!");
1244 assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
1245 "comparing incomparable iterators!");
1246 return LHS.Ptr == RHS.Ptr;
1247 }
1248
1249 [[nodiscard]] friend bool operator!=(const DenseMapIterator &LHS,
1250 const DenseMapIterator &RHS) {
1251 return !(LHS == RHS);
1252 }
1253
1254 inline DenseMapIterator &operator++() { // Preincrement
1255 assert(isHandleInSync() && "invalid iterator access!");
1256 assert(Ptr != End && "incrementing end() iterator");
1257 ++Ptr;
1258 AdvancePastEmptyBuckets();
1259 return *this;
1260 }
1261 DenseMapIterator operator++(int) { // Postincrement
1262 assert(isHandleInSync() && "invalid iterator access!");
1263 DenseMapIterator tmp = *this;
1264 ++*this;
1265 return tmp;
1266 }
1267
1268private:
1269 void AdvancePastEmptyBuckets() {
1270 assert(Ptr <= End);
1271 const KeyT Empty = KeyInfoT::getEmptyKey();
1272 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1273
1274 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1275 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1276 ++Ptr;
1277 }
1278
1279 static auto maybeReverse(iterator_range<pointer> Range) {
1280 if constexpr (shouldReverseIterate<KeyT>())
1281 return reverse(Range);
1282 else
1283 return Range;
1284 }
1285};
1286
1287template <typename KeyT, typename ValueT, typename KeyInfoT>
1288[[nodiscard]] inline size_t
1289capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1290 return X.getMemorySize();
1291}
1292
1293} // end namespace llvm
1294
1295#endif // LLVM_ADT_DENSEMAP_H