1 | /* |
2 | * Copyright (C) 2005-2019 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2008 David Levin <[email protected]> |
4 | * |
5 | * This library is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU Library General Public |
7 | * License as published by the Free Software Foundation; either |
8 | * version 2 of the License, or (at your option) any later version. |
9 | * |
10 | * This library is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | * Library General Public License for more details. |
14 | * |
15 | * You should have received a copy of the GNU Library General Public License |
16 | * along with this library; see the file COPYING.LIB. If not, write to |
17 | * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, |
18 | * Boston, MA 02110-1301, USA. |
19 | * |
20 | */ |
21 | |
22 | #pragma once |
23 | |
24 | #include <atomic> |
25 | #include <iterator> |
26 | #include <mutex> |
27 | #include <string.h> |
28 | #include <type_traits> |
29 | #include <utility> |
30 | #include <wtf/Assertions.h> |
31 | #include <wtf/FastMalloc.h> |
32 | #include <wtf/HashTraits.h> |
33 | #include <wtf/Lock.h> |
34 | #include <wtf/MathExtras.h> |
35 | #include <wtf/RandomNumber.h> |
36 | #include <wtf/StdLibExtras.h> |
37 | #include <wtf/ValueCheck.h> |
38 | |
39 | #define DUMP_HASHTABLE_STATS 0 |
40 | #define DUMP_HASHTABLE_STATS_PER_TABLE 0 |
41 | |
42 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
43 | #include <wtf/DataLog.h> |
44 | #endif |
45 | |
46 | namespace WTF { |
47 | |
48 | // Enables internal WTF consistency checks that are invoked automatically. Non-WTF callers can call checkTableConsistency() even if internal checks are disabled. |
49 | #define CHECK_HASHTABLE_CONSISTENCY 0 |
50 | |
51 | #ifdef NDEBUG |
52 | #define CHECK_HASHTABLE_ITERATORS 0 |
53 | #define CHECK_HASHTABLE_USE_AFTER_DESTRUCTION 0 |
54 | #else |
55 | #define CHECK_HASHTABLE_ITERATORS 1 |
56 | #define CHECK_HASHTABLE_USE_AFTER_DESTRUCTION 1 |
57 | #endif |
58 | |
59 | #if DUMP_HASHTABLE_STATS |
60 | |
61 | struct HashTableStats { |
62 | // The following variables are all atomically incremented when modified. |
63 | WTF_EXPORT_PRIVATE static std::atomic<unsigned> numAccesses; |
64 | WTF_EXPORT_PRIVATE static std::atomic<unsigned> numRehashes; |
65 | WTF_EXPORT_PRIVATE static std::atomic<unsigned> numRemoves; |
66 | WTF_EXPORT_PRIVATE static std::atomic<unsigned> numReinserts; |
67 | |
68 | // The following variables are only modified in the recordCollisionAtCount method within a mutex. |
69 | WTF_EXPORT_PRIVATE static unsigned maxCollisions; |
70 | WTF_EXPORT_PRIVATE static unsigned numCollisions; |
71 | WTF_EXPORT_PRIVATE static unsigned collisionGraph[4096]; |
72 | |
73 | WTF_EXPORT_PRIVATE static void recordCollisionAtCount(unsigned count); |
74 | WTF_EXPORT_PRIVATE static void dumpStats(); |
75 | }; |
76 | |
77 | #endif |
78 | |
79 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
80 | class HashTable; |
81 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
82 | class HashTableIterator; |
83 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
84 | class HashTableConstIterator; |
85 | |
86 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
87 | void addIterator(const HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*, |
88 | HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*); |
89 | |
90 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
91 | void removeIterator(HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*); |
92 | |
93 | #if !CHECK_HASHTABLE_ITERATORS |
94 | |
95 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
96 | inline void addIterator(const HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*, |
97 | HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*) { } |
98 | |
99 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
100 | inline void removeIterator(HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>*) { } |
101 | |
102 | #endif |
103 | |
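// HashItemKnownGood is passed to the iterator constructors below when the position is already known
// to be valid to report as-is (a found entry or the end position), so the constructor can skip the
// scan past empty and deleted buckets.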
104 | typedef enum { HashItemKnownGood } HashItemKnownGoodTag; |
105 | |
106 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
107 | class HashTableConstIterator : public std::iterator<std::forward_iterator_tag, Value, std::ptrdiff_t, const Value*, const Value&> { |
108 | WTF_MAKE_FAST_ALLOCATED; |
109 | private: |
110 | typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> HashTableType; |
111 | typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> iterator; |
112 | typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator; |
113 | typedef Value ValueType; |
114 | typedef const ValueType& ReferenceType; |
115 | typedef const ValueType* PointerType; |
116 | |
117 | friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>; |
118 | friend class HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>; |
119 | |
120 | void skipEmptyBuckets() |
121 | { |
122 | while (m_position != m_endPosition && HashTableType::isEmptyOrDeletedBucket(*m_position)) |
123 | ++m_position; |
124 | } |
125 | |
126 | HashTableConstIterator(const HashTableType* table, PointerType position, PointerType endPosition) |
127 | : m_position(position), m_endPosition(endPosition) |
128 | { |
129 | addIterator(table, this); |
130 | skipEmptyBuckets(); |
131 | } |
132 | |
133 | HashTableConstIterator(const HashTableType* table, PointerType position, PointerType endPosition, HashItemKnownGoodTag) |
134 | : m_position(position), m_endPosition(endPosition) |
135 | { |
136 | addIterator(table, this); |
137 | } |
138 | |
139 | public: |
140 | HashTableConstIterator() |
141 | { |
142 | addIterator(static_cast<const HashTableType*>(0), this); |
143 | } |
144 | |
145 | // default copy, assignment and destructor are OK if CHECK_HASHTABLE_ITERATORS is 0 |
146 | |
147 | #if CHECK_HASHTABLE_ITERATORS |
148 | ~HashTableConstIterator() |
149 | { |
150 | removeIterator(this); |
151 | } |
152 | |
153 | HashTableConstIterator(const const_iterator& other) |
154 | : m_position(other.m_position), m_endPosition(other.m_endPosition) |
155 | { |
156 | addIterator(other.m_table, this); |
157 | } |
158 | |
159 | const_iterator& operator=(const const_iterator& other) |
160 | { |
161 | m_position = other.m_position; |
162 | m_endPosition = other.m_endPosition; |
163 | |
164 | removeIterator(this); |
165 | addIterator(other.m_table, this); |
166 | |
167 | return *this; |
168 | } |
169 | #endif |
170 | |
171 | PointerType get() const |
172 | { |
173 | checkValidity(); |
174 | return m_position; |
175 | } |
176 | ReferenceType operator*() const { return *get(); } |
177 | PointerType operator->() const { return get(); } |
178 | |
179 | const_iterator& operator++() |
180 | { |
181 | checkValidity(); |
182 | ASSERT(m_position != m_endPosition); |
183 | ++m_position; |
184 | skipEmptyBuckets(); |
185 | return *this; |
186 | } |
187 | |
188 | // postfix ++ intentionally omitted |
189 | |
190 | // Comparison. |
191 | bool operator==(const const_iterator& other) const |
192 | { |
193 | checkValidity(other); |
194 | return m_position == other.m_position; |
195 | } |
196 | bool operator!=(const const_iterator& other) const |
197 | { |
198 | checkValidity(other); |
199 | return m_position != other.m_position; |
200 | } |
201 | bool operator==(const iterator& other) const |
202 | { |
203 | return *this == static_cast<const_iterator>(other); |
204 | } |
205 | bool operator!=(const iterator& other) const |
206 | { |
207 | return *this != static_cast<const_iterator>(other); |
208 | } |
209 | |
210 | private: |
211 | void checkValidity() const |
212 | { |
213 | #if CHECK_HASHTABLE_ITERATORS |
214 | ASSERT(m_table); |
215 | #endif |
216 | } |
217 | |
218 | |
219 | #if CHECK_HASHTABLE_ITERATORS |
220 | void checkValidity(const const_iterator& other) const |
221 | { |
222 | ASSERT(m_table); |
223 | ASSERT_UNUSED(other, other.m_table); |
224 | ASSERT(m_table == other.m_table); |
225 | } |
226 | #else |
227 | void checkValidity(const const_iterator&) const { } |
228 | #endif |
229 | |
230 | PointerType m_position { nullptr }; |
231 | PointerType m_endPosition { nullptr }; |
232 | |
233 | #if CHECK_HASHTABLE_ITERATORS |
234 | public: |
// Any modification of the m_next or m_previous of an iterator that is in the linked list of a HashTable's m_iterators
// must be guarded by m_table->m_mutex.
237 | mutable const HashTableType* m_table; |
238 | mutable const_iterator* m_next; |
239 | mutable const_iterator* m_previous; |
240 | #endif |
241 | }; |
242 | |
243 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
244 | class HashTableIterator : public std::iterator<std::forward_iterator_tag, Value, std::ptrdiff_t, Value*, Value&> { |
245 | WTF_MAKE_FAST_ALLOCATED; |
246 | private: |
247 | typedef HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> HashTableType; |
248 | typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> iterator; |
249 | typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator; |
250 | typedef Value ValueType; |
251 | typedef ValueType& ReferenceType; |
252 | typedef ValueType* PointerType; |
253 | |
254 | friend class HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>; |
255 | |
256 | HashTableIterator(HashTableType* table, PointerType pos, PointerType end) : m_iterator(table, pos, end) { } |
257 | HashTableIterator(HashTableType* table, PointerType pos, PointerType end, HashItemKnownGoodTag tag) : m_iterator(table, pos, end, tag) { } |
258 | |
259 | public: |
260 | HashTableIterator() { } |
261 | |
262 | // default copy, assignment and destructor are OK |
263 | |
264 | PointerType get() const { return const_cast<PointerType>(m_iterator.get()); } |
265 | ReferenceType operator*() const { return *get(); } |
266 | PointerType operator->() const { return get(); } |
267 | |
268 | iterator& operator++() { ++m_iterator; return *this; } |
269 | |
270 | // postfix ++ intentionally omitted |
271 | |
272 | // Comparison. |
273 | bool operator==(const iterator& other) const { return m_iterator == other.m_iterator; } |
274 | bool operator!=(const iterator& other) const { return m_iterator != other.m_iterator; } |
275 | bool operator==(const const_iterator& other) const { return m_iterator == other; } |
276 | bool operator!=(const const_iterator& other) const { return m_iterator != other; } |
277 | |
278 | operator const_iterator() const { return m_iterator; } |
279 | |
280 | private: |
281 | const_iterator m_iterator; |
282 | }; |
283 | |
284 | template<typename ValueTraits, typename HashFunctions> class IdentityHashTranslator { |
285 | public: |
286 | template<typename T> static unsigned hash(const T& key) { return HashFunctions::hash(key); } |
287 | template<typename T, typename U> static bool equal(const T& a, const U& b) { return HashFunctions::equal(a, b); } |
288 | template<typename T, typename U, typename V> static void translate(T& location, const U&, V&& value) |
289 | { |
290 | ValueTraits::assignToEmpty(location, std::forward<V>(value)); |
291 | } |
292 | }; |
293 | |
294 | template<typename IteratorType> struct HashTableAddResult { |
295 | HashTableAddResult() : isNewEntry(false) { } |
296 | HashTableAddResult(IteratorType iter, bool isNewEntry) : iterator(iter), isNewEntry(isNewEntry) { } |
297 | IteratorType iterator; |
298 | bool isNewEntry; |
299 | |
300 | explicit operator bool() const { return isNewEntry; } |
301 | }; |
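// A sketch of caller-side usage (for illustration only; WTF::HashSet and WTF::HashMap surface this
// type from their own add() functions):
//
//     HashSet<int> set;
//     if (set.add(1).isNewEntry) {
//         // first time this value has been seen
//     }
//
// The boolean conversion operator mirrors isNewEntry, so `if (set.add(1))` reads the same way.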
302 | |
303 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
304 | class HashTable { |
305 | public: |
306 | typedef HashTableIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> iterator; |
307 | typedef HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits> const_iterator; |
308 | typedef Traits ValueTraits; |
309 | typedef Key KeyType; |
310 | typedef Value ValueType; |
311 | typedef IdentityHashTranslator<ValueTraits, HashFunctions> IdentityTranslatorType; |
312 | typedef HashTableAddResult<iterator> AddResult; |
313 | |
314 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
315 | struct Stats { |
316 | WTF_MAKE_STRUCT_FAST_ALLOCATED; |
317 | |
318 | Stats() |
319 | : numAccesses(0) |
320 | , numRehashes(0) |
321 | , numRemoves(0) |
322 | , numReinserts(0) |
323 | , maxCollisions(0) |
324 | , numCollisions(0) |
325 | , collisionGraph() |
326 | { |
327 | } |
328 | |
329 | unsigned numAccesses; |
330 | unsigned numRehashes; |
331 | unsigned numRemoves; |
332 | unsigned numReinserts; |
333 | |
334 | unsigned maxCollisions; |
335 | unsigned numCollisions; |
336 | unsigned collisionGraph[4096]; |
337 | |
338 | void recordCollisionAtCount(unsigned count) |
339 | { |
340 | if (count > maxCollisions) |
341 | maxCollisions = count; |
342 | numCollisions++; |
343 | collisionGraph[count]++; |
344 | } |
345 | |
346 | void dumpStats() |
347 | { |
dataLogF("\nWTF::HashTable::Stats dump\n\n");
dataLogF("%d accesses\n", numAccesses);
dataLogF("%d total collisions, average %.2f probes per access\n", numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses);
dataLogF("longest collision chain: %d\n", maxCollisions);
for (unsigned i = 1; i <= maxCollisions; i++) {
dataLogF(" %d lookups with exactly %d collisions (%.2f%%, %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i+1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses);
}
dataLogF("%d rehashes\n", numRehashes);
dataLogF("%d reinserts\n", numReinserts);
357 | } |
358 | }; |
359 | #endif |
360 | |
361 | HashTable(); |
362 | ~HashTable() |
363 | { |
364 | invalidateIterators(); |
365 | if (m_table) |
366 | deallocateTable(m_table, m_tableSize); |
367 | #if CHECK_HASHTABLE_USE_AFTER_DESTRUCTION |
368 | m_table = (ValueType*)(uintptr_t)0xbbadbeef; |
369 | #endif |
370 | } |
371 | |
372 | HashTable(const HashTable&); |
373 | void swap(HashTable&); |
374 | HashTable& operator=(const HashTable&); |
375 | |
376 | HashTable(HashTable&&); |
377 | HashTable& operator=(HashTable&&); |
378 | |
379 | // When the hash table is empty, just return the same iterator for end as for begin. |
380 | // This is more efficient because we don't have to skip all the empty and deleted |
381 | // buckets, and iterating an empty table is a common case that's worth optimizing. |
382 | iterator begin() { return isEmpty() ? end() : makeIterator(m_table); } |
383 | iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); } |
384 | const_iterator begin() const { return isEmpty() ? end() : makeConstIterator(m_table); } |
385 | const_iterator end() const { return makeKnownGoodConstIterator(m_table + m_tableSize); } |
386 | |
387 | iterator random() |
388 | { |
389 | if (isEmpty()) |
390 | return end(); |
391 | |
392 | while (1) { |
393 | auto& bucket = m_table[weakRandomUint32() & m_tableSizeMask]; |
394 | if (!isEmptyOrDeletedBucket(bucket)) |
395 | return makeKnownGoodIterator(&bucket); |
}
397 | } |
398 | |
399 | const_iterator random() const { return static_cast<const_iterator>(const_cast<HashTable*>(this)->random()); } |
400 | |
401 | unsigned size() const { return m_keyCount; } |
402 | unsigned capacity() const { return m_tableSize; } |
403 | bool isEmpty() const { return !m_keyCount; } |
404 | |
405 | void reserveInitialCapacity(unsigned keyCount) |
406 | { |
407 | ASSERT(!m_table); |
408 | ASSERT(!m_tableSize); |
409 | ASSERT(!m_deletedCount); |
410 | |
411 | unsigned minimumTableSize = KeyTraits::minimumTableSize; |
412 | unsigned newTableSize = std::max(minimumTableSize, computeBestTableSize(keyCount)); |
413 | |
414 | m_tableSize = newTableSize; |
415 | m_tableSizeMask = newTableSize - 1; |
416 | m_table = allocateTable(newTableSize); |
417 | } |
418 | |
419 | AddResult add(const ValueType& value) { return add<IdentityTranslatorType>(Extractor::extract(value), value); } |
420 | AddResult add(ValueType&& value) { return add<IdentityTranslatorType>(Extractor::extract(value), WTFMove(value)); } |
421 | |
422 | // A special version of add() that finds the object by hashing and comparing |
423 | // with some other type, to avoid the cost of type conversion if the object is already |
424 | // in the table. |
425 | template<typename HashTranslator, typename T, typename Extra> AddResult add(T&& key, Extra&&); |
426 | template<typename HashTranslator, typename T, typename Extra> AddResult addPassingHashCode(T&& key, Extra&&); |
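// For illustration, a translator for the calls above has this shape (LookupKey / SomeTranslator are
// placeholder names, not WTF types):
//
//     struct SomeTranslator {
//         static unsigned hash(const LookupKey&);
//         static bool equal(const Key& storedKey, const LookupKey&);
//         static void translate(Value& emptyBucket, LookupKey&&, Extra&&);
//     };
//
// This is what lets, for example, a table keyed by a heavyweight string type be probed with a raw
// character pointer, constructing the owning key only when the entry is genuinely new.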
427 | |
428 | iterator find(const KeyType& key) { return find<IdentityTranslatorType>(key); } |
429 | const_iterator find(const KeyType& key) const { return find<IdentityTranslatorType>(key); } |
430 | bool contains(const KeyType& key) const { return contains<IdentityTranslatorType>(key); } |
431 | |
432 | template<typename HashTranslator, typename T> iterator find(const T&); |
433 | template<typename HashTranslator, typename T> const_iterator find(const T&) const; |
434 | template<typename HashTranslator, typename T> bool contains(const T&) const; |
435 | |
436 | void remove(const KeyType&); |
437 | void remove(iterator); |
438 | void removeWithoutEntryConsistencyCheck(iterator); |
439 | void removeWithoutEntryConsistencyCheck(const_iterator); |
440 | template<typename Functor> |
441 | bool removeIf(const Functor&); |
442 | void clear(); |
443 | |
444 | static bool isEmptyBucket(const ValueType& value) { return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); } |
445 | static bool isReleasedWeakBucket(const ValueType& value) { return isHashTraitsReleasedWeakValue<KeyTraits>(Extractor::extract(value)); } |
446 | static bool isDeletedBucket(const ValueType& value) { return KeyTraits::isDeletedValue(Extractor::extract(value)); } |
447 | static bool isEmptyOrDeletedBucket(const ValueType& value) { return isEmptyBucket(value) || isDeletedBucket(value); } |
448 | |
449 | ValueType* lookup(const Key& key) { return lookup<IdentityTranslatorType>(key); } |
450 | template<typename HashTranslator, typename T> ValueType* lookup(const T&); |
451 | template<typename HashTranslator, typename T> ValueType* inlineLookup(const T&); |
452 | |
453 | #if !ASSERT_DISABLED |
454 | void checkTableConsistency() const; |
455 | #else |
456 | static void checkTableConsistency() { } |
457 | #endif |
458 | #if CHECK_HASHTABLE_CONSISTENCY |
459 | void internalCheckTableConsistency() const { checkTableConsistency(); } |
460 | void internalCheckTableConsistencyExceptSize() const { checkTableConsistencyExceptSize(); } |
461 | #else |
462 | static void internalCheckTableConsistencyExceptSize() { } |
463 | static void internalCheckTableConsistency() { } |
464 | #endif |
465 | |
466 | private: |
467 | static ValueType* allocateTable(unsigned size); |
468 | static void deallocateTable(ValueType* table, unsigned size); |
469 | |
470 | typedef std::pair<ValueType*, bool> LookupType; |
471 | typedef std::pair<LookupType, unsigned> FullLookupType; |
472 | |
LookupType lookupForWriting(const Key& key) { return lookupForWriting<IdentityTranslatorType>(key); }
474 | template<typename HashTranslator, typename T> FullLookupType fullLookupForWriting(const T&); |
475 | template<typename HashTranslator, typename T> LookupType lookupForWriting(const T&); |
476 | |
477 | template<typename HashTranslator, typename T, typename Extra> void addUniqueForInitialization(T&& key, Extra&&); |
478 | |
479 | template<typename HashTranslator, typename T> void checkKey(const T&); |
480 | |
481 | void removeAndInvalidateWithoutEntryConsistencyCheck(ValueType*); |
482 | void removeAndInvalidate(ValueType*); |
483 | void remove(ValueType*); |
484 | |
485 | static constexpr unsigned computeBestTableSize(unsigned keyCount); |
486 | bool shouldExpand() const { return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; } |
487 | bool mustRehashInPlace() const { return m_keyCount * m_minLoad < m_tableSize * 2; } |
488 | bool shouldShrink() const { return m_keyCount * m_minLoad < m_tableSize && m_tableSize > KeyTraits::minimumTableSize; } |
489 | ValueType* expand(ValueType* entry = nullptr); |
490 | void shrink() { rehash(m_tableSize / 2, nullptr); } |
491 | void shrinkToBestSize(); |
492 | |
493 | void deleteReleasedWeakBuckets(); |
494 | |
495 | ValueType* rehash(unsigned newTableSize, ValueType* entry); |
496 | ValueType* reinsert(ValueType&&); |
497 | |
498 | static void initializeBucket(ValueType& bucket); |
499 | static void deleteBucket(ValueType& bucket) { hashTraitsDeleteBucket<Traits>(bucket); } |
500 | |
501 | FullLookupType makeLookupResult(ValueType* position, bool found, unsigned hash) |
502 | { return FullLookupType(LookupType(position, found), hash); } |
503 | |
504 | iterator makeIterator(ValueType* pos) { return iterator(this, pos, m_table + m_tableSize); } |
505 | const_iterator makeConstIterator(ValueType* pos) const { return const_iterator(this, pos, m_table + m_tableSize); } |
506 | iterator makeKnownGoodIterator(ValueType* pos) { return iterator(this, pos, m_table + m_tableSize, HashItemKnownGood); } |
507 | const_iterator makeKnownGoodConstIterator(ValueType* pos) const { return const_iterator(this, pos, m_table + m_tableSize, HashItemKnownGood); } |
508 | |
509 | #if !ASSERT_DISABLED |
510 | void checkTableConsistencyExceptSize() const; |
511 | #else |
512 | static void checkTableConsistencyExceptSize() { } |
513 | #endif |
514 | |
515 | #if CHECK_HASHTABLE_ITERATORS |
516 | void invalidateIterators(); |
517 | #else |
518 | static void invalidateIterators() { } |
519 | #endif |
520 | |
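// m_maxLoad and m_minLoad are divisors of the table size: the table is grown once it is at least
// 1/2 full (counting deleted buckets) and shrunk once live entries drop below 1/6 of the capacity,
// as long as it is above the minimum size.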
521 | static constexpr unsigned m_maxLoad = 2; |
522 | static constexpr unsigned m_minLoad = 6; |
523 | |
524 | ValueType* m_table; |
525 | unsigned m_tableSize; |
526 | unsigned m_tableSizeMask; |
527 | unsigned m_keyCount; |
528 | unsigned m_deletedCount; |
529 | |
530 | #if CHECK_HASHTABLE_ITERATORS |
531 | public: |
532 | // All access to m_iterators should be guarded with m_mutex. |
533 | mutable const_iterator* m_iterators; |
534 | // Use std::unique_ptr so HashTable can still be memmove'd or memcpy'ed. |
535 | mutable std::unique_ptr<Lock> m_mutex; |
536 | #endif |
537 | |
538 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
539 | public: |
540 | mutable std::unique_ptr<Stats> m_stats; |
541 | #endif |
542 | }; |
543 | |
544 | // Set all the bits to one after the most significant bit: 00110101010 -> 00111111111. |
545 | template<unsigned size> struct OneifyLowBits; |
546 | template<> |
547 | struct OneifyLowBits<0> { |
548 | static constexpr unsigned value = 0; |
549 | }; |
550 | template<unsigned number> |
551 | struct OneifyLowBits { |
552 | static constexpr unsigned value = number | OneifyLowBits<(number >> 1)>::value; |
553 | }; |
// Compute twice the first power of two greater than or equal to 'number', i.e. 2 * roundUpToPowerOfTwo(number).
// With maxLoad at 1/2 this is the capacity used for sizes that are not powers of two.
555 | template<unsigned number> |
556 | struct UpperPowerOfTwoBound { |
557 | static constexpr unsigned value = (OneifyLowBits<number - 1>::value + 1) * 2; |
558 | }; |
559 | |
// Sizes that are already powers of two would land exactly on the maxLoad limit with that capacity,
// so their capacity is doubled once more: 4 times the size.
562 | template<unsigned size, bool isPowerOfTwo> struct HashTableCapacityForSizeSplitter; |
563 | template<unsigned size> |
564 | struct HashTableCapacityForSizeSplitter<size, true> { |
565 | static constexpr unsigned value = size * 4; |
566 | }; |
567 | template<unsigned size> |
568 | struct HashTableCapacityForSizeSplitter<size, false> { |
569 | static constexpr unsigned value = UpperPowerOfTwoBound<size>::value; |
570 | }; |
571 | |
572 | // HashTableCapacityForSize computes the upper power of two capacity to hold the size parameter. |
573 | // This is done at compile time to initialize the HashTraits. |
574 | template<unsigned size> |
575 | struct HashTableCapacityForSize { |
576 | static constexpr unsigned value = HashTableCapacityForSizeSplitter<size, !(size & (size - 1))>::value; |
577 | COMPILE_ASSERT(size > 0, HashTableNonZeroMinimumCapacity); |
578 | COMPILE_ASSERT(!static_cast<unsigned>(value >> 31), HashTableNoCapacityOverflow); |
579 | COMPILE_ASSERT(value > (2 * size), HashTableCapacityHoldsContentSize); |
580 | }; |
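// For illustration: HashTableCapacityForSize<5>::value == 16 (via UpperPowerOfTwoBound<5>), while
// HashTableCapacityForSize<8>::value == 32, because 8 is already a power of two and a capacity of 16
// would sit exactly on the maxLoad limit of 1/2.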
581 | |
582 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
583 | inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::HashTable() |
584 | : m_table(0) |
585 | , m_tableSize(0) |
586 | , m_tableSizeMask(0) |
587 | , m_keyCount(0) |
588 | , m_deletedCount(0) |
589 | #if CHECK_HASHTABLE_ITERATORS |
590 | , m_iterators(0) |
591 | , m_mutex(makeUnique<Lock>()) |
592 | #endif |
593 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
594 | , m_stats(makeUnique<Stats>()) |
595 | #endif |
596 | { |
597 | } |
598 | |
599 | inline unsigned doubleHash(unsigned key) |
600 | { |
601 | key = ~key + (key >> 23); |
602 | key ^= (key << 12); |
603 | key ^= (key >> 7); |
604 | key ^= (key << 2); |
605 | key ^= (key >> 20); |
606 | return key; |
607 | } |
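// doubleHash() is used by the probing loops below to derive the probe step on a collision:
// k = 1 | doubleHash(h) is always odd, hence coprime with the power-of-two table size, so the
// sequence i, i + k, i + 2k, ... (masked by m_tableSizeMask) visits every bucket before repeating.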
608 | |
609 | #if ASSERT_DISABLED |
610 | |
611 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
612 | template<typename HashTranslator, typename T> |
613 | inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkKey(const T&) |
614 | { |
615 | } |
616 | |
617 | #else |
618 | |
619 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
620 | template<typename HashTranslator, typename T> |
621 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkKey(const T& key) |
622 | { |
623 | if (!HashFunctions::safeToCompareToEmptyOrDeleted) |
624 | return; |
625 | ASSERT(!HashTranslator::equal(KeyTraits::emptyValue(), key)); |
626 | typename std::aligned_storage<sizeof(ValueType), std::alignment_of<ValueType>::value>::type deletedValueBuffer; |
627 | ValueType* deletedValuePtr = reinterpret_cast_ptr<ValueType*>(&deletedValueBuffer); |
628 | ValueType& deletedValue = *deletedValuePtr; |
629 | Traits::constructDeletedValue(deletedValue); |
630 | ASSERT(!HashTranslator::equal(Extractor::extract(deletedValue), key)); |
631 | } |
632 | |
633 | #endif |
634 | |
635 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
636 | template<typename HashTranslator, typename T> |
637 | inline auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::lookup(const T& key) -> ValueType* |
638 | { |
639 | return inlineLookup<HashTranslator>(key); |
640 | } |
641 | |
642 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
643 | template<typename HashTranslator, typename T> |
644 | ALWAYS_INLINE auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::inlineLookup(const T& key) -> ValueType* |
645 | { |
646 | checkKey<HashTranslator>(key); |
647 | |
648 | unsigned k = 0; |
649 | unsigned sizeMask = m_tableSizeMask; |
650 | ValueType* table = m_table; |
651 | unsigned h = HashTranslator::hash(key); |
652 | unsigned i = h & sizeMask; |
653 | |
654 | if (!table) |
655 | return 0; |
656 | |
657 | #if DUMP_HASHTABLE_STATS |
658 | ++HashTableStats::numAccesses; |
659 | unsigned probeCount = 0; |
660 | #endif |
661 | |
662 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
663 | ++m_stats->numAccesses; |
664 | #endif |
665 | |
666 | while (1) { |
667 | ValueType* entry = table + i; |
668 | |
669 | // we count on the compiler to optimize out this branch |
670 | if (HashFunctions::safeToCompareToEmptyOrDeleted) { |
671 | if (HashTranslator::equal(Extractor::extract(*entry), key)) |
672 | return entry; |
673 | |
674 | if (isEmptyBucket(*entry)) |
675 | return 0; |
676 | } else { |
677 | if (isEmptyBucket(*entry)) |
678 | return 0; |
679 | |
680 | if (!isDeletedBucket(*entry) && HashTranslator::equal(Extractor::extract(*entry), key)) |
681 | return entry; |
682 | } |
683 | #if DUMP_HASHTABLE_STATS |
684 | ++probeCount; |
685 | HashTableStats::recordCollisionAtCount(probeCount); |
686 | #endif |
687 | |
688 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
689 | m_stats->recordCollisionAtCount(probeCount); |
690 | #endif |
691 | |
692 | if (k == 0) |
693 | k = 1 | doubleHash(h); |
694 | i = (i + k) & sizeMask; |
695 | } |
696 | } |
697 | |
698 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
699 | template<typename HashTranslator, typename T> |
700 | inline auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::lookupForWriting(const T& key) -> LookupType |
701 | { |
702 | ASSERT(m_table); |
703 | checkKey<HashTranslator>(key); |
704 | |
705 | unsigned k = 0; |
706 | ValueType* table = m_table; |
707 | unsigned sizeMask = m_tableSizeMask; |
708 | unsigned h = HashTranslator::hash(key); |
709 | unsigned i = h & sizeMask; |
710 | |
711 | #if DUMP_HASHTABLE_STATS |
712 | ++HashTableStats::numAccesses; |
713 | unsigned probeCount = 0; |
714 | #endif |
715 | |
716 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
717 | ++m_stats->numAccesses; |
718 | #endif |
719 | |
720 | ValueType* deletedEntry = 0; |
721 | |
722 | while (1) { |
723 | ValueType* entry = table + i; |
724 | |
725 | // we count on the compiler to optimize out this branch |
726 | if (HashFunctions::safeToCompareToEmptyOrDeleted) { |
727 | if (isEmptyBucket(*entry)) |
728 | return LookupType(deletedEntry ? deletedEntry : entry, false); |
729 | |
730 | if (HashTranslator::equal(Extractor::extract(*entry), key)) |
731 | return LookupType(entry, true); |
732 | |
733 | if (isDeletedBucket(*entry)) |
734 | deletedEntry = entry; |
735 | } else { |
736 | if (isEmptyBucket(*entry)) |
737 | return LookupType(deletedEntry ? deletedEntry : entry, false); |
738 | |
739 | if (isDeletedBucket(*entry)) |
740 | deletedEntry = entry; |
741 | else if (HashTranslator::equal(Extractor::extract(*entry), key)) |
742 | return LookupType(entry, true); |
743 | } |
744 | #if DUMP_HASHTABLE_STATS |
745 | ++probeCount; |
746 | HashTableStats::recordCollisionAtCount(probeCount); |
747 | #endif |
748 | |
749 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
750 | m_stats->recordCollisionAtCount(probeCount); |
751 | #endif |
752 | |
753 | if (k == 0) |
754 | k = 1 | doubleHash(h); |
755 | i = (i + k) & sizeMask; |
756 | } |
757 | } |
758 | |
759 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
760 | template<typename HashTranslator, typename T> |
761 | inline auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::fullLookupForWriting(const T& key) -> FullLookupType |
762 | { |
763 | ASSERT(m_table); |
764 | checkKey<HashTranslator>(key); |
765 | |
766 | unsigned k = 0; |
767 | ValueType* table = m_table; |
768 | unsigned sizeMask = m_tableSizeMask; |
769 | unsigned h = HashTranslator::hash(key); |
770 | unsigned i = h & sizeMask; |
771 | |
772 | #if DUMP_HASHTABLE_STATS |
773 | ++HashTableStats::numAccesses; |
774 | unsigned probeCount = 0; |
775 | #endif |
776 | |
777 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
778 | ++m_stats->numAccesses; |
779 | #endif |
780 | |
781 | ValueType* deletedEntry = 0; |
782 | |
783 | while (1) { |
784 | ValueType* entry = table + i; |
785 | |
786 | // we count on the compiler to optimize out this branch |
787 | if (HashFunctions::safeToCompareToEmptyOrDeleted) { |
788 | if (isEmptyBucket(*entry)) |
789 | return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h); |
790 | |
791 | if (HashTranslator::equal(Extractor::extract(*entry), key)) |
792 | return makeLookupResult(entry, true, h); |
793 | |
794 | if (isDeletedBucket(*entry)) |
795 | deletedEntry = entry; |
796 | } else { |
797 | if (isEmptyBucket(*entry)) |
798 | return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h); |
799 | |
800 | if (isDeletedBucket(*entry)) |
801 | deletedEntry = entry; |
802 | else if (HashTranslator::equal(Extractor::extract(*entry), key)) |
803 | return makeLookupResult(entry, true, h); |
804 | } |
805 | #if DUMP_HASHTABLE_STATS |
806 | ++probeCount; |
807 | HashTableStats::recordCollisionAtCount(probeCount); |
808 | #endif |
809 | |
810 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
811 | m_stats->recordCollisionAtCount(probeCount); |
812 | #endif |
813 | |
814 | if (k == 0) |
815 | k = 1 | doubleHash(h); |
816 | i = (i + k) & sizeMask; |
817 | } |
818 | } |
819 | |
820 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
821 | template<typename HashTranslator, typename T, typename Extra> |
ALWAYS_INLINE void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::addUniqueForInitialization(T&& key, Extra&& extra)
823 | { |
824 | ASSERT(m_table); |
825 | |
826 | checkKey<HashTranslator>(key); |
827 | |
828 | invalidateIterators(); |
829 | |
830 | internalCheckTableConsistency(); |
831 | |
832 | unsigned k = 0; |
833 | ValueType* table = m_table; |
834 | unsigned sizeMask = m_tableSizeMask; |
835 | unsigned h = HashTranslator::hash(key); |
836 | unsigned i = h & sizeMask; |
837 | |
838 | #if DUMP_HASHTABLE_STATS |
839 | ++HashTableStats::numAccesses; |
840 | unsigned probeCount = 0; |
841 | #endif |
842 | |
843 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
844 | ++m_stats->numAccesses; |
845 | #endif |
846 | |
847 | ValueType* entry; |
848 | while (1) { |
849 | entry = table + i; |
850 | |
851 | if (isEmptyBucket(*entry)) |
852 | break; |
853 | |
854 | #if DUMP_HASHTABLE_STATS |
855 | ++probeCount; |
856 | HashTableStats::recordCollisionAtCount(probeCount); |
857 | #endif |
858 | |
859 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
860 | m_stats->recordCollisionAtCount(probeCount); |
861 | #endif |
862 | |
863 | if (k == 0) |
864 | k = 1 | doubleHash(h); |
865 | i = (i + k) & sizeMask; |
866 | } |
867 | |
868 | HashTranslator::translate(*entry, std::forward<T>(key), std::forward<Extra>(extra)); |
869 | |
870 | internalCheckTableConsistency(); |
871 | } |
872 | |
873 | template<bool emptyValueIsZero> struct HashTableBucketInitializer; |
874 | |
875 | template<> struct HashTableBucketInitializer<false> { |
876 | template<typename Traits, typename Value> static void initialize(Value& bucket) |
877 | { |
878 | Traits::template constructEmptyValue<Traits>(bucket); |
879 | } |
880 | }; |
881 | |
882 | template<> struct HashTableBucketInitializer<true> { |
883 | template<typename Traits, typename Value> static void initialize(Value& bucket) |
884 | { |
885 | // This initializes the bucket without copying the empty value. |
886 | // That makes it possible to use this with types that don't support copying. |
887 | // The memset to 0 looks like a slow operation but is optimized by the compilers. |
888 | memset(static_cast<void*>(std::addressof(bucket)), 0, sizeof(bucket)); |
889 | } |
890 | }; |
891 | |
892 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
893 | inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::initializeBucket(ValueType& bucket) |
894 | { |
895 | HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize<Traits>(bucket); |
896 | } |
897 | |
898 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
899 | template<typename HashTranslator, typename T, typename Extra> |
ALWAYS_INLINE auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::add(T&& key, Extra&& extra) -> AddResult
901 | { |
902 | checkKey<HashTranslator>(key); |
903 | |
904 | invalidateIterators(); |
905 | |
906 | if (!m_table) |
907 | expand(nullptr); |
908 | |
909 | internalCheckTableConsistency(); |
910 | |
911 | ASSERT(m_table); |
912 | |
913 | unsigned k = 0; |
914 | ValueType* table = m_table; |
915 | unsigned sizeMask = m_tableSizeMask; |
916 | unsigned h = HashTranslator::hash(key); |
917 | unsigned i = h & sizeMask; |
918 | |
919 | #if DUMP_HASHTABLE_STATS |
920 | ++HashTableStats::numAccesses; |
921 | unsigned probeCount = 0; |
922 | #endif |
923 | |
924 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
925 | ++m_stats->numAccesses; |
926 | #endif |
927 | |
928 | ValueType* deletedEntry = 0; |
929 | ValueType* entry; |
930 | while (1) { |
931 | entry = table + i; |
932 | |
933 | // we count on the compiler to optimize out this branch |
934 | if (HashFunctions::safeToCompareToEmptyOrDeleted) { |
935 | if (isEmptyBucket(*entry)) |
936 | break; |
937 | |
938 | if (HashTranslator::equal(Extractor::extract(*entry), key)) |
939 | return AddResult(makeKnownGoodIterator(entry), false); |
940 | |
941 | if (isDeletedBucket(*entry)) |
942 | deletedEntry = entry; |
943 | } else { |
944 | if (isEmptyBucket(*entry)) |
945 | break; |
946 | |
947 | if (isDeletedBucket(*entry)) |
948 | deletedEntry = entry; |
949 | else if (HashTranslator::equal(Extractor::extract(*entry), key)) |
950 | return AddResult(makeKnownGoodIterator(entry), false); |
951 | } |
952 | #if DUMP_HASHTABLE_STATS |
953 | ++probeCount; |
954 | HashTableStats::recordCollisionAtCount(probeCount); |
955 | #endif |
956 | |
957 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
958 | m_stats->recordCollisionAtCount(probeCount); |
959 | #endif |
960 | |
961 | if (k == 0) |
962 | k = 1 | doubleHash(h); |
963 | i = (i + k) & sizeMask; |
964 | } |
965 | |
966 | if (deletedEntry) { |
967 | initializeBucket(*deletedEntry); |
968 | entry = deletedEntry; |
969 | --m_deletedCount; |
970 | } |
971 | |
972 | HashTranslator::translate(*entry, std::forward<T>(key), std::forward<Extra>(extra)); |
973 | ++m_keyCount; |
974 | |
975 | if (shouldExpand()) |
976 | entry = expand(entry); |
977 | |
978 | internalCheckTableConsistency(); |
979 | |
980 | return AddResult(makeKnownGoodIterator(entry), true); |
981 | } |
982 | |
983 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
984 | template<typename HashTranslator, typename T, typename Extra> |
inline auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::addPassingHashCode(T&& key, Extra&& extra) -> AddResult
986 | { |
987 | checkKey<HashTranslator>(key); |
988 | |
989 | invalidateIterators(); |
990 | |
991 | if (!m_table) |
992 | expand(); |
993 | |
994 | internalCheckTableConsistency(); |
995 | |
996 | FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key); |
997 | |
998 | ValueType* entry = lookupResult.first.first; |
999 | bool found = lookupResult.first.second; |
1000 | unsigned h = lookupResult.second; |
1001 | |
1002 | if (found) |
1003 | return AddResult(makeKnownGoodIterator(entry), false); |
1004 | |
1005 | if (isDeletedBucket(*entry)) { |
1006 | initializeBucket(*entry); |
1007 | --m_deletedCount; |
1008 | } |
1009 | |
1010 | HashTranslator::translate(*entry, std::forward<T>(key), std::forward<Extra>(extra), h); |
1011 | ++m_keyCount; |
1012 | |
1013 | if (shouldExpand()) |
1014 | entry = expand(entry); |
1015 | |
1016 | internalCheckTableConsistency(); |
1017 | |
1018 | return AddResult(makeKnownGoodIterator(entry), true); |
1019 | } |
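// addPassingHashCode() differs from add() only in that it forwards the already-computed hash code to
// HashTranslator::translate(), so translators that build their stored key during insertion can reuse
// the hash instead of computing it again.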
1020 | |
1021 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1022 | inline auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::reinsert(ValueType&& entry) -> ValueType* |
1023 | { |
1024 | ASSERT(m_table); |
1025 | ASSERT(!lookupForWriting(Extractor::extract(entry)).second); |
1026 | ASSERT(!isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first))); |
1027 | #if DUMP_HASHTABLE_STATS |
1028 | ++HashTableStats::numReinserts; |
1029 | #endif |
1030 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
1031 | ++m_stats->numReinserts; |
1032 | #endif |
1033 | |
1034 | Value* newEntry = lookupForWriting(Extractor::extract(entry)).first; |
1035 | newEntry->~Value(); |
1036 | new (NotNull, newEntry) ValueType(WTFMove(entry)); |
1037 | |
1038 | return newEntry; |
1039 | } |
1040 | |
1041 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1042 | template <typename HashTranslator, typename T> |
1043 | auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::find(const T& key) -> iterator |
1044 | { |
1045 | if (!m_table) |
1046 | return end(); |
1047 | |
1048 | ValueType* entry = lookup<HashTranslator>(key); |
1049 | if (!entry) |
1050 | return end(); |
1051 | |
1052 | return makeKnownGoodIterator(entry); |
1053 | } |
1054 | |
1055 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1056 | template <typename HashTranslator, typename T> |
1057 | auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::find(const T& key) const -> const_iterator |
1058 | { |
1059 | if (!m_table) |
1060 | return end(); |
1061 | |
1062 | ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key); |
1063 | if (!entry) |
1064 | return end(); |
1065 | |
1066 | return makeKnownGoodConstIterator(entry); |
1067 | } |
1068 | |
1069 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1070 | template <typename HashTranslator, typename T> |
1071 | bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::contains(const T& key) const |
1072 | { |
1073 | if (!m_table) |
1074 | return false; |
1075 | |
1076 | return const_cast<HashTable*>(this)->lookup<HashTranslator>(key); |
1077 | } |
1078 | |
1079 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1080 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeAndInvalidateWithoutEntryConsistencyCheck(ValueType* pos) |
1081 | { |
1082 | invalidateIterators(); |
1083 | remove(pos); |
1084 | } |
1085 | |
1086 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1087 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeAndInvalidate(ValueType* pos) |
1088 | { |
1089 | invalidateIterators(); |
1090 | internalCheckTableConsistency(); |
1091 | remove(pos); |
1092 | } |
1093 | |
1094 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1095 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::remove(ValueType* pos) |
1096 | { |
1097 | #if DUMP_HASHTABLE_STATS |
1098 | ++HashTableStats::numRemoves; |
1099 | #endif |
1100 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
1101 | ++m_stats->numRemoves; |
1102 | #endif |
1103 | |
1104 | deleteBucket(*pos); |
1105 | ++m_deletedCount; |
1106 | --m_keyCount; |
1107 | |
1108 | if (shouldShrink()) |
1109 | shrink(); |
1110 | |
1111 | internalCheckTableConsistency(); |
1112 | } |
1113 | |
1114 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1115 | inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::remove(iterator it) |
1116 | { |
1117 | if (it == end()) |
1118 | return; |
1119 | |
1120 | removeAndInvalidate(const_cast<ValueType*>(it.m_iterator.m_position)); |
1121 | } |
1122 | |
1123 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1124 | inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeWithoutEntryConsistencyCheck(iterator it) |
1125 | { |
1126 | if (it == end()) |
1127 | return; |
1128 | |
1129 | removeAndInvalidateWithoutEntryConsistencyCheck(const_cast<ValueType*>(it.m_iterator.m_position)); |
1130 | } |
1131 | |
1132 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1133 | inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeWithoutEntryConsistencyCheck(const_iterator it) |
1134 | { |
1135 | if (it == end()) |
1136 | return; |
1137 | |
1138 | removeAndInvalidateWithoutEntryConsistencyCheck(const_cast<ValueType*>(it.m_position)); |
1139 | } |
1140 | |
1141 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1142 | inline void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::remove(const KeyType& key) |
1143 | { |
1144 | remove(find(key)); |
1145 | } |
1146 | |
1147 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1148 | template<typename Functor> |
1149 | inline bool HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::removeIf(const Functor& functor) |
1150 | { |
// We must use local copies in case "functor" or "deleteBucket"
// makes a function call, which would prevent the compiler from keeping
// the values in registers.
1154 | unsigned removedBucketCount = 0; |
1155 | ValueType* table = m_table; |
1156 | |
1157 | for (unsigned i = m_tableSize; i--;) { |
1158 | ValueType& bucket = table[i]; |
1159 | if (isEmptyOrDeletedBucket(bucket)) |
1160 | continue; |
1161 | |
1162 | if (!functor(bucket)) |
1163 | continue; |
1164 | |
1165 | deleteBucket(bucket); |
1166 | ++removedBucketCount; |
1167 | } |
1168 | m_deletedCount += removedBucketCount; |
1169 | m_keyCount -= removedBucketCount; |
1170 | |
1171 | if (shouldShrink()) |
1172 | shrinkToBestSize(); |
1173 | |
1174 | internalCheckTableConsistency(); |
1175 | return removedBucketCount; |
1176 | } |
1177 | |
1178 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1179 | auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::allocateTable(unsigned size) -> ValueType* |
1180 | { |
1181 | // would use a template member function with explicit specializations here, but |
1182 | // gcc doesn't appear to support that |
1183 | if (Traits::emptyValueIsZero) |
1184 | return static_cast<ValueType*>(fastZeroedMalloc(size * sizeof(ValueType))); |
1185 | ValueType* result = static_cast<ValueType*>(fastMalloc(size * sizeof(ValueType))); |
1186 | for (unsigned i = 0; i < size; i++) |
1187 | initializeBucket(result[i]); |
1188 | return result; |
1189 | } |
1190 | |
1191 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1192 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::deallocateTable(ValueType* table, unsigned size) |
1193 | { |
1194 | for (unsigned i = 0; i < size; ++i) { |
1195 | if (!isDeletedBucket(table[i])) |
1196 | table[i].~ValueType(); |
1197 | } |
1198 | fastFree(table); |
1199 | } |
1200 | |
1201 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1202 | auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::expand(ValueType* entry) -> ValueType* |
1203 | { |
1204 | if (KeyTraits::hasIsReleasedWeakValueFunction) |
1205 | deleteReleasedWeakBuckets(); |
1206 | |
1207 | unsigned newSize; |
1208 | if (m_tableSize == 0) |
1209 | newSize = KeyTraits::minimumTableSize; |
1210 | else if (mustRehashInPlace()) |
1211 | newSize = m_tableSize; |
1212 | else |
1213 | newSize = m_tableSize * 2; |
1214 | |
1215 | return rehash(newSize, entry); |
1216 | } |
1217 | |
1218 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1219 | constexpr unsigned HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::computeBestTableSize(unsigned keyCount) |
1220 | { |
1221 | unsigned bestTableSize = WTF::roundUpToPowerOfTwo(keyCount) * 2; |
1222 | |
// With maxLoad at 1/2 and minLoad at 1/6, our average load is 2/6.
// If we are past halfway between 2/6 and 1/2 (that is, past 5/12), we double the size to avoid getting
// too close to maxLoad and to bring the ratio back toward 2/6. This gives us a load in the bounds [3/12, 5/12).
1226 | bool aboveThreeQuarterLoad = keyCount * 12 >= bestTableSize * 5; |
1227 | if (aboveThreeQuarterLoad) |
1228 | bestTableSize *= 2; |
1229 | |
1230 | unsigned minimumTableSize = KeyTraits::minimumTableSize; |
1231 | return std::max(bestTableSize, minimumTableSize); |
1232 | } |
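// For illustration (ignoring KeyTraits::minimumTableSize): computeBestTableSize(12) starts from
// roundUpToPowerOfTwo(12) * 2 == 32 and keeps it since 12 * 12 < 32 * 5, a load of 4.5/12; for a key
// count of 14, 14 * 12 >= 32 * 5, so the size doubles to 64, a load of about 2.6/12.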
1233 | |
1234 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1235 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::shrinkToBestSize() |
1236 | { |
1237 | unsigned minimumTableSize = KeyTraits::minimumTableSize; |
1238 | rehash(std::max(minimumTableSize, computeBestTableSize(m_keyCount)), nullptr); |
1239 | } |
1240 | |
1241 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1242 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::deleteReleasedWeakBuckets() |
1243 | { |
1244 | for (unsigned i = 0; i < m_tableSize; ++i) { |
1245 | auto& entry = m_table[i]; |
1246 | if (isReleasedWeakBucket(entry)) { |
1247 | deleteBucket(entry); |
1248 | ++m_deletedCount; |
1249 | --m_keyCount; |
1250 | } |
1251 | } |
1252 | } |
1253 | |
1254 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1255 | auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::rehash(unsigned newTableSize, ValueType* entry) -> ValueType* |
1256 | { |
1257 | internalCheckTableConsistencyExceptSize(); |
1258 | |
1259 | unsigned oldTableSize = m_tableSize; |
1260 | ValueType* oldTable = m_table; |
1261 | |
1262 | #if DUMP_HASHTABLE_STATS |
1263 | if (oldTableSize != 0) |
1264 | ++HashTableStats::numRehashes; |
1265 | #endif |
1266 | |
1267 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
1268 | if (oldTableSize != 0) |
1269 | ++m_stats->numRehashes; |
1270 | #endif |
1271 | |
1272 | m_tableSize = newTableSize; |
1273 | m_tableSizeMask = newTableSize - 1; |
1274 | m_table = allocateTable(newTableSize); |
1275 | |
1276 | Value* newEntry = nullptr; |
1277 | for (unsigned i = 0; i != oldTableSize; ++i) { |
1278 | auto& oldEntry = oldTable[i]; |
1279 | if (isDeletedBucket(oldEntry)) { |
1280 | ASSERT(std::addressof(oldEntry) != entry); |
1281 | continue; |
1282 | } |
1283 | |
1284 | if (isEmptyBucket(oldEntry)) { |
1285 | ASSERT(std::addressof(oldEntry) != entry); |
1286 | oldTable[i].~ValueType(); |
1287 | continue; |
1288 | } |
1289 | |
1290 | if (isReleasedWeakBucket(oldEntry)) { |
1291 | ASSERT(std::addressof(oldEntry) != entry); |
1292 | oldEntry.~ValueType(); |
1293 | --m_keyCount; |
1294 | continue; |
1295 | } |
1296 | |
1297 | Value* reinsertedEntry = reinsert(WTFMove(oldEntry)); |
1298 | oldEntry.~ValueType(); |
1299 | if (std::addressof(oldEntry) == entry) { |
1300 | ASSERT(!newEntry); |
1301 | newEntry = reinsertedEntry; |
1302 | } |
1303 | } |
1304 | |
1305 | m_deletedCount = 0; |
1306 | |
1307 | fastFree(oldTable); |
1308 | |
1309 | internalCheckTableConsistency(); |
1310 | return newEntry; |
1311 | } |
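// Note that rehash() returns the new address of 'entry' (if one was passed in): expand() forwards it so
// that add() can still return a valid iterator for the just-inserted value after the table has moved.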
1312 | |
1313 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1314 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::clear() |
1315 | { |
1316 | invalidateIterators(); |
1317 | if (!m_table) |
1318 | return; |
1319 | |
1320 | deallocateTable(m_table, m_tableSize); |
1321 | m_table = 0; |
1322 | m_tableSize = 0; |
1323 | m_tableSizeMask = 0; |
1324 | m_keyCount = 0; |
1325 | m_deletedCount = 0; |
1326 | } |
1327 | |
1328 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1329 | HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::HashTable(const HashTable& other) |
1330 | : m_table(nullptr) |
1331 | , m_tableSize(0) |
1332 | , m_tableSizeMask(0) |
1333 | , m_keyCount(0) |
1334 | , m_deletedCount(0) |
1335 | #if CHECK_HASHTABLE_ITERATORS |
1336 | , m_iterators(nullptr) |
1337 | , m_mutex(makeUnique<Lock>()) |
1338 | #endif |
1339 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
1340 | , m_stats(makeUnique<Stats>(*other.m_stats)) |
1341 | #endif |
1342 | { |
1343 | unsigned otherKeyCount = other.size(); |
1344 | if (!otherKeyCount) |
1345 | return; |
1346 | |
1347 | m_tableSize = computeBestTableSize(otherKeyCount); |
1348 | m_tableSizeMask = m_tableSize - 1; |
1349 | m_keyCount = otherKeyCount; |
1350 | m_table = allocateTable(m_tableSize); |
1351 | |
1352 | for (const auto& otherValue : other) |
1353 | addUniqueForInitialization<IdentityTranslatorType>(Extractor::extract(otherValue), otherValue); |
1354 | } |
1355 | |
1356 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1357 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::swap(HashTable& other) |
1358 | { |
1359 | invalidateIterators(); |
1360 | other.invalidateIterators(); |
1361 | |
1362 | std::swap(m_table, other.m_table); |
1363 | std::swap(m_tableSize, other.m_tableSize); |
1364 | std::swap(m_tableSizeMask, other.m_tableSizeMask); |
1365 | std::swap(m_keyCount, other.m_keyCount); |
1366 | std::swap(m_deletedCount, other.m_deletedCount); |
1367 | |
1368 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
1369 | m_stats.swap(other.m_stats); |
1370 | #endif |
1371 | } |
1372 | |
1373 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1374 | auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::operator=(const HashTable& other) -> HashTable& |
1375 | { |
1376 | HashTable tmp(other); |
1377 | swap(tmp); |
1378 | return *this; |
1379 | } |
1380 | |
1381 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1382 | inline HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::HashTable(HashTable&& other) |
1383 | #if CHECK_HASHTABLE_ITERATORS |
1384 | : m_iterators(nullptr) |
1385 | , m_mutex(makeUnique<Lock>()) |
1386 | #endif |
1387 | { |
1388 | other.invalidateIterators(); |
1389 | |
1390 | m_table = other.m_table; |
1391 | m_tableSize = other.m_tableSize; |
1392 | m_tableSizeMask = other.m_tableSizeMask; |
1393 | m_keyCount = other.m_keyCount; |
1394 | m_deletedCount = other.m_deletedCount; |
1395 | |
1396 | other.m_table = nullptr; |
1397 | other.m_tableSize = 0; |
1398 | other.m_tableSizeMask = 0; |
1399 | other.m_keyCount = 0; |
1400 | other.m_deletedCount = 0; |
1401 | |
1402 | #if DUMP_HASHTABLE_STATS_PER_TABLE |
1403 | m_stats = WTFMove(other.m_stats); |
1404 | other.m_stats = nullptr; |
1405 | #endif |
1406 | } |
1407 | |
1408 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1409 | inline auto HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::operator=(HashTable&& other) -> HashTable& |
1410 | { |
1411 | HashTable temp = WTFMove(other); |
1412 | swap(temp); |
1413 | return *this; |
1414 | } |
1415 | |
1416 | #if !ASSERT_DISABLED |
1417 | |
1418 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1419 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkTableConsistency() const |
1420 | { |
1421 | checkTableConsistencyExceptSize(); |
1422 | ASSERT(!m_table || !shouldExpand()); |
1423 | ASSERT(!shouldShrink()); |
1424 | } |
1425 | |
1426 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1427 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::checkTableConsistencyExceptSize() const |
1428 | { |
1429 | if (!m_table) |
1430 | return; |
1431 | |
1432 | unsigned count = 0; |
1433 | unsigned deletedCount = 0; |
1434 | for (unsigned j = 0; j < m_tableSize; ++j) { |
1435 | ValueType* entry = m_table + j; |
1436 | if (isEmptyBucket(*entry)) |
1437 | continue; |
1438 | |
1439 | if (isDeletedBucket(*entry)) { |
1440 | ++deletedCount; |
1441 | continue; |
1442 | } |
1443 | |
1444 | auto& key = Extractor::extract(*entry); |
1445 | const_iterator it = find(key); |
1446 | ASSERT(entry == it.m_position); |
1447 | ++count; |
1448 | |
1449 | ValueCheck<Key>::checkConsistency(key); |
1450 | } |
1451 | |
1452 | ASSERT(count == m_keyCount); |
1453 | ASSERT(deletedCount == m_deletedCount); |
1454 | ASSERT(m_tableSize >= KeyTraits::minimumTableSize); |
1455 | ASSERT(m_tableSizeMask); |
1456 | ASSERT(m_tableSize == m_tableSizeMask + 1); |
1457 | } |
1458 | |
1459 | #endif // ASSERT_DISABLED |
1460 | |
1461 | #if CHECK_HASHTABLE_ITERATORS |
1462 | |
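// Detach every registered iterator from this table; a detached iterator will fail its
// validity assertions if it is used afterwards.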
1463 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1464 | void HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>::invalidateIterators() |
1465 | { |
1466 | std::lock_guard<Lock> lock(*m_mutex); |
1467 | const_iterator* next; |
1468 | for (const_iterator* p = m_iterators; p; p = next) { |
1469 | next = p->m_next; |
p->m_table = nullptr;
p->m_next = nullptr;
p->m_previous = nullptr;
}
m_iterators = nullptr;
1475 | } |
1476 | |
1477 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1478 | void addIterator(const HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>* table, |
1479 | HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>* it) |
1480 | { |
1481 | it->m_table = table; |
it->m_previous = nullptr;
1483 | |
1484 | // Insert iterator at head of doubly-linked list of iterators. |
1485 | if (!table) { |
it->m_next = nullptr;
1487 | } else { |
1488 | std::lock_guard<Lock> lock(*table->m_mutex); |
1489 | ASSERT(table->m_iterators != it); |
1490 | it->m_next = table->m_iterators; |
1491 | table->m_iterators = it; |
1492 | if (it->m_next) { |
1493 | ASSERT(!it->m_next->m_previous); |
1494 | it->m_next->m_previous = it; |
1495 | } |
1496 | } |
1497 | } |
1498 | |
1499 | template<typename Key, typename Value, typename Extractor, typename HashFunctions, typename Traits, typename KeyTraits> |
1500 | void removeIterator(HashTableConstIterator<Key, Value, Extractor, HashFunctions, Traits, KeyTraits>* it) |
1501 | { |
1502 | // Delete iterator from doubly-linked list of iterators. |
1503 | if (!it->m_table) { |
1504 | ASSERT(!it->m_next); |
1505 | ASSERT(!it->m_previous); |
1506 | } else { |
1507 | std::lock_guard<Lock> lock(*it->m_table->m_mutex); |
1508 | if (it->m_next) { |
1509 | ASSERT(it->m_next->m_previous == it); |
1510 | it->m_next->m_previous = it->m_previous; |
1511 | } |
1512 | if (it->m_previous) { |
1513 | ASSERT(it->m_table->m_iterators != it); |
1514 | ASSERT(it->m_previous->m_next == it); |
1515 | it->m_previous->m_next = it->m_next; |
1516 | } else { |
1517 | ASSERT(it->m_table->m_iterators == it); |
1518 | it->m_table->m_iterators = it->m_next; |
1519 | } |
1520 | } |
1521 | |
it->m_table = nullptr;
it->m_next = nullptr;
it->m_previous = nullptr;
1525 | } |
1526 | |
1527 | #endif // CHECK_HASHTABLE_ITERATORS |
1528 | |
1529 | // iterator adapters |
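// These adapters wrap the raw HashTable iterators so that higher-level containers
// (HashMap, HashSet) can expose forward iterators over their own value types.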
1530 | |
template<typename HashTableType, typename ValueType> struct HashTableConstIteratorAdapter {
// std::iterator is deprecated in C++17; declare the iterator traits explicitly instead.
using iterator_category = std::forward_iterator_tag;
using value_type = ValueType;
using difference_type = std::ptrdiff_t;
using pointer = const ValueType*;
using reference = const ValueType&;

1532 | HashTableConstIteratorAdapter() {} |
1533 | HashTableConstIteratorAdapter(const typename HashTableType::const_iterator& impl) : m_impl(impl) {} |
1534 | |
1535 | const ValueType* get() const { return (const ValueType*)m_impl.get(); } |
1536 | const ValueType& operator*() const { return *get(); } |
1537 | const ValueType* operator->() const { return get(); } |
1538 | |
1539 | HashTableConstIteratorAdapter& operator++() { ++m_impl; return *this; } |
1540 | // postfix ++ intentionally omitted |
1541 | |
1542 | typename HashTableType::const_iterator m_impl; |
1543 | }; |
1544 | |
template<typename HashTableType, typename ValueType> struct HashTableIteratorAdapter {
// std::iterator is deprecated in C++17; declare the iterator traits explicitly instead.
using iterator_category = std::forward_iterator_tag;
using value_type = ValueType;
using difference_type = std::ptrdiff_t;
using pointer = ValueType*;
using reference = ValueType&;

1546 | HashTableIteratorAdapter() {} |
1547 | HashTableIteratorAdapter(const typename HashTableType::iterator& impl) : m_impl(impl) {} |
1548 | |
1549 | ValueType* get() const { return (ValueType*)m_impl.get(); } |
1550 | ValueType& operator*() const { return *get(); } |
1551 | ValueType* operator->() const { return get(); } |
1552 | |
1553 | HashTableIteratorAdapter& operator++() { ++m_impl; return *this; } |
1554 | // postfix ++ intentionally omitted |
1555 | |
1556 | operator HashTableConstIteratorAdapter<HashTableType, ValueType>() { |
1557 | typename HashTableType::const_iterator i = m_impl; |
1558 | return i; |
1559 | } |
1560 | |
1561 | typename HashTableType::iterator m_impl; |
1562 | }; |
1563 | |
1564 | template<typename T, typename U> |
1565 | inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b) |
1566 | { |
1567 | return a.m_impl == b.m_impl; |
1568 | } |
1569 | |
1570 | template<typename T, typename U> |
1571 | inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b) |
1572 | { |
1573 | return a.m_impl != b.m_impl; |
1574 | } |
1575 | |
1576 | template<typename T, typename U> |
1577 | inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b) |
1578 | { |
1579 | return a.m_impl == b.m_impl; |
1580 | } |
1581 | |
1582 | template<typename T, typename U> |
1583 | inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b) |
1584 | { |
1585 | return a.m_impl != b.m_impl; |
1586 | } |
1587 | |
// Mixed-constness comparisons: all 4 combinations of ==/!= with a const adapter on one side and a non-const adapter on the other.
1589 | template<typename T, typename U> |
1590 | inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b) |
1591 | { |
1592 | return a.m_impl == b.m_impl; |
1593 | } |
1594 | |
1595 | template<typename T, typename U> |
1596 | inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, const HashTableIteratorAdapter<T, U>& b) |
1597 | { |
1598 | return a.m_impl != b.m_impl; |
1599 | } |
1600 | |
1601 | template<typename T, typename U> |
1602 | inline bool operator==(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b) |
1603 | { |
1604 | return a.m_impl == b.m_impl; |
1605 | } |
1606 | |
1607 | template<typename T, typename U> |
1608 | inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, const HashTableConstIteratorAdapter<T, U>& b) |
1609 | { |
1610 | return a.m_impl != b.m_impl; |
1611 | } |
1612 | |
1613 | } // namespace WTF |
1614 | |
1615 | #include <wtf/HashIterators.h> |
1616 | |