/*
 * Copyright (C) 2016-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include "ExceptionHelpers.h"
#include "JSCJSValueInlines.h"
#include "JSObject.h"

namespace JSC {

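// These templates are the shared guts of JSMap and JSSet: JSMap is built on
// key/value buckets and JSSet on key-only buckets (in WebKit, JSMap derives
// from HashMapImpl<HashMapBucket<HashMapBucketDataKeyValue>> and JSSet from
// HashMapImpl<HashMapBucket<HashMapBucketDataKey>>).
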
JS_EXPORT_PRIVATE const ClassInfo* getHashMapBucketKeyClassInfo();
JS_EXPORT_PRIVATE const ClassInfo* getHashMapBucketKeyValueClassInfo();
JS_EXPORT_PRIVATE const ClassInfo* getHashMapImplKeyClassInfo();
JS_EXPORT_PRIVATE const ClassInfo* getHashMapImplKeyValueClassInfo();

enum class HashTableType {
    Key,
    KeyValue
};

struct HashMapBucketDataKey {
    static const HashTableType Type = HashTableType::Key;
    WriteBarrier<Unknown> key;
};

struct HashMapBucketDataKeyValue {
    static const HashTableType Type = HashTableType::KeyValue;
    WriteBarrier<Unknown> key;
    WriteBarrier<Unknown> value;
};

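// A HashMapBucket is a GC-allocated cell that lives in two structures at once:
// the flat hash table buffer points at it for lookup, and m_next/m_prev thread
// it onto a doubly linked list that preserves insertion order for iteration.
// A bucket whose key is the empty JSValue is considered deleted.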
template <typename Data>
class HashMapBucket : public JSCell {
    typedef JSCell Base;

    template <typename T = Data>
    static typename std::enable_if<std::is_same<T, HashMapBucketDataKey>::value, Structure*>::type selectStructure(VM& vm)
    {
        return vm.hashMapBucketSetStructure.get();
    }

    template <typename T = Data>
    static typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, Structure*>::type selectStructure(VM& vm)
    {
        return vm.hashMapBucketMapStructure.get();
    }

public:
    static const HashTableType Type = Data::Type;
    static const ClassInfo s_info; // This is never accessed directly, since that would break linkage on some compilers.

    static const ClassInfo* info()
    {
        switch (Type) {
        case HashTableType::Key:
            return getHashMapBucketKeyClassInfo();
        case HashTableType::KeyValue:
            return getHashMapBucketKeyValueClassInfo();
        }
        RELEASE_ASSERT_NOT_REACHED();
    }

    static Structure* createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
    {
        return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
    }

    static HashMapBucket* create(VM& vm)
    {
        HashMapBucket* bucket = new (NotNull, allocateCell<HashMapBucket<Data>>(vm.heap)) HashMapBucket(vm, selectStructure(vm));
        bucket->finishCreation(vm);
        ASSERT(!bucket->next());
        ASSERT(!bucket->prev());
        return bucket;
    }

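    // A sentinel bucket carries undefined (rather than empty) key and value so
    // it never reads as deleted; the VM keeps shared sentinel buckets that fast
    // paths can hand out, e.g. for lookup misses.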
    static HashMapBucket* createSentinel(VM& vm)
    {
        auto* bucket = create(vm);
        bucket->setKey(vm, jsUndefined());
        bucket->setValue(vm, jsUndefined());
        ASSERT(!bucket->deleted());
        return bucket;
    }

    HashMapBucket(VM& vm, Structure* structure)
        : Base(vm, structure)
    {
        ASSERT(deleted());
    }

    ALWAYS_INLINE void setNext(VM& vm, HashMapBucket* bucket)
    {
        m_next.set(vm, this, bucket);
    }
    ALWAYS_INLINE void setPrev(VM& vm, HashMapBucket* bucket)
    {
        m_prev.set(vm, this, bucket);
    }

    ALWAYS_INLINE void setKey(VM& vm, JSValue key)
    {
        m_data.key.set(vm, this, key);
    }

    template <typename T = Data>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value>::type setValue(VM& vm, JSValue value)
    {
        m_data.value.set(vm, this, value);
    }
    template <typename T = Data>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucketDataKey>::value>::type setValue(VM&, JSValue) { }

    ALWAYS_INLINE JSValue key() const { return m_data.key.get(); }

    template <typename T = Data>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, JSValue>::type value() const
    {
        return m_data.value.get();
    }

    static void visitChildren(JSCell*, SlotVisitor&);

    ALWAYS_INLINE HashMapBucket* next() const { return m_next.get(); }
    ALWAYS_INLINE HashMapBucket* prev() const { return m_prev.get(); }

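    // An empty JSValue key marks a bucket as deleted. Real keys are never the
    // empty value (normalizeMapKey only ever returns a genuine JSValue), so
    // this encoding cannot collide with a live entry.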
    ALWAYS_INLINE bool deleted() const { return !key(); }
    ALWAYS_INLINE void makeDeleted(VM& vm)
    {
        setKey(vm, JSValue());
        setValue(vm, JSValue());
    }

    static ptrdiff_t offsetOfKey()
    {
        return OBJECT_OFFSETOF(HashMapBucket, m_data) + OBJECT_OFFSETOF(Data, key);
    }

    template <typename T = Data>
    static typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, ptrdiff_t>::type offsetOfValue()
    {
        return OBJECT_OFFSETOF(HashMapBucket, m_data) + OBJECT_OFFSETOF(Data, value);
    }

    static ptrdiff_t offsetOfNext()
    {
        return OBJECT_OFFSETOF(HashMapBucket, m_next);
    }

    template <typename T = Data>
    ALWAYS_INLINE static typename std::enable_if<std::is_same<T, HashMapBucketDataKeyValue>::value, JSValue>::type extractValue(const HashMapBucket& bucket)
    {
        return bucket.value();
    }

    template <typename T = Data>
    ALWAYS_INLINE static typename std::enable_if<std::is_same<T, HashMapBucketDataKey>::value, JSValue>::type extractValue(const HashMapBucket&)
    {
        return JSValue();
    }

private:
    WriteBarrier<HashMapBucket> m_next;
    WriteBarrier<HashMapBucket> m_prev;
    Data m_data;
};

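// HashMapBuffer is the open-addressed table itself: a bare, headerless array of
// BucketType* allocated out of the JSValue gigacage auxiliary space (the
// HashMapBuffer* is the array). reset() memsets the storage to -1 so every slot
// compares equal to HashMapImpl::emptyValue().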
template <typename BucketType>
class HashMapBuffer {
public:
    HashMapBuffer() = delete;

    static size_t allocationSize(Checked<size_t> capacity)
    {
        return (capacity * sizeof(BucketType*)).unsafeGet();
    }

    ALWAYS_INLINE BucketType** buffer() const
    {
        return bitwise_cast<BucketType**>(this);
    }

    static HashMapBuffer* create(JSGlobalObject* globalObject, VM& vm, JSCell*, uint32_t capacity)
    {
        auto scope = DECLARE_THROW_SCOPE(vm);
        size_t allocationSize = HashMapBuffer::allocationSize(capacity);
        void* data = vm.jsValueGigacageAuxiliarySpace.allocateNonVirtual(vm, allocationSize, nullptr, AllocationFailureMode::ReturnNull);
        if (!data) {
            throwOutOfMemoryError(globalObject, scope);
            return nullptr;
        }

        HashMapBuffer* buffer = static_cast<HashMapBuffer*>(data);
        buffer->reset(capacity);
        return buffer;
    }

    ALWAYS_INLINE void reset(uint32_t capacity)
    {
        memset(this, -1, allocationSize(capacity));
    }
};

ALWAYS_INLINE static bool areKeysEqual(JSGlobalObject* globalObject, JSValue a, JSValue b)
{
    // We want +0 and -0 to compare as equal here. sameValue() itself doesn't
    // guarantee that; however, we normalize all keys before comparing and
    // storing them in the map, and that normalization converts both -0.0 and
    // 0.0 to the integer representation of 0.
    return sameValue(globalObject, a, b);
}

// Note that normalization is inlined in DFG's NormalizeMapKey.
// Keep in sync with the implementation of DFG and FTL normalization.
ALWAYS_INLINE JSValue normalizeMapKey(JSValue key)
{
    if (!key.isNumber())
        return key;

    if (key.isInt32())
        return key;

    double d = key.asDouble();
    if (std::isnan(d))
        return jsNaN();

    int i = static_cast<int>(d);
    if (i == d) {
        // When a key is -0, we convert it to positive zero.
        // When a key is the double representation for an integer, we convert it to an integer.
        return jsNumber(i);
    }
    // This means key is definitely not negative zero, and it's definitely not a double representation of an integer.
    return key;
}

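// Thomas Wang's 64-bit-to-32-bit integer hash. Non-string keys are hashed by
// mixing the raw EncodedJSValue bits, so numbers, booleans, and cell pointers
// all get well-distributed hashes.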
static ALWAYS_INLINE uint32_t wangsInt64Hash(uint64_t key)
{
    key += ~(key << 32);
    key ^= (key >> 22);
    key += ~(key << 13);
    key ^= (key >> 8);
    key += (key << 3);
    key ^= (key >> 15);
    key += ~(key << 27);
    key ^= (key >> 31);
    return static_cast<unsigned>(key);
}

ALWAYS_INLINE uint32_t jsMapHash(JSGlobalObject* globalObject, VM& vm, JSValue value)
{
    ASSERT_WITH_MESSAGE(normalizeMapKey(value) == value, "We expect normalized values flowing into this function.");

    if (value.isString()) {
        auto scope = DECLARE_THROW_SCOPE(vm);
        const String& wtfString = asString(value)->value(globalObject);
        RETURN_IF_EXCEPTION(scope, UINT_MAX);
        return wtfString.impl()->hash();
    }

    return wangsInt64Hash(JSValue::encode(value));
}

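// Like jsMapHash, but safe to call off the main thread (e.g. from concurrent
// compiler threads): it gives up (returns WTF::nullopt) rather than resolving
// rope strings or hashing very long ones.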
ALWAYS_INLINE Optional<uint32_t> concurrentJSMapHash(JSValue key)
{
    key = normalizeMapKey(key);
    if (key.isString()) {
        JSString* string = asString(key);
        if (string->length() > 10 * 1024)
            return WTF::nullopt;
        const StringImpl* impl = string->tryGetValueImpl();
        if (!impl)
            return WTF::nullopt;
        return impl->concurrentHash();
    }

    uint64_t rawValue = JSValue::encode(key);
    return wangsInt64Hash(rawValue);
}

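// Load-factor policy: shrink when the table is at most 1/8th full (and bigger
// than the minimum capacity of 4); rehash after an add once live plus deleted
// entries reach half of capacity.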
ALWAYS_INLINE uint32_t shouldShrink(uint32_t capacity, uint32_t keyCount)
{
    return 8 * keyCount <= capacity && capacity > 4;
}

ALWAYS_INLINE uint32_t shouldRehashAfterAdd(uint32_t capacity, uint32_t keyCount, uint32_t deleteCount)
{
    return 2 * (keyCount + deleteCount) >= capacity;
}

ALWAYS_INLINE uint32_t nextCapacity(uint32_t capacity, uint32_t keyCount)
{
    if (shouldShrink(capacity, keyCount)) {
        ASSERT((capacity / 2) >= 4);
        return capacity / 2;
    }

    if (3 * keyCount <= capacity && capacity > 64) {
        // We stay at the same size if rehashing would cause us to be no more than
        // 1/3rd full. This comes up for programs like this:
        // Say the hash table grew to a key count of 64, causing it to grow to a capacity of 256.
        // Then, the table added 63 items. The load is now 127. Then, 63 items are deleted.
        // The load is still 127. Then, another item is added. The load is now 128, and we
        // decide that we need to rehash. The key count is 65, almost exactly what it was
        // when we grew to a capacity of 256. We don't really need to grow to a capacity
        // of 512 in this situation. Instead, we choose to rehash at the same size. This
        // will bring the load down to 65. We rehash into the same size when we determine
        // that the new load ratio will be under 1/3rd. (We also pick a minimum capacity
        // at which this rule kicks in, because otherwise we would be too sensitive to
        // rehashing at the same capacity.)
        return capacity;
    }
    return (Checked<uint32_t>(capacity) * 2).unsafeGet();
}

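// HashMapImpl combines the two structures above: an open-addressed, linearly
// probed table of bucket pointers for O(1) lookup, and the insertion-ordered
// doubly linked list of buckets (between the m_head and m_tail sentinels) that
// gives Map and Set their required iteration order. Capacity is always a power
// of two so probe indices can be computed with a mask.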
template <typename HashMapBucketType>
class HashMapImpl : public JSNonFinalObject {
    using Base = JSNonFinalObject;
    using HashMapBufferType = HashMapBuffer<HashMapBucketType>;

public:
    using BucketType = HashMapBucketType;

    static void visitChildren(JSCell*, SlotVisitor&);

    static size_t estimatedSize(JSCell*, VM&);

    HashMapImpl(VM& vm, Structure* structure)
        : Base(vm, structure)
        , m_keyCount(0)
        , m_deleteCount(0)
        , m_capacity(4)
    {
    }

    HashMapImpl(VM& vm, Structure* structure, uint32_t sizeHint)
        : Base(vm, structure)
        , m_keyCount(0)
        , m_deleteCount(0)
    {
        uint32_t capacity = ((Checked<uint32_t>(sizeHint) * 2) + 1).unsafeGet();
        capacity = std::max<uint32_t>(WTF::roundUpToPowerOfTwo(capacity), 4U);
        m_capacity = capacity;
    }

    ALWAYS_INLINE HashMapBucketType** buffer() const
    {
        return m_buffer->buffer();
    }

    void finishCreation(JSGlobalObject* globalObject, VM& vm)
    {
        ASSERT_WITH_MESSAGE(HashMapBucket<HashMapBucketDataKey>::offsetOfKey() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey(), "We assume this to be true in the DFG and FTL JIT.");

        auto scope = DECLARE_THROW_SCOPE(vm);
        Base::finishCreation(vm);

        makeAndSetNewBuffer(globalObject, vm);
        RETURN_IF_EXCEPTION(scope, void());

        setUpHeadAndTail(globalObject, vm);
    }

    void finishCreation(JSGlobalObject* globalObject, VM& vm, HashMapImpl* base)
    {
        auto scope = DECLARE_THROW_SCOPE(vm);
        Base::finishCreation(vm);

        // This size should match what we would end up with if the map were
        // cloned by calling add() for each entry.
        uint32_t capacity = ((Checked<uint32_t>(base->m_keyCount) * 2) + 1).unsafeGet();
        RELEASE_ASSERT(capacity <= (1U << 31));
        capacity = std::max<uint32_t>(WTF::roundUpToPowerOfTwo(capacity), 4U);
        m_capacity = capacity;
        makeAndSetNewBuffer(globalObject, vm);
        RETURN_IF_EXCEPTION(scope, void());

        setUpHeadAndTail(globalObject, vm);

        HashMapBucketType* bucket = base->m_head.get()->next();
        while (bucket) {
            if (!bucket->deleted()) {
                addNormalizedNonExistingForCloning(globalObject, bucket->key(), HashMapBucketType::extractValue(*bucket));
                RETURN_IF_EXCEPTION(scope, void());
            }
            bucket = bucket->next();
        }
        checkConsistency();
    }

    static HashMapBucketType* emptyValue()
    {
        return bitwise_cast<HashMapBucketType*>(static_cast<uintptr_t>(-1));
    }

    static ALWAYS_INLINE bool isEmpty(HashMapBucketType* bucket)
    {
        return bucket == emptyValue();
    }

    static HashMapBucketType* deletedValue()
    {
        return bitwise_cast<HashMapBucketType*>(static_cast<uintptr_t>(-3));
    }

    static ALWAYS_INLINE bool isDeleted(HashMapBucketType* bucket)
    {
        return bucket == deletedValue();
    }

    ALWAYS_INLINE HashMapBucketType** findBucket(JSGlobalObject* globalObject, JSValue key)
    {
        VM& vm = getVM(globalObject);
        auto scope = DECLARE_THROW_SCOPE(vm);
        key = normalizeMapKey(key);
        uint32_t hash = jsMapHash(globalObject, vm, key);
        RETURN_IF_EXCEPTION(scope, nullptr);
        return findBucket(globalObject, key, hash);
    }

    ALWAYS_INLINE HashMapBucketType** findBucket(JSGlobalObject* globalObject, JSValue key, uint32_t hash)
    {
        ASSERT_WITH_MESSAGE(normalizeMapKey(key) == key, "We expect normalized values flowing into this function.");
        return findBucketAlreadyHashedAndNormalized(globalObject, key, hash);
    }

    template <typename T = HashMapBucketType>
    ALWAYS_INLINE typename std::enable_if<std::is_same<T, HashMapBucket<HashMapBucketDataKeyValue>>::value, JSValue>::type get(JSGlobalObject* globalObject, JSValue key)
    {
        if (HashMapBucketType** bucket = findBucket(globalObject, key))
            return (*bucket)->value();
        return jsUndefined();
    }

    ALWAYS_INLINE bool has(JSGlobalObject* globalObject, JSValue key)
    {
        return !!findBucket(globalObject, key);
    }

    ALWAYS_INLINE void add(JSGlobalObject* globalObject, JSValue key, JSValue value = JSValue())
    {
        key = normalizeMapKey(key);
        addNormalizedInternal(globalObject, key, value, [&] (HashMapBucketType* bucket) {
            return !isDeleted(bucket) && areKeysEqual(globalObject, key, bucket->key());
        });
        if (shouldRehashAfterAdd())
            rehash(globalObject);
    }

    ALWAYS_INLINE HashMapBucketType* addNormalized(JSGlobalObject* globalObject, JSValue key, JSValue value, uint32_t hash)
    {
        ASSERT_WITH_MESSAGE(normalizeMapKey(key) == key, "We expect normalized values flowing into this function.");
        ASSERT_WITH_MESSAGE(jsMapHash(globalObject, getVM(globalObject), key) == hash, "We expect the caller to pass the hash of the normalized key.");

        auto* bucket = addNormalizedInternal(getVM(globalObject), key, value, hash, [&] (HashMapBucketType* bucket) {
            return !isDeleted(bucket) && areKeysEqual(globalObject, key, bucket->key());
        });
        if (shouldRehashAfterAdd())
            rehash(globalObject);
        return bucket;
    }

    ALWAYS_INLINE bool remove(JSGlobalObject* globalObject, JSValue key)
    {
        HashMapBucketType** bucket = findBucket(globalObject, key);
        if (!bucket)
            return false;

        VM& vm = getVM(globalObject);
        HashMapBucketType* impl = *bucket;
        impl->next()->setPrev(vm, impl->prev());
        impl->prev()->setNext(vm, impl->next());
        impl->makeDeleted(vm);

        *bucket = deletedValue();

        ++m_deleteCount;
        ASSERT(m_keyCount > 0);
        --m_keyCount;

        if (shouldShrink())
            rehash(globalObject);

        return true;
    }

    ALWAYS_INLINE uint32_t size() const
    {
        return m_keyCount;
    }

    ALWAYS_INLINE void clear(JSGlobalObject* globalObject)
    {
        VM& vm = getVM(globalObject);
        m_keyCount = 0;
        m_deleteCount = 0;
        HashMapBucketType* head = m_head.get();
        HashMapBucketType* bucket = m_head->next();
        HashMapBucketType* tail = m_tail.get();
        while (bucket != tail) {
            HashMapBucketType* next = bucket->next();
            // We restart each iterator by pointing it to the head of the list.
            bucket->setNext(vm, head);
            bucket->makeDeleted(vm);
            bucket = next;
        }
        m_head->setNext(vm, m_tail.get());
        m_tail->setPrev(vm, m_head.get());
        m_capacity = 4;
        makeAndSetNewBuffer(globalObject, vm);
        checkConsistency();
    }

    ALWAYS_INLINE size_t bufferSizeInBytes() const
    {
        return m_capacity * sizeof(HashMapBucketType*);
    }

    static ptrdiff_t offsetOfHead()
    {
        return OBJECT_OFFSETOF(HashMapImpl<HashMapBucketType>, m_head);
    }

    static ptrdiff_t offsetOfBuffer()
    {
        return OBJECT_OFFSETOF(HashMapImpl<HashMapBucketType>, m_buffer);
    }

    static ptrdiff_t offsetOfCapacity()
    {
        return OBJECT_OFFSETOF(HashMapImpl<HashMapBucketType>, m_capacity);
    }

    HashMapBucketType* head() { return m_head.get(); }
    HashMapBucketType* tail() { return m_tail.get(); }

    size_t approximateSize() const
    {
        size_t size = sizeof(HashMapImpl);
        size += bufferSizeInBytes();
        size += 2 * sizeof(HashMapBucketType); // Head and tail sentinel buckets.
        size += m_keyCount * sizeof(HashMapBucketType); // Live buckets on the insertion-order list.
        return size;
    }

private:
    ALWAYS_INLINE uint32_t shouldRehashAfterAdd() const
    {
        return JSC::shouldRehashAfterAdd(m_capacity, m_keyCount, m_deleteCount);
    }

    ALWAYS_INLINE uint32_t shouldShrink() const
    {
        return JSC::shouldShrink(m_capacity, m_keyCount);
    }

    ALWAYS_INLINE void setUpHeadAndTail(JSGlobalObject*, VM& vm)
    {
        m_head.set(vm, this, HashMapBucketType::create(vm));
        m_tail.set(vm, this, HashMapBucketType::create(vm));

        m_head->setNext(vm, m_tail.get());
        m_tail->setPrev(vm, m_head.get());
        ASSERT(m_head->deleted());
        ASSERT(m_tail->deleted());
    }

    ALWAYS_INLINE void addNormalizedNonExistingForCloning(JSGlobalObject* globalObject, JSValue key, JSValue value = JSValue())
    {
        addNormalizedInternal(globalObject, key, value, [&] (HashMapBucketType*) {
            return false;
        });
    }

    template<typename CanUseBucket>
    ALWAYS_INLINE void addNormalizedInternal(JSGlobalObject* globalObject, JSValue key, JSValue value, const CanUseBucket& canUseBucket)
    {
        VM& vm = getVM(globalObject);
        auto scope = DECLARE_THROW_SCOPE(vm);

        uint32_t hash = jsMapHash(globalObject, vm, key);
        RETURN_IF_EXCEPTION(scope, void());
        scope.release();
        addNormalizedInternal(vm, key, value, hash, canUseBucket);
    }

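    // The table is linearly probed: capacity is a power of two, so
    // `hash & (capacity - 1)` picks the home slot and probing walks forward one
    // slot at a time, wrapping at the end. For example, with capacity 8 and
    // hash 13, the probe sequence is slots 5, 6, 7, 0, 1, ...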
    template<typename CanUseBucket>
    ALWAYS_INLINE HashMapBucketType* addNormalizedInternal(VM& vm, JSValue key, JSValue value, uint32_t hash, const CanUseBucket& canUseBucket)
    {
        ASSERT_WITH_MESSAGE(normalizeMapKey(key) == key, "We expect normalized values flowing into this function.");

        const uint32_t mask = m_capacity - 1;
        uint32_t index = hash & mask;
        HashMapBucketType** buffer = this->buffer();
        HashMapBucketType* bucket = buffer[index];
        while (!isEmpty(bucket)) {
            if (canUseBucket(bucket)) {
                bucket->setValue(vm, value);
                return bucket;
            }
            index = (index + 1) & mask;
            bucket = buffer[index];
        }

        HashMapBucketType* newEntry = m_tail.get();
        buffer[index] = newEntry;
        newEntry->setKey(vm, key);
        newEntry->setValue(vm, value);
        ASSERT(!newEntry->deleted());
        HashMapBucketType* newTail = HashMapBucketType::create(vm);
        m_tail.set(vm, this, newTail);
        newTail->setPrev(vm, newEntry);
        ASSERT(newTail->deleted());
        newEntry->setNext(vm, newTail);

        ++m_keyCount;
        return newEntry;
    }

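    // Lookup must follow the same probe sequence as addNormalizedInternal:
    // start at the home slot and walk forward, skipping deleted slots, until
    // either the key or an empty slot (which proves absence) is found.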
    ALWAYS_INLINE HashMapBucketType** findBucketAlreadyHashedAndNormalized(JSGlobalObject* globalObject, JSValue key, uint32_t hash)
    {
        const uint32_t mask = m_capacity - 1;
        uint32_t index = hash & mask;
        HashMapBucketType** buffer = this->buffer();
        HashMapBucketType* bucket = buffer[index];

        while (!isEmpty(bucket)) {
            if (!isDeleted(bucket) && areKeysEqual(globalObject, key, bucket->key()))
                return buffer + index;
            index = (index + 1) & mask;
            bucket = buffer[index];
        }
        return nullptr;
    }

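    // Rehash repopulates the table by walking the insertion-order list rather
    // than the old buffer, which also squeezes out deleted slots. Every key on
    // the list has been hashed before (so any rope strings were already
    // resolved), which is why jsMapHash cannot throw here.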
    void rehash(JSGlobalObject* globalObject)
    {
        VM& vm = getVM(globalObject);
        auto scope = DECLARE_THROW_SCOPE(vm);

        uint32_t oldCapacity = m_capacity;
        m_capacity = nextCapacity(m_capacity, m_keyCount);

        if (m_capacity != oldCapacity) {
            makeAndSetNewBuffer(globalObject, vm);
            RETURN_IF_EXCEPTION(scope, void());
        } else {
            m_buffer->reset(m_capacity);
            assertBufferIsEmpty();
        }

        HashMapBucketType* iter = m_head->next();
        HashMapBucketType* end = m_tail.get();
        const uint32_t mask = m_capacity - 1;
        RELEASE_ASSERT(!(m_capacity & (m_capacity - 1)));
        HashMapBucketType** buffer = this->buffer();
        while (iter != end) {
            uint32_t index = jsMapHash(globalObject, vm, iter->key()) & mask;
            EXCEPTION_ASSERT_WITH_MESSAGE(!scope.exception(), "All keys should already be hashed before, so this should not throw because it won't resolve ropes.");
            {
                HashMapBucketType* bucket = buffer[index];
                while (!isEmpty(bucket)) {
                    index = (index + 1) & mask;
                    bucket = buffer[index];
                }
            }
            buffer[index] = iter;
            iter = iter->next();
        }

        m_deleteCount = 0;

        checkConsistency();
    }

    ALWAYS_INLINE void checkConsistency() const
    {
        if (!ASSERT_DISABLED) {
            HashMapBucketType* iter = m_head->next();
            HashMapBucketType* end = m_tail.get();
            uint32_t size = 0;
            while (iter != end) {
                ++size;
                iter = iter->next();
            }
            ASSERT(size == m_keyCount);
        }
    }

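    // On allocation failure, HashMapBuffer::create() has already thrown an
    // out-of-memory error on this global object; we leave the old buffer in
    // place and simply return.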
    void makeAndSetNewBuffer(JSGlobalObject* globalObject, VM& vm)
    {
        ASSERT(!(m_capacity & (m_capacity - 1)));

        HashMapBufferType* buffer = HashMapBufferType::create(globalObject, vm, this, m_capacity);
        if (UNLIKELY(!buffer))
            return;

        m_buffer.set(vm, this, buffer);
        assertBufferIsEmpty();
    }

    ALWAYS_INLINE void assertBufferIsEmpty() const
    {
        if (!ASSERT_DISABLED) {
            for (unsigned i = 0; i < m_capacity; i++)
                ASSERT(isEmpty(buffer()[i]));
        }
    }

    WriteBarrier<HashMapBucketType> m_head;
    WriteBarrier<HashMapBucketType> m_tail;
    AuxiliaryBarrier<HashMapBufferType*> m_buffer;
    uint32_t m_keyCount;
    uint32_t m_deleteCount;
    uint32_t m_capacity;
};

} // namespace JSC