1 | /* |
2 | * Copyright (C) 2017 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include <wtf/ConcurrentPtrHashSet.h> |
28 | |
29 | #include <wtf/DataLog.h> |
30 | |
31 | namespace WTF { |
32 | |
// Constructs the set with a fresh, empty initial table (see initialize()).
ConcurrentPtrHashSet::ConcurrentPtrHashSet()
{
    initialize();
}
37 | |
38 | ConcurrentPtrHashSet::~ConcurrentPtrHashSet() |
39 | { |
40 | } |
41 | |
42 | void ConcurrentPtrHashSet::deleteOldTables() |
43 | { |
44 | // This is just in case. It does not make it OK for other threads to call add(). But it might prevent |
45 | // some bad crashes if we did make that mistake. |
46 | auto locker = holdLock(m_lock); |
47 | |
48 | m_allTables.removeAllMatching( |
49 | [&] (std::unique_ptr<Table>& table) -> bool { |
50 | return table.get() != m_table.loadRelaxed(); |
51 | }); |
52 | } |
53 | |
// Empties the set: discards every table (old and live), then installs a fresh
// empty one. Must not race with add().
void ConcurrentPtrHashSet::clear()
{
    // This is just in case. It does not make it OK for other threads to call add(). But it might prevent
    // some bad crashes if we did make that mistake.
    auto locker = holdLock(m_lock);

    m_allTables.clear();
    initialize();
}
63 | |
64 | void ConcurrentPtrHashSet::initialize() |
65 | { |
66 | constexpr unsigned initialSize = 32; |
67 | std::unique_ptr<Table> table = Table::create(initialSize); |
68 | m_table.storeRelaxed(table.get()); |
69 | m_allTables.append(WTFMove(table)); |
70 | } |
71 | |
// Slow path of add(): linear-probes from |index| (having started at
// |startIndex|) until it either claims an empty slot for |ptr| (returns true)
// or finds |ptr| already present (returns false).
bool ConcurrentPtrHashSet::addSlow(Table* table, unsigned mask, unsigned startIndex, unsigned index, void* ptr)
{
    // Optimistically bump the load count; if the table is at capacity, grow it
    // and redo the add on the new table. Note this counts attempted adds, so
    // load can over-estimate the number of distinct entries (duplicates also
    // increment it); resizeIfNecessary() recomputes an exact load.
    if (table->load.exchangeAdd(1) >= table->maxLoad())
        return resizeAndAdd(ptr);

    for (;;) {
        // CAS nullptr -> ptr: succeeds only if the slot was empty.
        void* oldEntry = table->array[index].compareExchangeStrong(nullptr, ptr);
        if (!oldEntry) {
            if (m_table.load() != table) {
                // We added an entry to an old table! We need to reexecute the add on the new table.
                return add(ptr);
            }
            return true;
        }
        if (oldEntry == ptr)
            return false; // Already in the set.
        index = (index + 1) & mask;
        // Presumably maxLoad() keeps the table from ever filling completely,
        // so the probe must terminate — confirm against Table's header.
        RELEASE_ASSERT(index != startIndex);
    }
}
92 | |
// Doubles the live table once its load reaches maxLoad(). m_lock serializes
// resizers; a thread that lost the race sees the already-grown table on the
// re-check below and returns without doing anything.
void ConcurrentPtrHashSet::resizeIfNecessary()
{
    auto locker = holdLock(m_lock);
    Table* table = m_table.loadRelaxed();
    // Re-check under the lock: another thread may have already resized.
    if (table->load.loadRelaxed() < table->maxLoad())
        return;

    std::unique_ptr<Table> newTable = Table::create(table->size * 2);
    unsigned mask = newTable->mask;
    unsigned load = 0;
    // Rehash every entry of the old table into the new one. The new table is
    // not yet published, so relaxed accesses to it are sufficient here.
    for (unsigned i = 0; i < table->size; ++i) {
        void* ptr = table->array[i].loadRelaxed();
        if (!ptr)
            continue;

        unsigned startIndex = hash(ptr) & mask;
        unsigned index = startIndex;
        for (;;) {
            Atomic<void*>& entryRef = newTable->array[index];
            void* entry = entryRef.loadRelaxed();
            if (!entry) {
                entryRef.storeRelaxed(ptr);
                break;
            }
            RELEASE_ASSERT(entry != ptr); // Old table never holds duplicates.
            index = (index + 1) & mask;
            RELEASE_ASSERT(index != startIndex);
        }

        load++;
    }

    // Exact count of distinct entries; the old table's load may have been
    // inflated by concurrent/duplicate add attempts (see addSlow()).
    newTable->load.storeRelaxed(load);

    // Publish with an ordered store (not storeRelaxed) so the fully built
    // table contents are visible before readers observe the new pointer.
    m_table.store(newTable.get());
    // Keep the old table alive: racing add()s may still touch it. It is
    // reclaimed later by deleteOldTables().
    m_allTables.append(WTFMove(newTable));
}
130 | |
// Grows the table (if still necessary once the lock is taken), then retries
// the add from scratch against the (possibly new) live table.
bool ConcurrentPtrHashSet::resizeAndAdd(void* ptr)
{
    resizeIfNecessary();
    return add(ptr);
}
136 | |
// Allocates a Table with |size| trailing slots. |size| must be a power of two
// (mask is size - 1 and probing relies on masking).
std::unique_ptr<ConcurrentPtrHashSet::Table> ConcurrentPtrHashSet::Table::create(unsigned size)
{
    // The slot array is a trailing variable-length array: allocate the header
    // plus |size| Atomic<void*> slots as one fastMalloc block, then
    // placement-new the Table at its start.
    // NOTE(review): the unique_ptr will free this via Table's delete —
    // presumably Table is declared WTF_MAKE_FAST_ALLOCATED (or similar) in
    // the header so that delete routes to fastFree; confirm, since pairing
    // fastMalloc with global operator delete would be a mismatch.
    std::unique_ptr<ConcurrentPtrHashSet::Table> result(new (fastMalloc(OBJECT_OFFSETOF(Table, array) + sizeof(Atomic<void*>) * size)) Table());
    result->size = size;
    result->mask = size - 1;
    result->load.storeRelaxed(0);
    // All slots start empty; nullptr means vacant.
    for (unsigned i = 0; i < size; ++i)
        result->array[i].storeRelaxed(nullptr);
    return result;
}
147 | |
148 | } // namespace WTF |
149 | |
150 | |