1/*
2 * Copyright (C) 2017 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
14 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
15 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
17 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
18 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
20 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
23 * THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "MarkingConstraintSolver.h"
28
29#include "JSCInlines.h"
30#include "MarkingConstraintSet.h"
31
32namespace JSC {
33
34MarkingConstraintSolver::MarkingConstraintSolver(MarkingConstraintSet& set)
35 : m_heap(set.m_heap)
36 , m_mainVisitor(m_heap.collectorSlotVisitor())
37 , m_set(set)
38{
39 m_heap.forEachSlotVisitor(
40 [&] (SlotVisitor& visitor) {
41 m_visitCounters.append(VisitCounter(visitor));
42 });
43}
44
45MarkingConstraintSolver::~MarkingConstraintSolver()
46{
47}
48
49bool MarkingConstraintSolver::didVisitSomething() const
50{
51 for (const VisitCounter& visitCounter : m_visitCounters) {
52 if (visitCounter.visitCount())
53 return true;
54 }
55 return false;
56}
57
// Drives the scheduling loop until pickNext is exhausted and no running
// constraint can produce more parallel work. pickNext yields constraint
// indices into m_set.m_set; `preference` decides whether execution threads
// poll queued parallel tasks or the next constraint first.
void MarkingConstraintSolver::execute(SchedulerPreference preference, ScopedLambda<Optional<unsigned>()> pickNext)
{
    m_pickNextIsStillActive = true;
    RELEASE_ASSERT(!m_numThreadsThatMayProduceWork);
    
    if (Options::useParallelMarkingConstraintSolver()) {
        if (Options::logGC())
            dataLog(preference == ParallelWorkFirst ? "P" : "N", "<");
        
        // Run the scheduling loop on all marker threads at once; the threads
        // coordinate through m_lock/m_condition inside runExecutionThread().
        m_heap.runFunctionInParallel(
            [&] (SlotVisitor& visitor) { runExecutionThread(visitor, preference, pickNext); });
        
        if (Options::logGC())
            dataLog(">");
    } else
        runExecutionThread(m_mainVisitor, preference, pickNext);
    
    // Both must have been driven back to their idle state by the threads.
    RELEASE_ASSERT(!m_pickNextIsStillActive);
    RELEASE_ASSERT(!m_numThreadsThatMayProduceWork);
    
    // Constraints with ConstraintConcurrency::Sequential were deferred by the
    // execution threads; run them now, one by one, on the main visitor.
    if (!m_toExecuteSequentially.isEmpty()) {
        for (unsigned indexToRun : m_toExecuteSequentially)
            execute(*m_set.m_set[indexToRun]);
        m_toExecuteSequentially.clear();
    }
    
    // Every queued parallel task must have been drained by the threads.
    RELEASE_ASSERT(m_toExecuteInParallel.isEmpty());
}
86
87void MarkingConstraintSolver::drain(BitVector& unexecuted)
88{
89 auto iter = unexecuted.begin();
90 auto end = unexecuted.end();
91 if (iter == end)
92 return;
93 auto pickNext = scopedLambda<Optional<unsigned>()>(
94 [&] () -> Optional<unsigned> {
95 if (iter == end)
96 return WTF::nullopt;
97 return *iter++;
98 });
99 execute(NextConstraintFirst, pickNext);
100 unexecuted.clearAll();
101}
102
103void MarkingConstraintSolver::converge(const Vector<MarkingConstraint*>& order)
104{
105 if (didVisitSomething())
106 return;
107
108 if (order.isEmpty())
109 return;
110
111 size_t index = 0;
112
113 // We want to execute the first constraint sequentially if we think it will quickly give us a
114 // result. If we ran it in parallel to other constraints, then we might end up having to wait for
115 // those other constraints to finish, which would be a waste of time since during convergence it's
116 // empirically most optimal to return to draining as soon as a constraint generates work. Most
117 // constraints don't generate any work most of the time, and when they do generate work, they tend
118 // to generate enough of it to feed a decent draining cycle. Therefore, pause times are lowest if
119 // we get the heck out of here as soon as a constraint generates work. I think that part of what
120 // makes this optimal is that we also never abort running a constraint early, so when we do run
121 // one, it has an opportunity to generate as much work as it possibly can.
122 if (order[index]->quickWorkEstimate(m_mainVisitor) > 0.) {
123 execute(*order[index++]);
124
125 if (m_toExecuteInParallel.isEmpty()
126 && (order.isEmpty() || didVisitSomething()))
127 return;
128 }
129
130 auto pickNext = scopedLambda<Optional<unsigned>()>(
131 [&] () -> Optional<unsigned> {
132 if (didVisitSomething())
133 return WTF::nullopt;
134
135 if (index >= order.size())
136 return WTF::nullopt;
137
138 MarkingConstraint& constraint = *order[index++];
139 return constraint.index();
140 });
141
142 execute(ParallelWorkFirst, pickNext);
143}
144
145void MarkingConstraintSolver::execute(MarkingConstraint& constraint)
146{
147 if (m_executed.get(constraint.index()))
148 return;
149
150 constraint.prepareToExecute(NoLockingNecessary, m_mainVisitor);
151 constraint.execute(m_mainVisitor);
152 m_executed.set(constraint.index());
153}
154
155void MarkingConstraintSolver::addParallelTask(RefPtr<SharedTask<void(SlotVisitor&)>> task, MarkingConstraint& constraint)
156{
157 auto locker = holdLock(m_lock);
158 m_toExecuteInParallel.append(TaskWithConstraint(WTFMove(task), &constraint));
159}
160
// The scheduling loop run by each execution thread (or just the main thread
// when the parallel solver is disabled). Each iteration claims one unit of
// work under m_lock -- either a queued parallel task or the next constraint
// from pickNext -- runs it outside the lock, then publishes the result and
// wakes any waiting threads.
void MarkingConstraintSolver::runExecutionThread(SlotVisitor& visitor, SchedulerPreference preference, ScopedLambda<Optional<unsigned>()> pickNext)
{
    for (;;) {
        bool doParallelWorkMode;
        MarkingConstraint* constraint = nullptr;
        unsigned indexToRun = UINT_MAX;
        TaskWithConstraint task;
        {
            auto locker = holdLock(m_lock);
            
            for (;;) {
                // Claim the oldest queued parallel task, if any. The task is
                // deliberately left at the head of the deque while we run it
                // (it is removed after execution, below), so several threads
                // can be running the same task at once.
                auto tryParallelWork = [&] () -> bool {
                    if (m_toExecuteInParallel.isEmpty())
                        return false;
                    
                    task = m_toExecuteInParallel.first();
                    constraint = task.constraint;
                    doParallelWorkMode = true;
                    return true;
                };
                
                // Claim the next not-yet-executed constraint from pickNext.
                // Sequential-only constraints are deferred to
                // m_toExecuteSequentially (run later by execute(preference, ...)).
                // A parallel-capable constraint bumps m_numThreadsThatMayProduceWork
                // so idle threads keep waiting for work it might enqueue.
                auto tryNextConstraint = [&] () -> bool {
                    if (!m_pickNextIsStillActive)
                        return false;
                    
                    for (;;) {
                        Optional<unsigned> pickResult = pickNext();
                        if (!pickResult) {
                            // Exhausted: remember this so no thread polls
                            // pickNext again.
                            m_pickNextIsStillActive = false;
                            return false;
                        }
                        
                        if (m_executed.get(*pickResult))
                            continue;
                        
                        MarkingConstraint& candidateConstraint = *m_set.m_set[*pickResult];
                        if (candidateConstraint.concurrency() == ConstraintConcurrency::Sequential) {
                            m_toExecuteSequentially.append(*pickResult);
                            continue;
                        }
                        if (candidateConstraint.parallelism() == ConstraintParallelism::Parallel)
                            m_numThreadsThatMayProduceWork++;
                        indexToRun = *pickResult;
                        constraint = &candidateConstraint;
                        doParallelWorkMode = false;
                        // Still under m_lock here, as the locker argument attests.
                        constraint->prepareToExecute(locker, visitor);
                        return true;
                    }
                };
                
                // The preference only decides which source of work we poll first.
                if (preference == ParallelWorkFirst) {
                    if (tryParallelWork() || tryNextConstraint())
                        break;
                } else {
                    if (tryNextConstraint() || tryParallelWork())
                        break;
                }
                
                // This means that we have nothing left to run. The only way for us to have more work is
                // if someone is running a constraint that may produce parallel work.
                
                if (!m_numThreadsThatMayProduceWork)
                    return;
                
                // FIXME: Any waiting could be replaced with just running the SlotVisitor.
                // I wonder if that would be profitable.
                m_condition.wait(m_lock);
            }
        }
        
        // Run the claimed work without holding the lock.
        if (doParallelWorkMode)
            constraint->doParallelWork(visitor, *task.task);
        else {
            if (constraint->parallelism() == ConstraintParallelism::Parallel) {
                // Expose the running constraint and this solver on the visitor
                // -- presumably so work generated while the constraint runs can
                // be routed back to addParallelTask(); confirm against SlotVisitor.
                visitor.m_currentConstraint = constraint;
                visitor.m_currentSolver = this;
            }
            
            constraint->execute(visitor);
            
            // Cleared unconditionally; they were null before this iteration.
            visitor.m_currentConstraint = nullptr;
            visitor.m_currentSolver = nullptr;
        }
        
        {
            auto locker = holdLock(m_lock);
            
            if (doParallelWorkMode) {
                // Dequeue the task unless another thread that ran it finished
                // first and already removed it.
                if (!m_toExecuteInParallel.isEmpty()
                    && task == m_toExecuteInParallel.first())
                    m_toExecuteInParallel.takeFirst();
                else
                    ASSERT(!m_toExecuteInParallel.contains(task));
            } else {
                if (constraint->parallelism() == ConstraintParallelism::Parallel)
                    m_numThreadsThatMayProduceWork--;
                m_executed.set(indexToRun);
            }
            
            // State changed (task finished, constraint executed, or one fewer
            // potential producer): wake waiters so they re-poll or bail out.
            m_condition.notifyAll();
        }
    }
}
264
265} // namespace JSC
266
267