1 | /* |
2 | * Copyright (C) 2013-2018 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | #include "DFGWorklist.h" |
28 | |
29 | #include "CodeBlock.h" |
30 | #include "DFGSafepoint.h" |
31 | #include "DeferGC.h" |
32 | #include "JSCInlines.h" |
33 | #include "ReleaseHeapAccessScope.h" |
34 | #include <mutex> |
35 | |
36 | namespace JSC { namespace DFG { |
37 | |
38 | #if ENABLE(DFG_JIT) |
39 | |
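// Each worklist thread is an AutomaticThread: it sleeps on the shared m_planEnqueued
// condition, claims a Plan from m_queue in poll(), and compiles it in work(). A null
// Plan in the queue is the signal for the thread to shut down.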
40 | class Worklist::ThreadBody : public AutomaticThread { |
41 | public: |
42 | ThreadBody(const AbstractLocker& locker, Worklist& worklist, ThreadData& data, Box<Lock> lock, Ref<AutomaticThreadCondition>&& condition, int relativePriority) |
43 | : AutomaticThread(locker, lock, WTFMove(condition)) |
44 | , m_worklist(worklist) |
45 | , m_data(data) |
46 | , m_relativePriority(relativePriority) |
47 | { |
48 | } |
49 | |
50 | const char* name() const override |
51 | { |
52 | return m_worklist.m_threadName.data(); |
53 | } |
54 | |
55 | protected: |
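// Called by AutomaticThread with the worklist lock held: wait if the queue is empty,
// stop if a null (shutdown) Plan was dequeued, otherwise claim the Plan and report work.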
56 | PollResult poll(const AbstractLocker& locker) override |
57 | { |
58 | if (m_worklist.m_queue.isEmpty()) |
59 | return PollResult::Wait; |
60 | |
61 | m_plan = m_worklist.m_queue.takeFirst(); |
62 | if (!m_plan) { |
63 | if (Options::verboseCompilationQueue()) { |
64 | m_worklist.dump(locker, WTF::dataFile()); |
65 | dataLog(": Thread shutting down\n"); |
66 | } |
67 | return PollResult::Stop; |
68 | } |
69 | RELEASE_ASSERT(m_plan->stage() == Plan::Preparing); |
70 | m_worklist.m_numberOfActiveThreads++; |
71 | return PollResult::Work; |
72 | } |
73 | |
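// RAII helper for work(): on every exit path it drops the claimed Plan and decrements
// the worklist's active-thread count under the worklist lock.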
74 | class WorkScope; |
75 | friend class WorkScope; |
76 | class WorkScope { |
77 | public: |
78 | WorkScope(ThreadBody& thread) |
79 | : m_thread(thread) |
80 | { |
81 | RELEASE_ASSERT(m_thread.m_plan); |
82 | RELEASE_ASSERT(m_thread.m_worklist.m_numberOfActiveThreads); |
83 | } |
84 | |
85 | ~WorkScope() |
86 | { |
87 | LockHolder locker(*m_thread.m_worklist.m_lock); |
88 | m_thread.m_plan = nullptr; |
89 | m_thread.m_worklist.m_numberOfActiveThreads--; |
90 | } |
91 | |
92 | private: |
93 | ThreadBody& m_thread; |
94 | }; |
95 | |
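// Compiles the claimed Plan. m_rightToRun is held for the whole compile so that
// suspendAllThreads() can park this thread; the worklist lock is only taken briefly
// around the stage transitions.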
96 | WorkResult work() override |
97 | { |
98 | WorkScope workScope(*this); |
99 | |
100 | LockHolder locker(m_data.m_rightToRun); |
101 | { |
102 | LockHolder locker(*m_worklist.m_lock); |
103 | if (m_plan->stage() == Plan::Cancelled) |
104 | return WorkResult::Continue; |
105 | m_plan->notifyCompiling(); |
106 | } |
107 | |
108 | if (Options::verboseCompilationQueue()) |
109 | dataLog(m_worklist, ": Compiling ", m_plan->key(), " asynchronously\n"); |
110 | |
111 | // There's no way for the GC to be safepointing since we own rightToRun. |
112 | if (m_plan->vm()->heap.worldIsStopped()) { |
113 | dataLog("Heap is stopped but here we are! (1)\n"); |
114 | RELEASE_ASSERT_NOT_REACHED(); |
115 | } |
116 | m_plan->compileInThread(&m_data); |
117 | if (m_plan->stage() != Plan::Cancelled) { |
118 | if (m_plan->vm()->heap.worldIsStopped()) { |
119 | dataLog("Heap is stopped but here we are! (2)\n"); |
120 | RELEASE_ASSERT_NOT_REACHED(); |
121 | } |
122 | } |
123 | |
124 | { |
125 | LockHolder locker(*m_worklist.m_lock); |
126 | if (m_plan->stage() == Plan::Cancelled) |
127 | return WorkResult::Continue; |
128 | |
129 | m_plan->notifyReady(); |
130 | |
131 | if (Options::verboseCompilationQueue()) { |
132 | m_worklist.dump(locker, WTF::dataFile()); |
133 | dataLog(": Compiled ", m_plan->key(), " asynchronously\n"); |
134 | } |
135 | |
136 | m_worklist.m_readyPlans.append(m_plan); |
137 | |
138 | RELEASE_ASSERT(!m_plan->vm()->heap.worldIsStopped()); |
139 | m_worklist.m_planCompiled.notifyAll(); |
140 | } |
141 | |
142 | return WorkResult::Continue; |
143 | } |
144 | |
145 | void threadDidStart() override |
146 | { |
147 | if (Options::verboseCompilationQueue()) |
148 | dataLog(m_worklist, ": Thread started\n"); |
149 | |
150 | if (m_relativePriority) |
151 | Thread::current().changePriority(m_relativePriority); |
152 | |
153 | m_compilationScope = std::make_unique<CompilationScope>(); |
154 | } |
155 | |
156 | void threadIsStopping(const AbstractLocker&) override |
157 | { |
158 | // We're holding the Worklist::m_lock, so we should be careful not to deadlock. |
159 | |
160 | if (Options::verboseCompilationQueue()) |
161 | dataLog(m_worklist, ": Thread will stop\n"); |
162 | |
163 | ASSERT(!m_plan); |
164 | |
165 | m_compilationScope = nullptr; |
166 | m_plan = nullptr; |
167 | } |
168 | |
169 | private: |
170 | Worklist& m_worklist; |
171 | ThreadData& m_data; |
172 | int m_relativePriority; |
173 | std::unique_ptr<CompilationScope> m_compilationScope; |
174 | RefPtr<Plan> m_plan; |
175 | }; |
176 | |
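// Linux limits thread names to 15 characters, so use a compact name there and a more
// descriptive one everywhere else.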
177 | static CString createWorklistName(CString&& tierName) |
178 | { |
179 | #if OS(LINUX) |
180 | return toCString(WTFMove(tierName), "Worker"); |
181 | #else |
182 | return toCString(WTFMove(tierName), " Worklist Worker Thread"); |
183 | #endif |
184 | } |
185 | |
186 | Worklist::Worklist(CString&& tierName) |
187 | : m_threadName(createWorklistName(WTFMove(tierName))) |
188 | , m_planEnqueued(AutomaticThreadCondition::create()) |
189 | , m_lock(Box<Lock>::create()) |
190 | { |
191 | } |
192 | |
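// Shut down by enqueueing one null Plan per thread (the termination signal that poll()
// recognizes), waking all threads, and then joining each of them.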
193 | Worklist::~Worklist() |
194 | { |
195 | { |
196 | LockHolder locker(*m_lock); |
197 | for (unsigned i = m_threads.size(); i--;) |
198 | m_queue.append(nullptr); // Use null plan to indicate that we want the thread to terminate. |
199 | m_planEnqueued->notifyAll(locker); |
200 | } |
201 | for (unsigned i = m_threads.size(); i--;) |
202 | m_threads[i]->m_thread->join(); |
203 | ASSERT(!m_numberOfActiveThreads); |
204 | } |
205 | |
206 | void Worklist::finishCreation(unsigned numberOfThreads, int relativePriority) |
207 | { |
208 | RELEASE_ASSERT(numberOfThreads); |
209 | LockHolder locker(*m_lock); |
210 | for (unsigned i = numberOfThreads; i--;) { |
211 | createNewThread(locker, relativePriority); |
212 | } |
213 | } |
214 | |
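// Must be called with m_lock held. The new thread shares the worklist's lock and its
// m_planEnqueued condition, which is how enqueue() wakes it.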
215 | void Worklist::createNewThread(const AbstractLocker& locker, int relativePriority) |
216 | { |
217 | std::unique_ptr<ThreadData> data = std::make_unique<ThreadData>(this); |
218 | data->m_thread = adoptRef(new ThreadBody(locker, *this, *data, m_lock, m_planEnqueued.copyRef(), relativePriority)); |
219 | m_threads.append(WTFMove(data)); |
220 | } |
221 | |
222 | Ref<Worklist> Worklist::create(CString&& tierName, unsigned numberOfThreads, int relativePriority) |
223 | { |
224 | Ref<Worklist> result = adoptRef(*new Worklist(WTFMove(tierName))); |
225 | result->finishCreation(numberOfThreads, relativePriority); |
226 | return result; |
227 | } |
228 | |
229 | bool Worklist::isActiveForVM(VM& vm) const |
230 | { |
231 | LockHolder locker(*m_lock); |
232 | PlanMap::const_iterator end = m_plans.end(); |
233 | for (PlanMap::const_iterator iter = m_plans.begin(); iter != end; ++iter) { |
234 | if (iter->value->vm() == &vm) |
235 | return true; |
236 | } |
237 | return false; |
238 | } |
239 | |
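// Registers the plan under its CompilationKey, appends it to the queue, and wakes one
// worker thread.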
240 | void Worklist::enqueue(Ref<Plan>&& plan) |
241 | { |
242 | LockHolder locker(*m_lock); |
243 | if (Options::verboseCompilationQueue()) { |
244 | dump(locker, WTF::dataFile()); |
245 | dataLog(": Enqueueing plan to optimize ", plan->key(), "\n"); |
246 | } |
247 | ASSERT(m_plans.find(plan->key()) == m_plans.end()); |
248 | m_plans.add(plan->key(), plan.copyRef()); |
249 | m_queue.append(WTFMove(plan)); |
250 | m_planEnqueued->notifyOne(locker); |
251 | } |
252 | |
253 | Worklist::State Worklist::compilationState(CompilationKey key) |
254 | { |
255 | LockHolder locker(*m_lock); |
256 | PlanMap::iterator iter = m_plans.find(key); |
257 | if (iter == m_plans.end()) |
258 | return NotKnown; |
259 | return iter->value->stage() == Plan::Ready ? Compiled : Compiling; |
260 | } |
261 | |
262 | void Worklist::waitUntilAllPlansForVMAreReady(VM& vm) |
263 | { |
264 | DeferGC deferGC(vm.heap); |
265 | |
266 | // While we are waiting for the compiler to finish, the collector might have already suspended |
267 | // the compiler and then it will be waiting for us to stop. That's a deadlock. We avoid that |
268 | // deadlock by relinquishing our heap access, so that the collector pretends that we are stopped |
269 | // even if we aren't. |
270 | ReleaseHeapAccessScope releaseHeapAccessScope(vm.heap); |
271 | |
272 | // Wait for all of the plans for the given VM to complete. The idea here |
273 | // is that we want all of the caller VM's plans to be done. We don't care |
274 | // about any other VM's plans, and we won't attempt to wait on those. |
275 | // After we release this lock, we know that although other VMs may still |
276 | // be adding plans, our VM will not be. |
277 | |
278 | LockHolder locker(*m_lock); |
279 | |
280 | if (Options::verboseCompilationQueue()) { |
281 | dump(locker, WTF::dataFile()); |
282 | dataLog(": Waiting for all in VM to complete.\n"); |
283 | } |
284 | |
285 | for (;;) { |
286 | bool allAreCompiled = true; |
287 | PlanMap::iterator end = m_plans.end(); |
288 | for (PlanMap::iterator iter = m_plans.begin(); iter != end; ++iter) { |
289 | if (iter->value->vm() != &vm) |
290 | continue; |
291 | if (iter->value->stage() != Plan::Ready) { |
292 | allAreCompiled = false; |
293 | break; |
294 | } |
295 | } |
296 | |
297 | if (allAreCompiled) |
298 | break; |
299 | |
300 | m_planCompiled.wait(*m_lock); |
301 | } |
302 | } |
303 | |
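// Moves every Ready plan belonging to vm out of m_readyPlans (and out of m_plans) into
// myReadyPlans, using swap-with-last removal to avoid shifting the vector.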
304 | void Worklist::removeAllReadyPlansForVM(VM& vm, Vector<RefPtr<Plan>, 8>& myReadyPlans) |
305 | { |
306 | DeferGC deferGC(vm.heap); |
307 | LockHolder locker(*m_lock); |
308 | for (size_t i = 0; i < m_readyPlans.size(); ++i) { |
309 | RefPtr<Plan> plan = m_readyPlans[i]; |
310 | if (plan->vm() != &vm) |
311 | continue; |
312 | if (plan->stage() != Plan::Ready) |
313 | continue; |
314 | myReadyPlans.append(plan); |
315 | m_readyPlans[i--] = m_readyPlans.last(); |
316 | m_readyPlans.removeLast(); |
317 | m_plans.remove(plan->key()); |
318 | } |
319 | } |
320 | |
321 | void Worklist::removeAllReadyPlansForVM(VM& vm) |
322 | { |
323 | Vector<RefPtr<Plan>, 8> myReadyPlans; |
324 | removeAllReadyPlansForVM(vm, myReadyPlans); |
325 | } |
326 | |
327 | Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requestedKey) |
328 | { |
329 | DeferGC deferGC(vm.heap); |
330 | Vector<RefPtr<Plan>, 8> myReadyPlans; |
331 | |
332 | removeAllReadyPlansForVM(vm, myReadyPlans); |
333 | |
334 | State resultingState = NotKnown; |
335 | |
336 | while (!myReadyPlans.isEmpty()) { |
337 | RefPtr<Plan> plan = myReadyPlans.takeLast(); |
338 | CompilationKey currentKey = plan->key(); |
339 | |
340 | if (Options::verboseCompilationQueue()) |
341 | dataLog(*this, ": Completing ", currentKey, "\n"); |
342 | |
343 | RELEASE_ASSERT(plan->stage() == Plan::Ready); |
344 | |
345 | plan->finalizeAndNotifyCallback(); |
346 | |
347 | if (currentKey == requestedKey) |
348 | resultingState = Compiled; |
349 | } |
350 | |
351 | if (!!requestedKey && resultingState == NotKnown) { |
352 | LockHolder locker(*m_lock); |
353 | if (m_plans.contains(requestedKey)) |
354 | resultingState = Compiling; |
355 | } |
356 | |
357 | return resultingState; |
358 | } |
359 | |
360 | void Worklist::completeAllPlansForVM(VM& vm) |
361 | { |
362 | DeferGC deferGC(vm.heap); |
363 | waitUntilAllPlansForVMAreReady(vm); |
364 | completeAllReadyPlansForVM(vm); |
365 | } |
366 | |
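// Acquires the suspension lock and then every thread's m_rightToRun, so each compiler
// thread is either idle or parked at a safepoint until resumeAllThreads() is called.
// The GC relies on this before visiting plans and safepoints below.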
367 | void Worklist::suspendAllThreads() |
368 | { |
369 | m_suspensionLock.lock(); |
370 | for (unsigned i = m_threads.size(); i--;) |
371 | m_threads[i]->m_rightToRun.lock(); |
372 | } |
373 | |
374 | void Worklist::resumeAllThreads() |
375 | { |
376 | for (unsigned i = m_threads.size(); i--;) |
377 | m_threads[i]->m_rightToRun.unlock(); |
378 | m_suspensionLock.unlock(); |
379 | } |
380 | |
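// Visits the children of every plan and active safepoint belonging to the collecting VM.
// Runs while the threads are suspended (see the locking comment below).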
381 | void Worklist::visitWeakReferences(SlotVisitor& visitor) |
382 | { |
383 | VM* vm = visitor.heap()->vm(); |
384 | { |
385 | LockHolder locker(*m_lock); |
386 | for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) { |
387 | Plan* plan = iter->value.get(); |
388 | if (plan->vm() != vm) |
389 | continue; |
390 | plan->checkLivenessAndVisitChildren(visitor); |
391 | } |
392 | } |
393 | // This loop doesn't need locking because: |
394 | // (1) no new threads can be added to m_threads. Hence, it is immutable and needs no locks. |
395 | // (2) ThreadData::m_safepoint is protected by that thread's m_rightToRun which we must be |
396 | // holding here because of a prior call to suspendAllThreads(). |
397 | for (unsigned i = m_threads.size(); i--;) { |
398 | ThreadData* data = m_threads[i].get(); |
399 | Safepoint* safepoint = data->m_safepoint; |
400 | if (safepoint && safepoint->vm() == vm) |
401 | safepoint->checkLivenessAndVisitChildren(visitor); |
402 | } |
403 | } |
404 | |
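// Cancels plans for vm that the GC determined to be dead: they are dropped from m_plans,
// filtered out of the queue and the ready list, and any dead in-flight safepoint is
// cancelled as well.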
405 | void Worklist::removeDeadPlans(VM& vm) |
406 | { |
407 | { |
408 | LockHolder locker(*m_lock); |
409 | HashSet<CompilationKey> deadPlanKeys; |
410 | for (PlanMap::iterator iter = m_plans.begin(); iter != m_plans.end(); ++iter) { |
411 | Plan* plan = iter->value.get(); |
412 | if (plan->vm() != &vm) |
413 | continue; |
414 | if (plan->isKnownToBeLiveDuringGC()) { |
415 | plan->finalizeInGC(); |
416 | continue; |
417 | } |
418 | RELEASE_ASSERT(plan->stage() != Plan::Cancelled); // Should not be cancelled, yet. |
419 | ASSERT(!deadPlanKeys.contains(plan->key())); |
420 | deadPlanKeys.add(plan->key()); |
421 | } |
422 | if (!deadPlanKeys.isEmpty()) { |
423 | for (HashSet<CompilationKey>::iterator iter = deadPlanKeys.begin(); iter != deadPlanKeys.end(); ++iter) |
424 | m_plans.take(*iter)->cancel(); |
425 | Deque<RefPtr<Plan>> newQueue; |
426 | while (!m_queue.isEmpty()) { |
427 | RefPtr<Plan> plan = m_queue.takeFirst(); |
428 | if (plan->stage() != Plan::Cancelled) |
429 | newQueue.append(plan); |
430 | } |
431 | m_queue.swap(newQueue); |
432 | for (unsigned i = 0; i < m_readyPlans.size(); ++i) { |
433 | if (m_readyPlans[i]->stage() != Plan::Cancelled) |
434 | continue; |
435 | m_readyPlans[i--] = m_readyPlans.last(); |
436 | m_readyPlans.removeLast(); |
437 | } |
438 | } |
439 | } |
440 | |
441 | // No locking needed for this part, see comment in visitWeakReferences(). |
442 | for (unsigned i = m_threads.size(); i--;) { |
443 | ThreadData* data = m_threads[i].get(); |
444 | Safepoint* safepoint = data->m_safepoint; |
445 | if (!safepoint) |
446 | continue; |
447 | if (safepoint->vm() != &vm) |
448 | continue; |
449 | if (safepoint->isKnownToBeLiveDuringGC()) |
450 | continue; |
451 | safepoint->cancel(); |
452 | } |
453 | } |
454 | |
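// Cancels and removes every plan for vm that is not currently in the Compiling stage,
// i.e. plans that are still queued or already Ready; in-flight compilations are left alone.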
455 | void Worklist::removeNonCompilingPlansForVM(VM& vm) |
456 | { |
457 | LockHolder locker(*m_lock); |
458 | HashSet<CompilationKey> deadPlanKeys; |
459 | Vector<RefPtr<Plan>> deadPlans; |
460 | for (auto& entry : m_plans) { |
461 | Plan* plan = entry.value.get(); |
462 | if (plan->vm() != &vm) |
463 | continue; |
464 | if (plan->stage() == Plan::Compiling) |
465 | continue; |
466 | deadPlanKeys.add(plan->key()); |
467 | deadPlans.append(plan); |
468 | } |
469 | for (CompilationKey key : deadPlanKeys) |
470 | m_plans.remove(key); |
471 | Deque<RefPtr<Plan>> newQueue; |
472 | while (!m_queue.isEmpty()) { |
473 | RefPtr<Plan> plan = m_queue.takeFirst(); |
474 | if (!deadPlanKeys.contains(plan->key())) |
475 | newQueue.append(WTFMove(plan)); |
476 | } |
477 | m_queue = WTFMove(newQueue); |
478 | m_readyPlans.removeAllMatching( |
479 | [&] (RefPtr<Plan>& plan) -> bool { |
480 | return deadPlanKeys.contains(plan->key()); |
481 | }); |
482 | for (auto& plan : deadPlans) |
483 | plan->cancel(); |
484 | } |
485 | |
486 | size_t Worklist::queueLength() |
487 | { |
488 | LockHolder locker(*m_lock); |
489 | return m_queue.size(); |
490 | } |
491 | |
492 | void Worklist::dump(PrintStream& out) const |
493 | { |
494 | LockHolder locker(*m_lock); |
495 | dump(locker, out); |
496 | } |
497 | |
498 | void Worklist::dump(const AbstractLocker&, PrintStream& out) const |
499 | { |
500 | out.print( |
501 | "Worklist(" , RawPointer(this), ")[Queue Length = " , m_queue.size(), |
502 | ", Map Size = " , m_plans.size(), ", Num Ready = " , m_readyPlans.size(), |
503 | ", Num Active Threads = " , m_numberOfActiveThreads, "/" , m_threads.size(), "]" ); |
504 | } |
505 | |
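// Shrinks the pool by signalling surplus threads to stop (one null Plan per running
// thread) and then stopping or joining each one, or grows it by creating new threads.
// Returns the previous thread count.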
506 | unsigned Worklist::setNumberOfThreads(unsigned numberOfThreads, int relativePriority) |
507 | { |
508 | LockHolder locker(m_suspensionLock); |
509 | auto currentNumberOfThreads = m_threads.size(); |
510 | if (numberOfThreads < currentNumberOfThreads) { |
511 | { |
512 | LockHolder locker(*m_lock); |
513 | for (unsigned i = currentNumberOfThreads; i-- > numberOfThreads;) { |
514 | if (m_threads[i]->m_thread->hasUnderlyingThread(locker)) { |
515 | m_queue.append(nullptr); |
516 | m_threads[i]->m_thread->notify(locker); |
517 | } |
518 | } |
519 | } |
520 | for (unsigned i = currentNumberOfThreads; i-- > numberOfThreads;) { |
521 | bool isStopped = false; |
522 | { |
523 | LockHolder locker(*m_lock); |
524 | isStopped = m_threads[i]->m_thread->tryStop(locker); |
525 | } |
526 | if (!isStopped) |
527 | m_threads[i]->m_thread->join(); |
528 | m_threads.remove(i); |
529 | } |
530 | m_threads.shrinkToFit(); |
531 | ASSERT(m_numberOfActiveThreads <= numberOfThreads); |
532 | } else if (numberOfThreads > currentNumberOfThreads) { |
533 | LockHolder locker(*m_lock); |
534 | for (unsigned i = currentNumberOfThreads; i < numberOfThreads; i++) |
535 | createNewThread(locker, relativePriority); |
536 | } |
537 | return currentNumberOfThreads; |
538 | } |
539 | |
540 | static Worklist* theGlobalDFGWorklist; |
541 | static unsigned numberOfDFGCompilerThreads; |
542 | static unsigned numberOfFTLCompilerThreads; |
543 | |
544 | static unsigned getNumberOfDFGCompilerThreads() |
545 | { |
546 | return numberOfDFGCompilerThreads ? numberOfDFGCompilerThreads : Options::numberOfDFGCompilerThreads(); |
547 | } |
548 | |
549 | static unsigned getNumberOfFTLCompilerThreads() |
550 | { |
551 | return numberOfFTLCompilerThreads ? numberOfFTLCompilerThreads : Options::numberOfFTLCompilerThreads(); |
552 | } |
553 | |
554 | unsigned setNumberOfDFGCompilerThreads(unsigned numberOfThreads) |
555 | { |
556 | auto previousNumberOfThreads = getNumberOfDFGCompilerThreads(); |
557 | numberOfDFGCompilerThreads = numberOfThreads; |
558 | return previousNumberOfThreads; |
559 | } |
560 | |
561 | unsigned setNumberOfFTLCompilerThreads(unsigned numberOfThreads) |
562 | { |
563 | auto previousNumberOfThreads = getNumberOfFTLCompilerThreads(); |
564 | numberOfFTLCompilerThreads = numberOfThreads; |
565 | return previousNumberOfThreads; |
566 | } |
567 | |
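// The global worklists are created on first use and intentionally leaked. The store-store
// fence publishes the fully constructed Worklist before the global pointer, which
// existingGlobalDFGWorklistOrNull() reads without any locking.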
568 | Worklist& ensureGlobalDFGWorklist() |
569 | { |
570 | static std::once_flag initializeGlobalWorklistOnceFlag; |
571 | std::call_once(initializeGlobalWorklistOnceFlag, [] { |
572 | Worklist* worklist = &Worklist::create("DFG", getNumberOfDFGCompilerThreads(), Options::priorityDeltaOfDFGCompilerThreads()).leakRef(); |
573 | WTF::storeStoreFence(); |
574 | theGlobalDFGWorklist = worklist; |
575 | }); |
576 | return *theGlobalDFGWorklist; |
577 | } |
578 | |
579 | Worklist* existingGlobalDFGWorklistOrNull() |
580 | { |
581 | return theGlobalDFGWorklist; |
582 | } |
583 | |
584 | static Worklist* theGlobalFTLWorklist; |
585 | |
586 | Worklist& ensureGlobalFTLWorklist() |
587 | { |
588 | static std::once_flag initializeGlobalWorklistOnceFlag; |
589 | std::call_once(initializeGlobalWorklistOnceFlag, [] { |
590 | Worklist* worklist = &Worklist::create("FTL", getNumberOfFTLCompilerThreads(), Options::priorityDeltaOfFTLCompilerThreads()).leakRef(); |
591 | WTF::storeStoreFence(); |
592 | theGlobalFTLWorklist = worklist; |
593 | }); |
594 | return *theGlobalFTLWorklist; |
595 | } |
596 | |
597 | Worklist* existingGlobalFTLWorklistOrNull() |
598 | { |
599 | return theGlobalFTLWorklist; |
600 | } |
601 | |
602 | Worklist& ensureGlobalWorklistFor(CompilationMode mode) |
603 | { |
604 | switch (mode) { |
605 | case InvalidCompilationMode: |
606 | RELEASE_ASSERT_NOT_REACHED(); |
607 | return ensureGlobalDFGWorklist(); |
608 | case DFGMode: |
609 | return ensureGlobalDFGWorklist(); |
610 | case FTLMode: |
611 | case FTLForOSREntryMode: |
612 | return ensureGlobalFTLWorklist(); |
613 | } |
614 | RELEASE_ASSERT_NOT_REACHED(); |
615 | return ensureGlobalDFGWorklist(); |
616 | } |
617 | |
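// The fixed worklist indexing used below: index 0 is the DFG worklist, index 1 is the FTL worklist.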
618 | unsigned numberOfWorklists() { return 2; } |
619 | |
620 | Worklist& ensureWorklistForIndex(unsigned index) |
621 | { |
622 | switch (index) { |
623 | case 0: |
624 | return ensureGlobalDFGWorklist(); |
625 | case 1: |
626 | return ensureGlobalFTLWorklist(); |
627 | default: |
628 | RELEASE_ASSERT_NOT_REACHED(); |
629 | return ensureGlobalDFGWorklist(); |
630 | } |
631 | } |
632 | |
633 | Worklist* existingWorklistForIndexOrNull(unsigned index) |
634 | { |
635 | switch (index) { |
636 | case 0: |
637 | return existingGlobalDFGWorklistOrNull(); |
638 | case 1: |
639 | return existingGlobalFTLWorklistOrNull(); |
640 | default: |
641 | RELEASE_ASSERT_NOT_REACHED(); |
642 | return nullptr; |
643 | } |
644 | } |
645 | |
646 | Worklist& existingWorklistForIndex(unsigned index) |
647 | { |
648 | Worklist* result = existingWorklistForIndexOrNull(index); |
649 | RELEASE_ASSERT(result); |
650 | return *result; |
651 | } |
652 | |
653 | void completeAllPlansForVM(VM& vm) |
654 | { |
655 | for (unsigned i = DFG::numberOfWorklists(); i--;) { |
656 | if (DFG::Worklist* worklist = DFG::existingWorklistForIndexOrNull(i)) |
657 | worklist->completeAllPlansForVM(vm); |
658 | } |
659 | } |
660 | |
661 | #else // ENABLE(DFG_JIT) |
662 | |
663 | void completeAllPlansForVM(VM&) |
664 | { |
665 | } |
666 | |
667 | #endif // ENABLE(DFG_JIT) |
668 | |
669 | } } // namespace JSC::DFG |