/*
 * Copyright (C) 1999-2000 Harri Porten ([email protected])
 * Copyright (C) 2001 Peter Kelly ([email protected])
 * Copyright (C) 2003-2017 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#pragma once

#include "RegisterState.h"
#include <wtf/Lock.h>
#include <wtf/ScopedLambda.h>
#include <wtf/ThreadGroup.h>

namespace JSC {

class CodeBlockSet;
class ConservativeRoots;
class Heap;
class JITStubRoutineSet;

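// A snapshot of the calling thread taken for a conservative scan: the bounds of its
// stack (from stackOrigin down to stackTop) plus a pointer to a dump of its register
// state, so values held only in registers can be scanned as well. The fields are
// normally filled in by DECLARE_AND_COMPUTE_CURRENT_THREAD_STATE below.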
struct CurrentThreadState {
    void* stackOrigin { nullptr };
    void* stackTop { nullptr };
    RegisterState* registerState { nullptr };
};

class MachineThreads {
    WTF_MAKE_FAST_ALLOCATED;
    WTF_MAKE_NONCOPYABLE(MachineThreads);
public:
    MachineThreads();

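    // Scans the stacks and register state of every registered thread (including the
    // current one, as described by CurrentThreadState) and reports anything that looks
    // like a heap pointer into ConservativeRoots, consulting the JITStubRoutineSet and
    // CodeBlockSet for pointers into executable memory.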
    void gatherConservativeRoots(ConservativeRoots&, JITStubRoutineSet&, CodeBlockSet&, CurrentThreadState*, Thread*);

    // Only needs to be called by clients that can use the same heap from multiple threads.
    bool addCurrentThread() { return m_threadGroup->addCurrentThread() == ThreadGroupAddResult::NewlyAdded; }
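    // Illustrative sketch (not part of this header): a thread that shares a heap with
    // other threads registers itself before touching the heap, so the collector knows
    // to scan its stack. The VM/heap accessors below are assumptions made for the
    // example, not declarations from this file.
    //
    //     void workerThreadMain(VM& vm)
    //     {
    //         vm.heap.machineThreads().addCurrentThread();
    //         // It is now safe for this thread to hold GC-managed pointers on its
    //         // stack and in registers; they will be found conservatively.
    //     }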

    WordLock& getLock() { return m_threadGroup->getLock(); }
    const ListHashSet<Ref<Thread>>& threads(const AbstractLocker& locker) const { return m_threadGroup->threads(locker); }

private:
    void gatherFromCurrentThread(ConservativeRoots&, JITStubRoutineSet&, CodeBlockSet&, CurrentThreadState&);

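    // Helpers for scanning threads other than the current one: the target thread is
    // briefly suspended while its register state and stack are copied into the
    // caller-provided buffer of the given capacity; the size_t* out-parameter receives
    // the number of bytes the copy required, so the caller can grow the buffer and retry.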
    void tryCopyOtherThreadStack(Thread&, void*, size_t capacity, size_t*);
    bool tryCopyOtherThreadStacks(const AbstractLocker&, void*, size_t capacity, size_t*, Thread&);

    std::shared_ptr<ThreadGroup> m_threadGroup;
};

#define DECLARE_AND_COMPUTE_CURRENT_THREAD_STATE(stateName) \
    CurrentThreadState stateName; \
    stateName.stackTop = &stateName; \
    stateName.stackOrigin = Thread::current().stack().origin(); \
    ALLOCATE_AND_GET_REGISTER_STATE(stateName ## _registerState); \
    stateName.registerState = &stateName ## _registerState
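
// Expanding DECLARE_AND_COMPUTE_CURRENT_THREAD_STATE(state) declares a CurrentThreadState
// named `state`, using the address of `state` itself as an approximation of the current
// stack top, the thread's recorded stack origin as the other bound, and a register dump
// named `state_registerState` captured via ALLOCATE_AND_GET_REGISTER_STATE.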

// The return value is meaningless. We just use it to suppress tail call optimization.
int callWithCurrentThreadState(const ScopedLambda<void(CurrentThreadState&)>&);
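// Illustrative sketch (assumed caller, not part of this header): scan logic is passed in
// via WTF's scopedLambda<> helper, and the current thread's state is captured inside
// callWithCurrentThreadState before the lambda runs. The names machineThreads, roots,
// jitStubRoutines, and codeBlocks stand in for the caller's own objects.
//
//     callWithCurrentThreadState(scopedLambda<void(CurrentThreadState&)>([&] (CurrentThreadState& state) {
//         machineThreads.gatherConservativeRoots(roots, jitStubRoutines, codeBlocks, &state, &Thread::current());
//     }));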

} // namespace JSC