1/*
2 * Copyright (C) 2017-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "VMTraps.h"
28
29#include "CallFrame.h"
30#include "CallFrameInlines.h"
31#include "CodeBlock.h"
32#include "CodeBlockSet.h"
33#include "DFGCommonData.h"
34#include "ExceptionHelpers.h"
35#include "HeapInlines.h"
36#include "JSCPtrTag.h"
37#include "LLIntPCRanges.h"
38#include "MachineContext.h"
39#include "MachineStackMarker.h"
40#include "MacroAssembler.h"
41#include "MacroAssemblerCodeRef.h"
42#include "VM.h"
43#include "VMInspector.h"
44#include "Watchdog.h"
45#include <wtf/ProcessID.h>
46#include <wtf/ThreadMessage.h>
47#include <wtf/threads/Signals.h>
48
49namespace JSC {
50
// Recovers the owning VM from this VMTraps instance. VMTraps is embedded in VM
// as its m_traps member, so subtracting that member's offset from `this` yields
// the address of the enclosing VM object.
ALWAYS_INLINE VM& VMTraps::vm() const
{
    return *bitwise_cast<VM*>(bitwise_cast<uintptr_t>(this) - OBJECT_OFFSETOF(VM, m_traps));
}
55
56#if ENABLE(SIGNAL_BASED_VM_TRAPS)
57
58struct SignalContext {
59private:
60 SignalContext(PlatformRegisters& registers, MacroAssemblerCodePtr<PlatformRegistersPCPtrTag> trapPC)
61 : registers(registers)
62 , trapPC(trapPC)
63 , stackPointer(MachineContext::stackPointer(registers))
64 , framePointer(MachineContext::framePointer(registers))
65 { }
66
67public:
68 static Optional<SignalContext> tryCreate(PlatformRegisters& registers)
69 {
70 auto instructionPointer = MachineContext::instructionPointer(registers);
71 if (!instructionPointer)
72 return WTF::nullopt;
73 return SignalContext(registers, *instructionPointer);
74 }
75
76 PlatformRegisters& registers;
77 MacroAssemblerCodePtr<PlatformRegistersPCPtrTag> trapPC;
78 void* stackPointer;
79 void* framePointer;
80};
81
82inline static bool vmIsInactive(VM& vm)
83{
84 return !vm.entryScope && !vm.ownerThread();
85}
86
87static bool isSaneFrame(CallFrame* frame, CallFrame* calleeFrame, EntryFrame* entryFrame, StackBounds stackBounds)
88{
89 if (reinterpret_cast<void*>(frame) >= reinterpret_cast<void*>(entryFrame))
90 return false;
91 if (calleeFrame >= frame)
92 return false;
93 return stackBounds.contains(frame);
94}
95
// Called (via thread message) while the mutator thread is suspended at the
// register state captured in `context`. Tries to locate an optimizing-JIT
// CodeBlock on the mutator's stack and install VM trap breakpoints in it so
// that the mutator will break at an invalidation point. On any uncertainty it
// simply returns; the SignalSender thread will retry later.
void VMTraps::tryInstallTrapBreakpoints(SignalContext& context, StackBounds stackBounds)
{
    // This must be the initial signal to get the mutator thread's attention.
    // Let's get the thread to break at invalidation points if needed.
    VM& vm = this->vm();
    void* trapPC = context.trapPC.untaggedExecutableAddress();
    // We must ensure we're in JIT/LLint code. If we are, we know a few things:
    // - The JS thread isn't holding the malloc lock. Therefore, it's safe to malloc below.
    // - The JS thread isn't holding the CodeBlockSet lock.
    // If we're not in JIT/LLInt code, we can't run the C++ code below because it
    // mallocs, and we must prove the JS thread isn't holding the malloc lock
    // to be able to do that without risking a deadlock.
    if (!isJITPC(trapPC) && !LLInt::isLLIntPC(trapPC))
        return;

    // In JIT/LLInt code the frame pointer register holds the current CallFrame.
    CallFrame* callFrame = reinterpret_cast<CallFrame*>(context.framePointer);

    // Safe to take: the mutator is provably not holding it (see check above).
    auto codeBlockSetLocker = holdLock(vm.heap.codeBlockSet().getLock());

    CodeBlock* foundCodeBlock = nullptr;
    EntryFrame* entryFrame = vm.topEntryFrame;

    // We don't have a callee to start with. So, use the end of the stack to keep the
    // isSaneFrame() checker below happy for the first iteration. It will still check
    // to ensure that the address is in the stackBounds.
    CallFrame* calleeFrame = reinterpret_cast<CallFrame*>(stackBounds.end());

    if (!entryFrame || !callFrame)
        return; // Not running JS code. Let the SignalSender try again later.

    // Walk the stack toward the entry frame looking for the innermost frame
    // whose CodeBlock is known to the CodeBlockSet. Every frame is validated
    // with isSaneFrame() because the suspended thread's state may be mid-update.
    do {
        if (!isSaneFrame(callFrame, calleeFrame, entryFrame, stackBounds))
            return; // Let the SignalSender try again later.

        CodeBlock* candidateCodeBlock = callFrame->unsafeCodeBlock();
        if (candidateCodeBlock && vm.heap.codeBlockSet().contains(codeBlockSetLocker, candidateCodeBlock)) {
            foundCodeBlock = candidateCodeBlock;
            break;
        }

        calleeFrame = callFrame;
        callFrame = callFrame->callerFrame(entryFrame);

    } while (callFrame && entryFrame);

    if (!foundCodeBlock) {
        // We may have just entered the frame and the codeBlock pointer is not
        // initialized yet. Just bail and let the SignalSender try again later.
        return;
    }

    // Breakpoints only make sense in optimizing-JIT code; baseline/LLInt code
    // already polls for traps.
    if (JITCode::isOptimizingJIT(foundCodeBlock->jitType())) {
        // tryHoldLock: we must not block while the mutator is suspended.
        auto locker = tryHoldLock(*m_lock);
        if (!locker)
            return; // Let the SignalSender try again later.

        if (!needTrapHandling()) {
            // Too late. Someone else already handled the trap.
            return;
        }

        if (!foundCodeBlock->hasInstalledVMTrapBreakpoints())
            foundCodeBlock->installVMTrapBreakpoints();
        return;
    }
}
162
163void VMTraps::invalidateCodeBlocksOnStack()
164{
165 invalidateCodeBlocksOnStack(vm().topCallFrame);
166}
167
168void VMTraps::invalidateCodeBlocksOnStack(CallFrame* topCallFrame)
169{
170 auto codeBlockSetLocker = holdLock(vm().heap.codeBlockSet().getLock());
171 invalidateCodeBlocksOnStack(codeBlockSetLocker, topCallFrame);
172}
173
174void VMTraps::invalidateCodeBlocksOnStack(Locker<Lock>&, CallFrame* topCallFrame)
175{
176 if (!m_needToInvalidatedCodeBlocks)
177 return;
178
179 m_needToInvalidatedCodeBlocks = false;
180
181 EntryFrame* entryFrame = vm().topEntryFrame;
182 CallFrame* callFrame = topCallFrame;
183
184 if (!entryFrame)
185 return; // Not running JS code. Nothing to invalidate.
186
187 while (callFrame) {
188 CodeBlock* codeBlock = callFrame->codeBlock();
189 if (codeBlock && JITCode::isOptimizingJIT(codeBlock->jitType()))
190 codeBlock->jettison(Profiler::JettisonDueToVMTraps);
191 callFrame = callFrame->callerFrame(entryFrame);
192 }
193}
194
// Background thread that repeatedly signals the mutator thread until a pending
// trap has been acknowledged. It shares VMTraps' lock/condition with the
// AutomaticThread machinery, so fireTrap()'s notify wakes it up.
class VMTraps::SignalSender final : public AutomaticThread {
public:
    using Base = AutomaticThread;
    SignalSender(const AbstractLocker& locker, VM& vm)
        : Base(locker, vm.traps().m_lock, vm.traps().m_condition.copyRef())
        , m_vm(vm)
    {
        // Install the BadAccess handler exactly once per process; it fires when
        // the mutator executes a VM trap breakpoint planted by
        // tryInstallTrapBreakpoints().
        static std::once_flag once;
        std::call_once(once, [] {
            installSignalHandler(Signal::BadAccess, [] (Signal, SigInfo&, PlatformRegisters& registers) -> SignalAction {
                auto signalContext = SignalContext::tryCreate(registers);
                if (!signalContext)
                    return SignalAction::NotHandled;

                // Only faults inside JIT code can be our breakpoints.
                void* trapPC = signalContext->trapPC.untaggedExecutableAddress();
                if (!isJITPC(trapPC))
                    return SignalAction::NotHandled;

                CodeBlock* currentCodeBlock = DFG::codeBlockForVMTrapPC(trapPC);
                if (!currentCodeBlock) {
                    // Either we trapped for some other reason, e.g. Wasm OOB, or we didn't properly monitor the PC. Regardless, we can't do much now...
                    return SignalAction::NotHandled;
                }
                ASSERT(currentCodeBlock->hasInstalledVMTrapBreakpoints());
                VM& vm = currentCodeBlock->vm();

                // We are in JIT code so it's safe to acquire this lock.
                auto codeBlockSetLocker = holdLock(vm.heap.codeBlockSet().getLock());
                bool sawCurrentCodeBlock = false;
                vm.heap.forEachCodeBlockIgnoringJITPlans(codeBlockSetLocker, [&] (CodeBlock* codeBlock) {
                    // We want to jettison all code blocks that have vm traps breakpoints, otherwise we could hit them later.
                    if (codeBlock->hasInstalledVMTrapBreakpoints()) {
                        if (currentCodeBlock == codeBlock)
                            sawCurrentCodeBlock = true;

                        codeBlock->jettison(Profiler::JettisonDueToVMTraps);
                    }
                });
                // The code block we trapped in must be among those jettisoned.
                RELEASE_ASSERT(sawCurrentCodeBlock);

                return SignalAction::Handled; // We've successfully jettisoned the codeBlocks.
            });
        });
    }

    const char* name() const override
    {
        return "JSC VMTraps Signal Sender Thread";
    }

    VMTraps& traps() { return m_vm.traps(); }

protected:
    // Decides whether the thread should stop, sleep, or do work. Called by
    // AutomaticThread with the shared lock held.
    PollResult poll(const AbstractLocker&) override
    {
        if (traps().m_isShuttingDown)
            return PollResult::Stop;

        if (!traps().needTrapHandling())
            return PollResult::Wait;

        // We know that no trap could have been processed and re-added because we are holding the lock.
        if (vmIsInactive(m_vm))
            return PollResult::Wait;
        return PollResult::Work;
    }

    // One signalling attempt: suspend the mutator, try to plant breakpoints,
    // then wait briefly before poll() decides whether to retry.
    WorkResult work() override
    {
        VM& vm = m_vm;

        auto optionalOwnerThread = vm.ownerThread();
        if (optionalOwnerThread) {
            sendMessage(*optionalOwnerThread.value().get(), [&] (PlatformRegisters& registers) -> void {
                // This lambda runs while the mutator thread is suspended.
                auto signalContext = SignalContext::tryCreate(registers);
                if (!signalContext)
                    return;

                auto ownerThread = vm.apiLock().ownerThread();
                // We can't mess with a thread unless it's the one we suspended.
                if (!ownerThread || ownerThread != optionalOwnerThread)
                    return;

                Thread& thread = *ownerThread->get();
                vm.traps().tryInstallTrapBreakpoints(*signalContext, thread.stack());
            });
        }

        {
            auto locker = holdLock(*traps().m_lock);
            if (traps().m_isShuttingDown)
                return WorkResult::Stop;
            // Short timed wait so we retry periodically until the mutator
            // handles the trap (or shutdown is requested).
            traps().m_condition->waitFor(*traps().m_lock, 1_ms);
        }
        return WorkResult::Continue;
    }

private:

    VM& m_vm;
};
296
297#endif // ENABLE(SIGNAL_BASED_VM_TRAPS)
298
299void VMTraps::willDestroyVM()
300{
301 m_isShuttingDown = true;
302#if ENABLE(SIGNAL_BASED_VM_TRAPS)
303 if (m_signalSender) {
304 {
305 auto locker = holdLock(*m_lock);
306 if (!m_signalSender->tryStop(locker))
307 m_condition->notifyAll(locker);
308 }
309 m_signalSender->join();
310 m_signalSender = nullptr;
311 }
312#endif
313}
314
// Requests asynchronous handling of `eventType` by the mutator thread. Safe to
// call from any thread except the one holding the API lock. When signal-based
// traps are enabled, also (lazily) starts the SignalSender thread and wakes it.
void VMTraps::fireTrap(VMTraps::EventType eventType)
{
    ASSERT(!vm().currentThreadIsHoldingAPILock());
    {
        auto locker = holdLock(*m_lock);
        ASSERT(!m_isShuttingDown);
        setTrapForEvent(locker, eventType);
        // Tells invalidateCodeBlocksOnStack() there is fresh work to do.
        m_needToInvalidatedCodeBlocks = true;
    }

#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    if (!Options::usePollingTraps()) {
        // sendSignal() can loop until it has confirmation that the mutator thread
        // has received the trap request. We'll call it from another thread so that
        // fireTrap() does not block.
        auto locker = holdLock(*m_lock);
        if (!m_signalSender)
            m_signalSender = adoptRef(new SignalSender(locker, vm()));
        m_condition->notifyAll(locker);
    }
#endif
}
337
// Runs on the mutator thread to service all pending traps matching `mask`.
// May throw a TerminatedExecutionException (via `scope`) for termination or
// watchdog timeout events.
void VMTraps::handleTraps(JSGlobalObject* globalObject, CallFrame* callFrame, VMTraps::Mask mask)
{
    VM& vm = this->vm();
    auto scope = DECLARE_THROW_SCOPE(vm);

    {
        auto codeBlockSetLocker = holdLock(vm.heap.codeBlockSet().getLock());
        vm.heap.forEachCodeBlockIgnoringJITPlans(codeBlockSetLocker, [&] (CodeBlock* codeBlock) {
            // We want to jettison all code blocks that have vm traps breakpoints, otherwise we could hit them later.
            if (codeBlock->hasInstalledVMTrapBreakpoints())
                codeBlock->jettison(Profiler::JettisonDueToVMTraps);
        });
    }

    ASSERT(needTrapHandling(mask));
    // Drain pending traps in priority order until none match the mask.
    while (needTrapHandling(mask)) {
        auto eventType = takeTopPriorityTrap(mask);
        switch (eventType) {
        case NeedDebuggerBreak:
            dataLog("VM ", RawPointer(&vm), " on pid ", getCurrentProcessID(), " received NeedDebuggerBreak trap\n");
            invalidateCodeBlocksOnStack(callFrame);
            break;

        case NeedWatchdogCheck:
            ASSERT(vm.watchdog());
            // If the watchdog says we may keep running, look for more traps.
            if (LIKELY(!vm.watchdog()->shouldTerminate(globalObject)))
                continue;
            FALLTHROUGH;

        case NeedTermination:
            throwException(globalObject, scope, createTerminatedExecutionException(&vm));
            return;

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
}
376
377auto VMTraps::takeTopPriorityTrap(VMTraps::Mask mask) -> EventType
378{
379 auto locker = holdLock(*m_lock);
380 for (int i = 0; i < NumberOfEventTypes; ++i) {
381 EventType eventType = static_cast<EventType>(i);
382 if (hasTrapForEvent(locker, eventType, mask)) {
383 clearTrapForEvent(locker, eventType);
384 return eventType;
385 }
386 }
387 return Invalid;
388}
389
// Creates the trap state. The lock and condition are boxed/ref-counted because
// they are shared with the SignalSender AutomaticThread.
VMTraps::VMTraps()
    : m_lock(Box<Lock>::create())
    , m_condition(AutomaticThreadCondition::create())
{
}
395
VMTraps::~VMTraps()
{
#if ENABLE(SIGNAL_BASED_VM_TRAPS)
    // willDestroyVM() must have stopped and joined the SignalSender thread
    // before this destructor runs.
    ASSERT(!m_signalSender);
#endif
}
402
403} // namespace JSC
404