/*
 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompilerCommon.h"

#if ENABLE(DFG_JIT)

#include "DFGJITCode.h"
#include "DFGOperations.h"
#include "JIT.h"
#include "JSCJSValueInlines.h"
#include "JSCInlines.h"
#include "StructureStubInfo.h"

namespace JSC { namespace DFG {

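// Emits the counting and reoptimization-trigger logic that runs each time this OSR exit is
// taken: the exit is counted (per exit site and per CodeBlock), and we either call into the
// runtime to reoptimize or push back the baseline execution counters so we do not retry too soon.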
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    if (!exitKindMayJettison(exit.m_kind)) {
        // FIXME: We may want to notice that we're frequently exiting
        // at an op_catch that we didn't compile an entrypoint for, and
        // then trigger a reoptimization of this CodeBlock:
        // https://bugs.webkit.org/show_bug.cgi?id=175842
        return;
    }

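    // Count this exit for this particular exit site.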
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT3);

    AssemblyHelpers::Jump tooFewFails;

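    // Also count this exit against the optimized CodeBlock as a whole (already loaded into
    // regT3). regT2 ends up holding the new count for the threshold check below.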
    jit.load32(AssemblyHelpers::Address(GPRInfo::regT3, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT3, CodeBlock::offsetOfOSRExitCounter()));

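    // If the baseline CodeBlock's execution counter has already counted back up to a
    // non-negative value, go straight to the reoptimization path instead of waiting for the
    // exit count to cross a threshold.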
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // We want to figure out if there's a possibility that we're in a loop. For the outermost
    // code block in the inline stack, we handle this appropriately by having the loop OSR trigger
    // check the exit count of the replacement of the CodeBlock from which we are OSRing. The
    // problem is the inlined functions, which might also have loops, but whose baseline versions
    // don't know where to look for the exit count. Figure out whether those loops were severe
    // enough that we tried to OSR enter. If so, then we should use the loop reoptimization
    // trigger. Otherwise, we should use the normal reoptimization trigger.

    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->baselineCodeBlock->ownerExecutable()->addressOfDidTryToEnterInLoop())));
    }

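    // regT1 := the exit-count threshold to compare against: the normal one by default, or the
    // from-loop one if any inlined frame on the stack tried to OSR enter a loop.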
    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }

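    // If the exit count is still at or below the threshold, skip reoptimization and merely push
    // back the baseline counters below.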
    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

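    // Otherwise, call into the runtime to jettison the optimized CodeBlock and schedule
    // reoptimization. regT0 (the baseline CodeBlock) and regT3 (the optimized CodeBlock) feed
    // the first two arguments of triggerReoptimizationNow.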
    reoptimizeNow.link(&jit);

    jit.setupArguments<decltype(triggerReoptimizationNow)>(GPRInfo::regT0, GPRInfo::regT3, AssemblyHelpers::TrustedImmPtr(&exit));
    jit.move(AssemblyHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the baseline execution counter so that the code only tries to optimize again after
    // a while.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
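    // Clip the target to the range that the relevant execution counter format can represent.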
    switch (jit.codeBlock()->jitType()) {
    case JITType::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITType::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
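    // regT0 still holds the baseline CodeBlock; reset its counters to the newly computed values.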
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}

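// Reconstructs ("reifies") a baseline-style call frame for every inlined frame on the exiting
// code origin's inline stack, filling in the CodeBlock, return PC, caller frame, argument count,
// call site index, and callee slots that the DFG elided when it inlined the calls.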
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
    // in the presence of inlined tail calls.
    // https://bugs.webkit.org/show_bug.cgi?id=147511
    ASSERT(jit.baselineCodeBlock()->jitType() == JITType::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)CallFrameSlot::codeBlock));

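    // Walk the inline stack from the exit's code origin outward, reifying one frame per
    // iteration.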
    const CodeOrigin* codeOrigin;
    for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame(); codeOrigin = codeOrigin->inlineCallFrame()->getCallerSkippingTailCalls()) {
        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame();
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(*codeOrigin);
        InlineCallFrame::Kind trueCallerCallKind;
        CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
        GPRReg callerFrameGPR = GPRInfo::callFrameRegister;

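        // If there is no true caller, every caller between this frame and the machine frame was
        // a tail call, so the reified frame takes its return PC and caller-frame pointer from
        // the machine frame we are exiting. On ARM64E, the return PC was signed for its old slot
        // and has to be re-signed for the slot it is about to occupy.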
        if (!trueCaller) {
            ASSERT(inlineCallFrame->isTail());
            jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
#if CPU(ARM64E)
            jit.addPtr(AssemblyHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::callFrameRegister, GPRInfo::regT2);
            jit.untagPtr(GPRInfo::regT2, GPRInfo::regT3);
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->returnPCOffset() + sizeof(void*)), GPRInfo::callFrameRegister, GPRInfo::regT2);
            jit.tagPtr(GPRInfo::regT2, GPRInfo::regT3);
#endif
            jit.storePtr(GPRInfo::regT3, AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
            jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else {
            CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
            unsigned callBytecodeIndex = trueCaller->bytecodeIndex();
            void* jumpTarget = nullptr;

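            // Find the address in the baseline caller that this inlined call returns to: the
            // return location of the call IC for ordinary calls, or the done location of the
            // property-access stub for getter/setter calls.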
            switch (trueCallerCallKind) {
            case InlineCallFrame::Call:
            case InlineCallFrame::Construct:
            case InlineCallFrame::CallVarargs:
            case InlineCallFrame::ConstructVarargs:
            case InlineCallFrame::TailCall:
            case InlineCallFrame::TailCallVarargs: {
                CallLinkInfo* callLinkInfo =
                    baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
                RELEASE_ASSERT(callLinkInfo);

                jumpTarget = callLinkInfo->callReturnLocation().untaggedExecutableAddress();
                break;
            }

            case InlineCallFrame::GetterCall:
            case InlineCallFrame::SetterCall: {
                StructureStubInfo* stubInfo =
                    baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
                RELEASE_ASSERT(stubInfo);

                jumpTarget = stubInfo->doneLocation().untaggedExecutableAddress();
                break;
            }

            default:
                RELEASE_ASSERT_NOT_REACHED();
            }

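            // If the true caller is itself an inlined frame, its frame lives at a fixed offset
            // from the machine frame; otherwise the machine frame is the caller frame.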
            if (trueCaller->inlineCallFrame()) {
                jit.addPtr(
                    AssemblyHelpers::TrustedImm32(trueCaller->inlineCallFrame()->stackOffset * sizeof(EncodedJSValue)),
                    GPRInfo::callFrameRegister,
                    GPRInfo::regT3);
                callerFrameGPR = GPRInfo::regT3;
            }

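            // Store the return address into the reified frame; on ARM64E it has to be signed for
            // the slot that will hold it.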
#if CPU(ARM64E)
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->returnPCOffset() + sizeof(void*)), GPRInfo::callFrameRegister, GPRInfo::regT2);
            jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::nonArgGPR0);
            jit.tagPtr(GPRInfo::regT2, GPRInfo::nonArgGPR0);
            jit.storePtr(GPRInfo::nonArgGPR0, AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
#else
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
#endif
        }

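        // Fill in the reified frame's CodeBlock slot.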
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock)));

        // Restore the inline call frame's callee save registers.
        // If this inlined frame is a tail call that will return back to the original caller, we need to
        // copy the prior contents of the tag registers already saved for the outer frame to this frame.
        jit.emitSaveOrCopyCalleeSavesFor(
            baselineCodeBlock,
            static_cast<VirtualRegister>(inlineCallFrame->stackOffset),
            trueCaller ? AssemblyHelpers::UseExistingTagRegisterContents : AssemblyHelpers::CopyBaselineCalleeSavedRegistersFromBaseFrame,
            GPRInfo::regT2);

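        // Fill in the remaining header slots: the argument count (skipped for varargs frames,
        // whose slot already holds the dynamic count), the caller frame, the call site index
        // (kept in the tag half of the argument-count slot), and the callee (a known constant
        // unless this is a closure call).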
        if (!inlineCallFrame->isVarargs())
            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->argumentCountIncludingThis), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
#if USE(JSVALUE64)
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex()).bits();
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#else // USE(JSVALUE64) // so this is the 32-bit part
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        const Instruction* instruction = baselineCodeBlock->instructions().at(codeOrigin->bytecodeIndex()).ptr();
        uint32_t locationBits = CallSiteIndex(instruction).bits();
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
    }


    // We don't need to set the top-level code origin's call site index if we only did inlined
    // tail calls.
    if (codeOrigin) {
#if USE(JSVALUE64)
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex()).bits();
#else
        const Instruction* instruction = jit.baselineCodeBlock()->instructions().at(codeOrigin->bytecodeIndex()).ptr();
        uint32_t locationBits = CallSiteIndex(instruction).bits();
#endif
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(CallFrameSlot::argumentCount)));
    }
}

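// Emits a write barrier on the given cell: calls operationOSRWriteBarrier unless the GC already
// considers the owner remembered or in Eden.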
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.barrierBranchWithoutFence(owner);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 4), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArguments<decltype(operationOSRWriteBarrier)>(owner);
    jit.move(MacroAssembler::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch, OperationPtrTag);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 4), MacroAssembler::stackPointerRegister);
#endif

    ownerIsRememberedOrInEden.link(&jit);
}

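// Final step of an OSR exit: write-barrier the baseline CodeBlocks we may have stored into,
// point the call frame register and stack pointer at the frame we are resuming in, and jump to
// the baseline machine code for the exit's bytecode index.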
void adjustAndJumpToTarget(VM& vm, CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.memoryFence();

    jit.move(
        AssemblyHelpers::TrustedImmPtr(
            jit.codeBlock()->baselineAlternative()), GPRInfo::argumentGPR1);
    osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);

    // We barrier all inlined frames -- and not just the current inline stack --
    // because we don't know which inlined function owns the value profile that
    // we'll update when we exit. In the case of "f() { a(); b(); }", if both
    // a and b are inlined, we might exit inside b due to a bad value loaded
    // from a.
    // FIXME: MethodOfGettingAValueProfile should remember which CodeBlock owns
    // the value profile.
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            jit.move(
                AssemblyHelpers::TrustedImmPtr(
                    inlineCallFrame->baselineCodeBlock.get()), GPRInfo::argumentGPR1);
            osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);
        }
    }

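    // If the exit site is inside an inlined frame, shift the call frame register so it points at
    // that frame.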
    auto* exitInlineCallFrame = exit.m_codeOrigin.inlineCallFrame();
    if (exitInlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exitInlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

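    // Look up the baseline machine code address for the bytecode index we are resuming at, set
    // the stack pointer for the baseline frame, and jump there.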
    CodeBlock* codeBlockForExit = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    ASSERT(codeBlockForExit == codeBlockForExit->baselineVersion());
    ASSERT(codeBlockForExit->jitType() == JITType::BaselineJIT);
    CodeLocationLabel<JSEntryPtrTag> codeLocation = codeBlockForExit->jitCodeMap().find(exit.m_codeOrigin.bytecodeIndex());
    ASSERT(codeLocation);

    void* jumpTarget = codeLocation.retagged<OSRExitPtrTag>().executableAddress();
    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
    if (exit.isExceptionHandler()) {
        // Since we're jumping to op_catch, we need to set callFrameForCatch.
        jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
    }

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2, OSRExitPtrTag);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)