/*
 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompilerCommon.h"

#if ENABLE(DFG_JIT)

#include "Bytecodes.h"
#include "DFGJITCode.h"
#include "DFGOperations.h"
#include "JIT.h"
#include "JSCJSValueInlines.h"
#include "JSCInlines.h"
#include "LLIntData.h"
#include "StructureStubInfo.h"

namespace JSC { namespace DFG {

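// Emitted at every OSR exit whose kind may jettison the DFG/FTL code block: bumps
// the exit counters and either triggers reoptimization now (if the baseline
// execution counter has already crossed its threshold) or pushes the baseline
// thresholds back so that we only try to re-tier after a long warm-up.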
void handleExitCounts(VM& vm, CCallHelpers& jit, const OSRExitBase& exit)
{
    if (!exitKindMayJettison(exit.m_kind)) {
        // FIXME: We may want to notice that we're frequently exiting
        // at an op_catch that we didn't compile an entrypoint for, and
        // then trigger a reoptimization of this CodeBlock:
        // https://bugs.webkit.org/show_bug.cgi?id=175842
        return;
    }

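    // Record the exit: bump this exit site's own count as well as the code
    // block's total OSR exit counter.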
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT3);

    AssemblyHelpers::Jump tooFewFails;

    jit.load32(AssemblyHelpers::Address(GPRInfo::regT3, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT3, CodeBlock::offsetOfOSRExitCounter()));

    // The baseline execution counter counts up toward zero; a non-negative value
    // means the baseline block has crossed its threshold, so reoptimize right away.
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // We want to figure out if there's a possibility that we're in a loop. For the
    // outermost code block in the inline stack, this is handled for us: the loop OSR
    // entry trigger checks the exit count of the replacement of the CodeBlock from
    // which we are OSRing. The problem is the inlined functions, which might also
    // have loops but whose baseline versions don't know where to look for the exit
    // count. So we check whether any inlined function's loops were hot enough that
    // we tried to OSR-enter them. If so, we use the loop reoptimization threshold;
    // otherwise we use the normal one.

    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame(); inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame()) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->baselineCodeBlock->ownerExecutable()->addressOfDidTryToEnterInLoop())));
    }

    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }

    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    reoptimizeNow.link(&jit);

    jit.setupArguments<decltype(operationTriggerReoptimizationNow)>(GPRInfo::regT0, GPRInfo::regT3, AssemblyHelpers::TrustedImmPtr(&exit));
    jit.prepareCallOperation(vm);
    jit.move(AssemblyHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationTriggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the execution counter so that the target is to only optimize again
    // after a while. The counter counts up from a negative value toward zero, so
    // storing -clippedValue here postpones the next optimization attempt by
    // roughly clippedValue more executions.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITType::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITType::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
#endif
        break;
    }
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}

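// Computes the PC at which execution should resume in the baseline caller of an
// inlined call. For an LLInt caller this is the opcode's return-location thunk,
// selected by instruction width; for a baseline JIT caller it is recovered from
// the call's CallLinkInfo or, for getter/setter calls, from the IC's
// StructureStubInfo.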
void* callerReturnPC(CodeBlock* baselineCodeBlockForCaller, BytecodeIndex callBytecodeIndex, InlineCallFrame::Kind trueCallerCallKind, bool& callerIsLLInt)
{
    callerIsLLInt = Options::forceOSRExitToLLInt() || baselineCodeBlockForCaller->jitType() == JITType::InterpreterThunk;

    void* jumpTarget;

    if (callerIsLLInt) {
        const Instruction& callInstruction = *baselineCodeBlockForCaller->instructions().at(callBytecodeIndex).ptr();
#define LLINT_RETURN_LOCATION(name) \
    (callInstruction.isWide16() \
        ? LLInt::getWide16CodePtr<NoPtrTag>(name##_return_location) \
        : (callInstruction.isWide32() \
            ? LLInt::getWide32CodePtr<NoPtrTag>(name##_return_location) \
            : LLInt::getCodePtr<NoPtrTag>(name##_return_location))).executableAddress()

        switch (trueCallerCallKind) {
        case InlineCallFrame::Call:
            jumpTarget = LLINT_RETURN_LOCATION(op_call);
            break;
        case InlineCallFrame::Construct:
            jumpTarget = LLINT_RETURN_LOCATION(op_construct);
            break;
        case InlineCallFrame::CallVarargs:
            jumpTarget = LLINT_RETURN_LOCATION(op_call_varargs_slow);
            break;
        case InlineCallFrame::ConstructVarargs:
            jumpTarget = LLINT_RETURN_LOCATION(op_construct_varargs_slow);
            break;
        case InlineCallFrame::GetterCall: {
            if (callInstruction.opcodeID() == op_get_by_id)
                jumpTarget = LLINT_RETURN_LOCATION(op_get_by_id);
            else if (callInstruction.opcodeID() == op_get_by_val)
                jumpTarget = LLINT_RETURN_LOCATION(op_get_by_val);
            else
                RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        case InlineCallFrame::SetterCall: {
            if (callInstruction.opcodeID() == op_put_by_id)
                jumpTarget = LLINT_RETURN_LOCATION(op_put_by_id);
            else if (callInstruction.opcodeID() == op_put_by_val)
                jumpTarget = LLINT_RETURN_LOCATION(op_put_by_val);
            else
                RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

#undef LLINT_RETURN_LOCATION

    } else {
        switch (trueCallerCallKind) {
        case InlineCallFrame::Call:
        case InlineCallFrame::Construct:
        case InlineCallFrame::CallVarargs:
        case InlineCallFrame::ConstructVarargs: {
            CallLinkInfo* callLinkInfo =
                baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
            RELEASE_ASSERT(callLinkInfo);

            jumpTarget = callLinkInfo->callReturnLocation().untaggedExecutableAddress();
            break;
        }

        case InlineCallFrame::GetterCall:
        case InlineCallFrame::SetterCall: {
            StructureStubInfo* stubInfo =
                baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
            RELEASE_ASSERT(stubInfo);

            jumpTarget = stubInfo->doneLocation().untaggedExecutableAddress();
            break;
        }

        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    return jumpTarget;
}

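// Returns the address, relative to the frame pointer, of the stack slot in which
// the given inline call frame spills the given baseline callee-save register.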
CCallHelpers::Address calleeSaveSlot(InlineCallFrame* inlineCallFrame, CodeBlock* baselineCodeBlock, GPRReg calleeSave)
{
    const RegisterAtOffsetList* calleeSaves = baselineCodeBlock->calleeSaveRegisters();
    for (unsigned i = 0; i < calleeSaves->size(); i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (entry.reg() != calleeSave)
            continue;
        return CCallHelpers::Address(CCallHelpers::framePointerRegister, static_cast<VirtualRegister>(inlineCallFrame->stackOffset).offsetInBytes() + entry.offset());
    }

    RELEASE_ASSERT_NOT_REACHED();
    return CCallHelpers::Address(CCallHelpers::framePointerRegister);
}

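// Materializes a real baseline call frame for every frame on the exit's inline
// stack: fills in the code block, callee, argument count, call-site index, caller
// frame pointer, and return PC slots that inlining elided.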
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
    // in the presence of inlined tail calls.
    // https://bugs.webkit.org/show_bug.cgi?id=147511
    ASSERT(JITCode::isBaselineCode(jit.baselineCodeBlock()->jitType()));
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)CallFrameSlot::codeBlock));

    const CodeOrigin* codeOrigin;
    for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame(); codeOrigin = codeOrigin->inlineCallFrame()->getCallerSkippingTailCalls()) {
        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame();
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(*codeOrigin);
        InlineCallFrame::Kind trueCallerCallKind;
        CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
        GPRReg callerFrameGPR = GPRInfo::callFrameRegister;

        bool callerIsLLInt = false;

        if (!trueCaller) {
            ASSERT(inlineCallFrame->isTail());
            // This is an inlined tail call: the return PC of the physical frame is
            // reused for the reified frame.
            jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
#if CPU(ARM64E)
            // On ARM64E, return PCs are signed with a stack-slot-derived modifier,
            // so strip the signature for the slot the PC came from and re-sign it
            // for the slot it is moving to.
            jit.addPtr(AssemblyHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), GPRInfo::callFrameRegister, GPRInfo::regT2);
            jit.untagPtr(GPRInfo::regT2, GPRInfo::regT3);
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->returnPCOffset() + sizeof(void*)), GPRInfo::callFrameRegister, GPRInfo::regT2);
            jit.tagPtr(GPRInfo::regT2, GPRInfo::regT3);
#endif
            jit.storePtr(GPRInfo::regT3, AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
            jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else {
            CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
            auto callBytecodeIndex = trueCaller->bytecodeIndex();
            void* jumpTarget = callerReturnPC(baselineCodeBlockForCaller, callBytecodeIndex, trueCallerCallKind, callerIsLLInt);

            if (trueCaller->inlineCallFrame()) {
                jit.addPtr(
                    AssemblyHelpers::TrustedImm32(trueCaller->inlineCallFrame()->stackOffset * sizeof(EncodedJSValue)),
                    GPRInfo::callFrameRegister,
                    GPRInfo::regT3);
                callerFrameGPR = GPRInfo::regT3;
            }

#if CPU(ARM64E)
            // Sign the caller's return location for the slot it occupies in the
            // reified frame.
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->returnPCOffset() + sizeof(void*)), GPRInfo::callFrameRegister, GPRInfo::regT2);
            jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::nonArgGPR0);
            jit.tagPtr(GPRInfo::regT2, GPRInfo::nonArgGPR0);
            jit.storePtr(GPRInfo::nonArgGPR0, AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
#else
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
#endif
        }

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock)));

        // Restore the inline call frame's callee save registers.
        // If this inlined frame is a tail call that will return back to the original caller, we need to
        // copy the prior contents of the tag registers already saved for the outer frame to this frame.
        jit.emitSaveOrCopyCalleeSavesFor(
            baselineCodeBlock,
            static_cast<VirtualRegister>(inlineCallFrame->stackOffset),
            trueCaller ? AssemblyHelpers::UseExistingTagRegisterContents : AssemblyHelpers::CopyBaselineCalleeSavedRegistersFromBaseFrame,
            GPRInfo::regT2);

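        // An LLInt caller will reload its metadata table (and, on 64-bit, its
        // bytecode base) from the callee-save slots on return, so seed those
        // slots now.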
        if (callerIsLLInt) {
            CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
            jit.storePtr(CCallHelpers::TrustedImmPtr(baselineCodeBlockForCaller->metadataTable()), calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::metadataTableGPR));
#if USE(JSVALUE64)
            jit.storePtr(CCallHelpers::TrustedImmPtr(baselineCodeBlockForCaller->instructionsRawPointer()), calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::pbGPR));
#endif
        }

        if (!inlineCallFrame->isVarargs())
            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->argumentCountIncludingThis), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
#if USE(JSVALUE64)
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex()).bits();
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#else // USE(JSVALUE64), so this begins the 32-bit part
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        // On 32-bit, the call-site index encodes the address of the bytecode
        // instruction itself.
        const Instruction* instruction = baselineCodeBlock->instructions().at(codeOrigin->bytecodeIndex()).ptr();
        uint32_t locationBits = CallSiteIndex(BytecodeIndex(bitwise_cast<uint32_t>(instruction))).bits();
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#endif // USE(JSVALUE64), ending the 32-bit part
    }

    // Don't need to set the top-level code origin if we only did inline tail calls.
    if (codeOrigin) {
#if USE(JSVALUE64)
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex()).bits();
#else
        const Instruction* instruction = jit.baselineCodeBlock()->instructions().at(codeOrigin->bytecodeIndex()).ptr();
        uint32_t locationBits = CallSiteIndex(BytecodeIndex(bitwise_cast<uint32_t>(instruction))).bits();
#endif
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(CallFrameSlot::argumentCount)));
    }
}

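// Emits a GC write barrier on the given owner without a memory fence: a fast
// inline check skips cells that are already remembered or are in Eden; everything
// else takes a slow path call to operationOSRWriteBarrier.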
static void osrWriteBarrier(VM& vm, CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.barrierBranchWithoutFence(owner);

    jit.setupArguments<decltype(operationOSRWriteBarrier)>(&vm, owner);
    jit.prepareCallOperation(vm);
    jit.move(MacroAssembler::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch, OperationPtrTag);

    ownerIsRememberedOrInEden.link(&jit);
}

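// The final step of an OSR exit: write-barriers the baseline code blocks whose
// profiles the exit may have written to, repoints the call frame and stack
// pointer at the baseline frame, and jumps to the exit target in either the
// LLInt or the baseline JIT.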
void adjustAndJumpToTarget(VM& vm, CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.memoryFence();

    jit.move(
        AssemblyHelpers::TrustedImmPtr(
            jit.codeBlock()->baselineAlternative()), GPRInfo::argumentGPR1);
    osrWriteBarrier(vm, jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);

    // We barrier all inlined frames -- and not just the current inline stack --
    // because we don't know which inlined function owns the value profile that
    // we'll update when we exit. In the case of "f() { a(); b(); }", if both
    // a and b are inlined, we might exit inside b due to a bad value loaded
    // from a.
    // FIXME: MethodOfGettingAValueProfile should remember which CodeBlock owns
    // the value profile.
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            jit.move(
                AssemblyHelpers::TrustedImmPtr(
                    inlineCallFrame->baselineCodeBlock.get()), GPRInfo::argumentGPR1);
            osrWriteBarrier(vm, jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);
        }
    }

    auto* exitInlineCallFrame = exit.m_codeOrigin.inlineCallFrame();
    if (exitInlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exitInlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* codeBlockForExit = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    ASSERT(codeBlockForExit == codeBlockForExit->baselineVersion());
    ASSERT(JITCode::isBaselineCode(codeBlockForExit->jitType()));

    void* jumpTarget;
    bool exitToLLInt = Options::forceOSRExitToLLInt() || codeBlockForExit->jitType() == JITType::InterpreterThunk;
    if (exitToLLInt) {
        auto bytecodeIndex = exit.m_codeOrigin.bytecodeIndex();
        const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeIndex).ptr();
        MacroAssemblerCodePtr<JSEntryPtrTag> destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction);

        if (exit.isExceptionHandler()) {
            jit.move(CCallHelpers::TrustedImmPtr(&currentInstruction), GPRInfo::regT2);
            jit.storePtr(GPRInfo::regT2, &vm.targetInterpreterPCForThrow);
        }

        // Reconstitute the LLInt's register state: the metadata table, and the
        // bytecode base and PC (64-bit) or the instruction pointer (32-bit).
        jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->metadataTable()), LLInt::Registers::metadataTableGPR);
#if USE(JSVALUE64)
        jit.move(CCallHelpers::TrustedImmPtr(codeBlockForExit->instructionsRawPointer()), LLInt::Registers::pbGPR);
        jit.move(CCallHelpers::TrustedImm32(bytecodeIndex.offset()), LLInt::Registers::pcGPR);
#else
        jit.move(CCallHelpers::TrustedImmPtr(&currentInstruction), LLInt::Registers::pcGPR);
#endif
        jumpTarget = destination.retagged<OSRExitPtrTag>().executableAddress();
    } else {
        CodeLocationLabel<JSEntryPtrTag> codeLocation = codeBlockForExit->jitCodeMap().find(exit.m_codeOrigin.bytecodeIndex());
        ASSERT(codeLocation);

        jumpTarget = codeLocation.retagged<OSRExitPtrTag>().executableAddress();
    }

    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
    if (exit.isExceptionHandler()) {
        // Since we're jumping to op_catch, we need to set callFrameForCatch.
        jit.storePtr(GPRInfo::callFrameRegister, vm.addressOfCallFrameForCatch());
    }

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.farJump(GPRInfo::regT2, OSRExitPtrTag);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)