/*
 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExit.h"

#if ENABLE(DFG_JIT)

#include "AssemblyHelpers.h"
#include "BytecodeUseDef.h"
#include "ClonedArguments.h"
#include "DFGGraph.h"
#include "DFGMayExit.h"
#include "DFGOSRExitCompilerCommon.h"
#include "DFGOperations.h"
#include "DFGSpeculativeJIT.h"
#include "DirectArguments.h"
#include "FrameTracers.h"
#include "InlineCallFrame.h"
#include "JSCInlines.h"
#include "JSCJSValue.h"
#include "OperandsInlines.h"
#include "ProbeContext.h"
#include "ProbeFrame.h"

namespace JSC { namespace DFG {

// Probe based OSR Exit.

using CPUState = Probe::CPUState;
using Context = Probe::Context;
using Frame = Probe::Frame;

static void reifyInlinedCallFrames(Probe::Context&, CodeBlock* baselineCodeBlock, const OSRExitBase&);
static void adjustAndJumpToTarget(Probe::Context&, VM&, CodeBlock*, CodeBlock* baselineCodeBlock, OSRExit&);
static void printOSRExit(Context&, uint32_t osrExitIndex, const OSRExit&);

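// Reads the JSValue described by a JSValueSource out of the probe's captured CPU state,
// either from memory (base register + offset) or directly from the captured register contents.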
static JSValue jsValueFor(CPUState& cpu, JSValueSource source)
{
    if (source.isAddress()) {
        JSValue result;
        std::memcpy(&result, cpu.gpr<uint8_t*>(source.base()) + source.offset(), sizeof(JSValue));
        return result;
    }
#if USE(JSVALUE64)
    return JSValue::decode(cpu.gpr<EncodedJSValue>(source.gpr()));
#else
    if (source.hasKnownTag())
        return JSValue(source.tag(), cpu.gpr<int32_t>(source.payloadGPR()));
    return JSValue(cpu.gpr<int32_t>(source.tagGPR()), cpu.gpr<int32_t>(source.payloadGPR()));
#endif
}

#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0

// Based on AssemblyHelpers::emitRestoreCalleeSavesFor().
static void restoreCalleeSavesFor(Context& context, CodeBlock* codeBlock)
{
    ASSERT(codeBlock);

    const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
    RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
    unsigned registerCount = calleeSaves->size();

    UCPURegister* physicalStackFrame = context.fp<UCPURegister*>();
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (dontRestoreRegisters.get(entry.reg()))
            continue;
        // The callee saved values come from the original stack, not the recovered stack.
        // Hence, we read the values directly from the physical stack memory instead of
        // going through context.stack().
        ASSERT(!(entry.offset() % sizeof(UCPURegister)));
        context.gpr(entry.reg().gpr()) = physicalStackFrame[entry.offset() / sizeof(UCPURegister)];
    }
}

// Based on AssemblyHelpers::emitSaveCalleeSavesFor().
static void saveCalleeSavesFor(Context& context, CodeBlock* codeBlock)
{
    auto& stack = context.stack();
    ASSERT(codeBlock);

    const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
    unsigned registerCount = calleeSaves->size();

    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (dontSaveRegisters.get(entry.reg()))
            continue;
        stack.set(context.fp(), entry.offset(), context.gpr<UCPURegister>(entry.reg().gpr()));
    }
}

// Based on AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer().
static void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(Context& context)
{
    VM& vm = *context.arg<VM*>();

    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
    unsigned registerCount = allCalleeSaves->size();

    VMEntryRecord* entryRecord = vmEntryRecord(vm.topEntryFrame);
    UCPURegister* calleeSaveBuffer = reinterpret_cast<UCPURegister*>(entryRecord->calleeSaveRegistersBuffer);

    // Restore all callee saves.
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontRestoreRegisters.get(entry.reg()))
            continue;
        size_t uintptrOffset = entry.offset() / sizeof(UCPURegister);
        if (entry.reg().isGPR())
            context.gpr(entry.reg().gpr()) = calleeSaveBuffer[uintptrOffset];
        else {
#if USE(JSVALUE64)
            context.fpr(entry.reg().fpr()) = bitwise_cast<double>(calleeSaveBuffer[uintptrOffset]);
#else
            // FIXME: <https://webkit.org/b/193275> support callee-saved floating point registers on 32-bit architectures
            RELEASE_ASSERT_NOT_REACHED();
#endif
        }
    }
}

// Based on AssemblyHelpers::copyCalleeSavesToVMEntryFrameCalleeSavesBuffer().
static void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(Context& context)
{
    VM& vm = *context.arg<VM*>();
    auto& stack = context.stack();

    VMEntryRecord* entryRecord = vmEntryRecord(vm.topEntryFrame);
    void* calleeSaveBuffer = entryRecord->calleeSaveRegistersBuffer;

    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
    unsigned registerCount = allCalleeSaves->size();

    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = allCalleeSaves->at(i);
        if (dontCopyRegisters.get(entry.reg()))
            continue;
        if (entry.reg().isGPR())
            stack.set(calleeSaveBuffer, entry.offset(), context.gpr<UCPURegister>(entry.reg().gpr()));
        else {
#if USE(JSVALUE64)
            stack.set(calleeSaveBuffer, entry.offset(), context.fpr<UCPURegister>(entry.reg().fpr()));
#else
            // FIXME: <https://webkit.org/b/193275> support callee-saved floating point registers on 32-bit architectures
            RELEASE_ASSERT_NOT_REACHED();
#endif
        }
    }
}

// Based on AssemblyHelpers::emitSaveOrCopyCalleeSavesFor().
static void saveOrCopyCalleeSavesFor(Context& context, CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, bool wasCalledViaTailCall)
{
    Frame frame(context.fp(), context.stack());
    ASSERT(codeBlock);

    const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
    unsigned registerCount = calleeSaves->size();

    RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters();

    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset entry = calleeSaves->at(i);
        if (dontSaveRegisters.get(entry.reg()))
            continue;

        uintptr_t savedRegisterValue;

        if (wasCalledViaTailCall && baselineCalleeSaves.get(entry.reg()))
            savedRegisterValue = frame.get<uintptr_t>(entry.offset());
        else
            savedRegisterValue = context.gpr(entry.reg().gpr());

        frame.set(offsetVirtualRegister.offsetInBytes() + entry.offset(), savedRegisterValue);
    }
}
#else // not NUMBER_OF_CALLEE_SAVES_REGISTERS > 0

static void restoreCalleeSavesFor(Context&, CodeBlock*) { }
static void saveCalleeSavesFor(Context&, CodeBlock*) { }
static void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(Context&) { }
static void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(Context&) { }
static void saveOrCopyCalleeSavesFor(Context&, CodeBlock*, VirtualRegister, bool) { }

#endif // NUMBER_OF_CALLEE_SAVES_REGISTERS > 0

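// Materializes a DirectArguments object for a frame whose arguments object creation was elided
// by the DFG, reading the argument values back out of the already-reconstructed stack frame.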
static JSCell* createDirectArgumentsDuringExit(Context& context, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
{
    VM& vm = *context.arg<VM*>();

    ASSERT(vm.heap.isDeferred());

    if (inlineCallFrame)
        codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);

    unsigned length = argumentCount - 1;
    unsigned capacity = std::max(length, static_cast<unsigned>(codeBlock->numParameters() - 1));
    DirectArguments* result = DirectArguments::create(
        vm, codeBlock->globalObject()->directArgumentsStructure(), length, capacity);

    result->setCallee(vm, callee);

    void* frameBase = context.fp<Register*>() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
    Frame frame(frameBase, context.stack());
    for (unsigned i = length; i--;)
        result->setIndexQuickly(vm, i, frame.argument(i));

    return result;
}

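// Same as above, but materializes a ClonedArguments object (the strict-mode / "arguments"-escapes
// flavor), copying each argument out of the recovered frame.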
static JSCell* createClonedArgumentsDuringExit(Context& context, CodeBlock* codeBlock, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
{
    VM& vm = *context.arg<VM*>();

    ASSERT(vm.heap.isDeferred());

    if (inlineCallFrame)
        codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);

    JSGlobalObject* globalObject = codeBlock->globalObject();
    unsigned length = argumentCount - 1;
    ClonedArguments* result = ClonedArguments::createEmpty(
        vm, globalObject->clonedArgumentsStructure(), callee, length);

    void* frameBase = context.fp<Register*>() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
    Frame frame(frameBase, context.stack());
    for (unsigned i = length; i--;)
        result->putDirectIndex(globalObject, i, frame.argument(i));
    return result;
}

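// For each operand whose recovery says its arguments object was never created, allocate the
// DirectArguments or ClonedArguments object now and store it into the recovered frame. Operands
// that refer to the same phantom node share a single allocation via alreadyAllocatedArguments.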
static void emitRestoreArguments(Context& context, CodeBlock* codeBlock, DFG::JITCode* dfgJITCode, const Operands<ValueRecovery>& operands)
{
    Frame frame(context.fp(), context.stack());

    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        if (recovery.technique() != DirectArgumentsThatWereNotCreated
            && recovery.technique() != ClonedArgumentsThatWereNotCreated)
            continue;

        MinifiedID id = recovery.nodeID();
        auto iter = alreadyAllocatedArguments.find(id);
        if (iter != alreadyAllocatedArguments.end()) {
            frame.setOperand(operand, frame.operand(iter->value));
            continue;
        }

        InlineCallFrame* inlineCallFrame =
            dfgJITCode->minifiedDFG.at(id)->inlineCallFrame();

        int stackOffset;
        if (inlineCallFrame)
            stackOffset = inlineCallFrame->stackOffset;
        else
            stackOffset = 0;

        JSFunction* callee;
        if (!inlineCallFrame || inlineCallFrame->isClosureCall)
            callee = jsCast<JSFunction*>(frame.operand(stackOffset + CallFrameSlot::callee).asCell());
        else
            callee = jsCast<JSFunction*>(inlineCallFrame->calleeRecovery.constant().asCell());

        int32_t argumentCount;
        if (!inlineCallFrame || inlineCallFrame->isVarargs())
            argumentCount = frame.operand<int32_t>(stackOffset + CallFrameSlot::argumentCount, PayloadOffset);
        else
            argumentCount = inlineCallFrame->argumentCountIncludingThis;

        JSCell* argumentsObject;
        switch (recovery.technique()) {
        case DirectArgumentsThatWereNotCreated:
            argumentsObject = createDirectArgumentsDuringExit(context, codeBlock, inlineCallFrame, callee, argumentCount);
            break;
        case ClonedArgumentsThatWereNotCreated:
            argumentsObject = createClonedArgumentsDuringExit(context, codeBlock, inlineCallFrame, callee, argumentCount);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        frame.setOperand(operand, JSValue(argumentsObject));

        alreadyAllocatedArguments.add(id, operand);
    }
}

// The following is a list of extra initializations that need to be done in order
// of most likely needed (lower enum value) to least likely needed (higher enum value).
// Each level of initialization also includes all levels with lower enum values (see
// use of the extraInitializationLevel value below).
enum class ExtraInitializationLevel {
    None,
    SpeculationRecovery,
    ValueProfileUpdate,
    ArrayProfileUpdate,
    Other
};

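// Entry point of the probe-based OSR exit ramp. This runs as regular C++ code invoked via a
// probe: it recovers all operand values into the stack frame, restores callee-save registers,
// reifies any inlined call frames, and finally redirects execution to the baseline JIT or LLInt.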
void OSRExit::executeOSRExit(Context& context)
{
    VM& vm = *context.arg<VM*>();
    auto scope = DECLARE_THROW_SCOPE(vm);

    CallFrame* callFrame = context.fp<CallFrame*>();
    ASSERT(&callFrame->deprecatedVM() == &vm);
    auto& cpu = context.cpu;

    if (validateDFGDoesGC) {
        // We're about to exit optimized code. So, there's no longer any optimized
        // code running that expects no GC.
        vm.heap.setExpectDoesGC(true);
    }

    if (vm.callFrameForCatch) {
        callFrame = vm.callFrameForCatch;
        context.fp() = callFrame;
    }

    CodeBlock* codeBlock = callFrame->codeBlock();
    ASSERT(codeBlock);
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);

    // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't
    // really be profitable.
    DeferGCForAWhile deferGC(vm.heap);

    uint32_t exitIndex = vm.osrExitIndex;
    DFG::JITCode* dfgJITCode = codeBlock->jitCode()->dfg();
    OSRExit& exit = dfgJITCode->osrExit[exitIndex];

    ASSERT(!vm.callFrameForCatch || exit.m_kind == GenericUnwind);
    EXCEPTION_ASSERT_UNUSED(scope, !!scope.exception() || !exit.isExceptionHandler());

    if (UNLIKELY(!exit.exitState)) {
        ExtraInitializationLevel extraInitializationLevel = ExtraInitializationLevel::None;

        // We only need to execute this block once for each OSRExit record. The computed
        // results will be cached in the OSRExitState record for use by the rest of the
        // exit ramp code.

        CodeBlock* baselineCodeBlock = codeBlock->baselineAlternative();
        ASSERT(JITCode::isBaselineCode(baselineCodeBlock->jitType()));

        SpeculationRecovery* recovery = nullptr;
        if (exit.m_recoveryIndex != UINT_MAX) {
            recovery = &dfgJITCode->speculationRecovery[exit.m_recoveryIndex];
            extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::SpeculationRecovery);
        }

        if (UNLIKELY(exit.m_kind == GenericUnwind))
            extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);

        ArrayProfile* arrayProfile = nullptr;
        if (!!exit.m_jsValueSource) {
            if (exit.m_valueProfile)
                extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::ValueProfileUpdate);
            if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
                CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
                CodeBlock* profiledCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
                arrayProfile = profiledCodeBlock->getArrayProfile(codeOrigin.bytecodeIndex());
                if (arrayProfile)
                    extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::ArrayProfileUpdate);
            }
        }

        int32_t activeThreshold = baselineCodeBlock->adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp());
        double adjustedThreshold = applyMemoryUsageHeuristicsAndConvertToInt(activeThreshold, baselineCodeBlock);
        ASSERT(adjustedThreshold > 0);
        adjustedThreshold = BaselineExecutionCounter::clippedThreshold(codeBlock->globalObject(), adjustedThreshold);

        CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock);
        bool exitToLLInt = Options::forceOSRExitToLLInt() || codeBlockForExit->jitType() == JITType::InterpreterThunk;
        void* jumpTarget;
        if (exitToLLInt) {
            BytecodeIndex bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();
            const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeOffset).ptr();
            MacroAssemblerCodePtr<JSEntryPtrTag> destination = LLInt::getCodePtr<JSEntryPtrTag>(currentInstruction);
            jumpTarget = destination.executableAddress();
        } else {
            const JITCodeMap& codeMap = codeBlockForExit->jitCodeMap();
            CodeLocationLabel<JSEntryPtrTag> codeLocation = codeMap.find(exit.m_codeOrigin.bytecodeIndex());
            ASSERT(codeLocation);
            jumpTarget = codeLocation.executableAddress();
        }

        // Compute the value recoveries.
        Operands<ValueRecovery> operands;
        Vector<UndefinedOperandSpan> undefinedOperandSpans;
        dfgJITCode->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, dfgJITCode->minifiedDFG, exit.m_streamIndex, operands, &undefinedOperandSpans);
        ptrdiff_t stackPointerOffset = -static_cast<ptrdiff_t>(codeBlock->jitCode()->dfgCommon()->requiredRegisterCountForExit) * sizeof(Register);

        exit.exitState = adoptRef(new OSRExitState(exit, codeBlock, baselineCodeBlock, operands, WTFMove(undefinedOperandSpans), recovery, stackPointerOffset, activeThreshold, adjustedThreshold, jumpTarget, arrayProfile, exitToLLInt));

        if (UNLIKELY(vm.m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation)) {
            Profiler::Database& database = *vm.m_perBytecodeProfiler;
            Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();

            Profiler::OSRExit* profilerExit = compilation->addOSRExit(
                exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
                exit.m_kind, exit.m_kind == UncountableInvalidation);
            exit.exitState->profilerExit = profilerExit;
            extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);
        }

        if (UNLIKELY(Options::printEachOSRExit()))
            extraInitializationLevel = std::max(extraInitializationLevel, ExtraInitializationLevel::Other);

        exit.exitState->extraInitializationLevel = extraInitializationLevel;

        if (UNLIKELY(Options::verboseOSR() || Options::verboseDFGOSRExit())) {
            dataLogF("DFG OSR exit #%u (%s, %s) from %s, with operands = %s\n",
                exitIndex, toCString(exit.m_codeOrigin).data(),
                exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
                toCString(ignoringContext<DumpContext>(operands)).data());
        }
    }

    OSRExitState& exitState = *exit.exitState.get();
    CodeBlock* baselineCodeBlock = exitState.baselineCodeBlock;
    ASSERT(JITCode::isBaselineCode(baselineCodeBlock->jitType()));

    Operands<ValueRecovery>& operands = exitState.operands;
    Vector<UndefinedOperandSpan>& undefinedOperandSpans = exitState.undefinedOperandSpans;

    context.sp() = context.fp<uint8_t*>() + exitState.stackPointerOffset;

    // The only reason for using this do while loop is so we can break out midway when appropriate.
    do {
        auto extraInitializationLevel = static_cast<ExtraInitializationLevel>(exitState.extraInitializationLevel);

        if (extraInitializationLevel == ExtraInitializationLevel::None)
            break;

        // Begin extra initialization level: SpeculationRecovery

        // We need to do speculation recovery first because array profiling and value profiling
        // may rely on a value that it recovers. However, that doesn't mean that it is likely
        // to have a recovery value. So, we'll decorate it as UNLIKELY.
        SpeculationRecovery* recovery = exitState.recovery;
        if (UNLIKELY(recovery)) {
            switch (recovery->type()) {
            case SpeculativeAdd:
                cpu.gpr(recovery->dest()) = cpu.gpr<uint32_t>(recovery->dest()) - cpu.gpr<uint32_t>(recovery->src());
#if USE(JSVALUE64)
                ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
                cpu.gpr(recovery->dest()) |= JSValue::NumberTag;
#endif
                break;

            case SpeculativeAddSelf:
                cpu.gpr(recovery->dest()) = static_cast<uint32_t>(cpu.gpr<int32_t>(recovery->dest()) >> 1) ^ 0x80000000U;
#if USE(JSVALUE64)
                ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
                cpu.gpr(recovery->dest()) |= JSValue::NumberTag;
#endif
                break;

            case SpeculativeAddImmediate:
                cpu.gpr(recovery->dest()) = (cpu.gpr<uint32_t>(recovery->dest()) - recovery->immediate());
#if USE(JSVALUE64)
                ASSERT(!(cpu.gpr(recovery->dest()) >> 32));
                cpu.gpr(recovery->dest()) |= JSValue::NumberTag;
#endif
                break;

            case BooleanSpeculationCheck:
#if USE(JSVALUE64)
                cpu.gpr(recovery->dest()) = cpu.gpr(recovery->dest()) ^ JSValue::ValueFalse;
#endif
                break;

            default:
                break;
            }
        }
        if (extraInitializationLevel <= ExtraInitializationLevel::SpeculationRecovery)
            break;

        // Begin extra initialization level: ValueProfileUpdate
        JSValue profiledValue;
        if (!!exit.m_jsValueSource) {
            profiledValue = jsValueFor(cpu, exit.m_jsValueSource);
            if (MethodOfGettingAValueProfile profile = exit.m_valueProfile)
                profile.reportValue(profiledValue);
        }
        if (extraInitializationLevel <= ExtraInitializationLevel::ValueProfileUpdate)
            break;

        // Begin extra initialization level: ArrayProfileUpdate
        if (ArrayProfile* arrayProfile = exitState.arrayProfile) {
            ASSERT(!!exit.m_jsValueSource);
            ASSERT(exit.m_kind == BadCache || exit.m_kind == BadIndexingType);
            CodeBlock* profiledCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOriginForExitProfile, baselineCodeBlock);
            const Instruction* instruction = profiledCodeBlock->instructions().at(exit.m_codeOriginForExitProfile.bytecodeIndex()).ptr();
            bool doProfile = !instruction->is<OpGetById>() || instruction->as<OpGetById>().metadata(profiledCodeBlock).m_modeMetadata.mode == GetByIdMode::ArrayLength;
            if (doProfile) {
                Structure* structure = profiledValue.asCell()->structure(vm);
                arrayProfile->observeStructure(structure);
                arrayProfile->observeArrayMode(arrayModesFromStructure(structure));
            }
        }
        if (extraInitializationLevel <= ExtraInitializationLevel::ArrayProfileUpdate)
            break;

        // Begin extra initialization level: Other
        if (UNLIKELY(exit.m_kind == GenericUnwind)) {
            // We are acting as a de facto op_catch because we arrive here from genericUnwind().
            // So, we must restore our call frame and stack pointer.
            restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(context);
            ASSERT(context.fp() == vm.callFrameForCatch);
        }

        if (exitState.profilerExit)
            exitState.profilerExit->incCount();

        if (UNLIKELY(Options::printEachOSRExit()))
            printOSRExit(context, vm.osrExitIndex, exit);

    } while (false); // End extra initialization.

    Frame frame(cpu.fp(), context.stack());
    ASSERT(!(context.fp<uintptr_t>() & 0x7));

#if USE(JSVALUE64)
    ASSERT(cpu.gpr<int64_t>(GPRInfo::numberTagRegister) == JSValue::NumberTag);
    ASSERT(cpu.gpr<int64_t>(GPRInfo::notCellMaskRegister) == JSValue::NotCellMask);
#endif

    // Do all data format conversions and store the results into the stack.
    // Note: we need to recover values before restoring callee save registers below
    // because the recovery may rely on values in some of callee save registers.

    int calleeSaveSpaceAsVirtualRegisters = static_cast<int>(baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters());
    size_t numberOfOperands = operands.size();
    size_t numUndefinedOperandSpans = undefinedOperandSpans.size();

    size_t nextUndefinedSpanIndex = 0;
    size_t nextUndefinedOperandIndex = numberOfOperands;
    if (numUndefinedOperandSpans)
        nextUndefinedOperandIndex = undefinedOperandSpans[nextUndefinedSpanIndex].firstIndex;

    JSValue undefined = jsUndefined();
    for (size_t spanIndex = 0; spanIndex < numUndefinedOperandSpans; ++spanIndex) {
        auto& span = undefinedOperandSpans[spanIndex];
        int firstOffset = span.minOffset;
        int lastOffset = firstOffset + span.numberOfRegisters;

        for (int offset = firstOffset; offset < lastOffset; ++offset)
            frame.setOperand(offset, undefined);
    }

    for (size_t index = 0; index < numberOfOperands; ++index) {
        const ValueRecovery& recovery = operands[index];
        VirtualRegister reg = operands.virtualRegisterForIndex(index);

        if (UNLIKELY(index == nextUndefinedOperandIndex)) {
            index += undefinedOperandSpans[nextUndefinedSpanIndex++].numberOfRegisters - 1;
            if (nextUndefinedSpanIndex < numUndefinedOperandSpans)
                nextUndefinedOperandIndex = undefinedOperandSpans[nextUndefinedSpanIndex].firstIndex;
            else
                nextUndefinedOperandIndex = numberOfOperands;
            continue;
        }

        if (reg.isLocal() && reg.toLocal() < calleeSaveSpaceAsVirtualRegisters)
            continue;

        int operand = reg.offset();

        switch (recovery.technique()) {
        case DisplacedInJSStack:
            frame.setOperand(operand, callFrame->r(recovery.virtualRegister()).asanUnsafeJSValue());
            break;

        case InFPR:
            frame.setOperand(operand, cpu.fpr<JSValue>(recovery.fpr()));
            break;

#if USE(JSVALUE64)
        case InGPR:
            frame.setOperand(operand, cpu.gpr<JSValue>(recovery.gpr()));
            break;
#else
        case InPair:
            frame.setOperand(operand, JSValue(cpu.gpr<int32_t>(recovery.tagGPR()), cpu.gpr<int32_t>(recovery.payloadGPR())));
            break;
#endif

        case UnboxedCellInGPR:
            frame.setOperand(operand, JSValue(cpu.gpr<JSCell*>(recovery.gpr())));
            break;

        case CellDisplacedInJSStack:
            frame.setOperand(operand, JSValue(callFrame->r(recovery.virtualRegister()).asanUnsafeUnboxedCell()));
            break;

#if USE(JSVALUE32_64)
        case UnboxedBooleanInGPR:
            frame.setOperand(operand, jsBoolean(cpu.gpr<bool>(recovery.gpr())));
            break;
#endif

        case BooleanDisplacedInJSStack:
#if USE(JSVALUE64)
            frame.setOperand(operand, callFrame->r(recovery.virtualRegister()).asanUnsafeJSValue());
#else
            frame.setOperand(operand, jsBoolean(callFrame->r(recovery.virtualRegister()).asanUnsafeJSValue().payload()));
#endif
            break;

        case UnboxedInt32InGPR:
            frame.setOperand(operand, JSValue(cpu.gpr<int32_t>(recovery.gpr())));
            break;

        case Int32DisplacedInJSStack:
            frame.setOperand(operand, JSValue(callFrame->r(recovery.virtualRegister()).asanUnsafeUnboxedInt32()));
            break;

#if USE(JSVALUE64)
        case UnboxedInt52InGPR:
            frame.setOperand(operand, JSValue(cpu.gpr<int64_t>(recovery.gpr()) >> JSValue::int52ShiftAmount));
            break;

        case Int52DisplacedInJSStack:
            frame.setOperand(operand, JSValue(callFrame->r(recovery.virtualRegister()).asanUnsafeUnboxedInt52()));
            break;

        case UnboxedStrictInt52InGPR:
            frame.setOperand(operand, JSValue(cpu.gpr<int64_t>(recovery.gpr())));
            break;

        case StrictInt52DisplacedInJSStack:
            frame.setOperand(operand, JSValue(callFrame->r(recovery.virtualRegister()).asanUnsafeUnboxedStrictInt52()));
            break;
#endif

        case UnboxedDoubleInFPR:
            frame.setOperand(operand, JSValue(JSValue::EncodeAsDouble, purifyNaN(cpu.fpr(recovery.fpr()))));
            break;

        case DoubleDisplacedInJSStack:
            frame.setOperand(operand, JSValue(JSValue::EncodeAsDouble, purifyNaN(callFrame->r(recovery.virtualRegister()).asanUnsafeUnboxedDouble())));
            break;

        case Constant:
            frame.setOperand(operand, recovery.constant());
            break;

        case DirectArgumentsThatWereNotCreated:
        case ClonedArgumentsThatWereNotCreated:
            // Don't do this, yet.
            break;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    // Restore the DFG callee saves and then save the ones the baseline JIT uses.
    restoreCalleeSavesFor(context, codeBlock);
    saveCalleeSavesFor(context, baselineCodeBlock);

#if USE(JSVALUE64)
    cpu.gpr(GPRInfo::numberTagRegister) = static_cast<JSC::UCPURegister>(JSValue::NumberTag);
    cpu.gpr(GPRInfo::notCellMaskRegister) = static_cast<JSC::UCPURegister>(JSValue::NotCellMask);
#endif

    if (exit.isExceptionHandler())
        copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(context);

    // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
    // recoveries don't recursively refer to each other. But, we don't try to assume that they only
    // refer to certain ranges of locals. Hence why we need to do this here, once the stack is sensible.
    // Note that we also roughly assume that the arguments might still be materialized outside of their
    // inline call frame scope - but for now the DFG wouldn't do that.

    DFG::emitRestoreArguments(context, codeBlock, dfgJITCode, operands);

    // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    // that all new calls into this code will go to the new JIT, so the execute
    // counter only affects call frames that performed OSR exit and call frames
    // that were still executing the old JIT at the time of another call frame's
    // OSR exit. We want to ensure that the following is true:
    //
    // (a) Code that performs an OSR exit gets a chance to reenter optimized
    //     code eventually, since optimized code is faster. But we don't
    //     want to do such reentry too aggressively (see (c) below).
    //
    // (b) If there is code on the call stack that is still running the old
    //     JIT's code and has never OSR'd, then it should get a chance to
    //     perform OSR entry despite the fact that we've exited.
    //
    // (c) Code that performs an OSR exit should not immediately retry OSR
    //     entry, since both forms of OSR are expensive. OSR entry is
    //     particularly expensive.
    //
    // (d) Frequent OSR failures, even those that do not result in the code
    //     running in a hot loop, result in recompilation getting triggered.
    //
    // To ensure (c), we'd like to set the execute counter to
    // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    // (a) and (b), since then every OSR exit would delay the opportunity for
    // every call frame to perform OSR entry. Essentially, if OSR exit happens
    // frequently and the function has few loops, then the counter will never
    // become non-negative and OSR entry will never be triggered. OSR entry
    // will only happen if a loop gets hot in the old JIT, which does a pretty
    // good job of ensuring (a) and (b). But that doesn't take care of (d),
    // since each speculation failure would reset the execute counter.
    // So we check here if the number of speculation failures is significantly
    // larger than the number of successes (we want 90% success rate), and if
    // there have been a large enough number of failures. If so, we set the
    // counter to 0; otherwise we set the counter to
    // counterValueForOptimizeAfterWarmUp().

    if (UNLIKELY(codeBlock->updateOSRExitCounterAndCheckIfNeedToReoptimize(exitState) == CodeBlock::OptimizeAction::ReoptimizeNow))
        triggerReoptimizationNow(baselineCodeBlock, codeBlock, &exit);

    reifyInlinedCallFrames(context, baselineCodeBlock, exit);
    adjustAndJumpToTarget(context, vm, codeBlock, baselineCodeBlock, exit);
}

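// Rebuilds baseline-style call frames for every inlined frame on the exit's inline stack: fills
// in the CodeBlock, callee, argument count, return PC, and caller-frame slots so that the stack
// looks as if each inlined function had been called normally.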
static void reifyInlinedCallFrames(Context& context, CodeBlock* outermostBaselineCodeBlock, const OSRExitBase& exit)
{
    auto& cpu = context.cpu;
    Frame frame(cpu.fp(), context.stack());

    // FIXME: We shouldn't leave holes on the stack when performing an OSR exit
    // in presence of inlined tail calls.
    // https://bugs.webkit.org/show_bug.cgi?id=147511
    ASSERT(JITCode::isBaselineCode(outermostBaselineCodeBlock->jitType()));
    frame.setOperand<CodeBlock*>(CallFrameSlot::codeBlock, outermostBaselineCodeBlock);

    const CodeOrigin* codeOrigin;
    for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame(); codeOrigin = codeOrigin->inlineCallFrame()->getCallerSkippingTailCalls()) {
        InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame();
        CodeBlock* baselineCodeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(*codeOrigin, outermostBaselineCodeBlock);
        InlineCallFrame::Kind trueCallerCallKind;
        CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
        void* callerFrame = cpu.fp();

        bool callerIsLLInt = false;

        if (!trueCaller) {
            ASSERT(inlineCallFrame->isTail());
            void* returnPC = frame.get<void*>(CallFrame::returnPCOffset());
#if CPU(ARM64E)
            void* oldEntrySP = cpu.fp<uint8_t*>() + sizeof(CallerFrameAndPC);
            void* newEntrySP = cpu.fp<uint8_t*>() + inlineCallFrame->returnPCOffset() + sizeof(void*);
            returnPC = retagCodePtr(returnPC, bitwise_cast<PtrTag>(oldEntrySP), bitwise_cast<PtrTag>(newEntrySP));
#endif
            frame.set<void*>(inlineCallFrame->returnPCOffset(), returnPC);
            callerFrame = frame.get<void*>(CallFrame::callerFrameOffset());
        } else {
            CodeBlock* baselineCodeBlockForCaller = baselineCodeBlockForOriginAndBaselineCodeBlock(*trueCaller, outermostBaselineCodeBlock);
            BytecodeIndex callBytecodeIndex = trueCaller->bytecodeIndex();
            void* jumpTarget = callerReturnPC(baselineCodeBlockForCaller, callBytecodeIndex, trueCallerCallKind, callerIsLLInt);

            if (trueCaller->inlineCallFrame())
                callerFrame = cpu.fp<uint8_t*>() + trueCaller->inlineCallFrame()->stackOffset * sizeof(EncodedJSValue);

#if CPU(ARM64E)
            void* newEntrySP = cpu.fp<uint8_t*>() + inlineCallFrame->returnPCOffset() + sizeof(void*);
            jumpTarget = tagCodePtr(jumpTarget, bitwise_cast<PtrTag>(newEntrySP));
#endif
            frame.set<void*>(inlineCallFrame->returnPCOffset(), jumpTarget);
        }

        frame.setOperand<void*>(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock, baselineCodeBlock);

        // Restore the inline call frame's callee save registers.
        // If this inlined frame is a tail call that will return back to the original caller, we need to
        // copy the prior contents of the tag registers already saved for the outer frame to this frame.
        saveOrCopyCalleeSavesFor(context, baselineCodeBlock, VirtualRegister(inlineCallFrame->stackOffset), !trueCaller);

        if (callerIsLLInt) {
            CodeBlock* baselineCodeBlockForCaller = baselineCodeBlockForOriginAndBaselineCodeBlock(*trueCaller, outermostBaselineCodeBlock);
            frame.set<const void*>(calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::metadataTableGPR).offset, baselineCodeBlockForCaller->metadataTable());
#if USE(JSVALUE64)
            frame.set<const void*>(calleeSaveSlot(inlineCallFrame, baselineCodeBlock, LLInt::Registers::pbGPR).offset, baselineCodeBlockForCaller->instructionsRawPointer());
#endif
        }

        if (!inlineCallFrame->isVarargs())
            frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, PayloadOffset, inlineCallFrame->argumentCountIncludingThis);
        ASSERT(callerFrame);
        frame.set<void*>(inlineCallFrame->callerFrameOffset(), callerFrame);
#if USE(JSVALUE64)
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex()).bits();
        frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, TagOffset, locationBits);
        if (!inlineCallFrame->isClosureCall)
            frame.setOperand(inlineCallFrame->stackOffset + CallFrameSlot::callee, JSValue(inlineCallFrame->calleeConstant()));
#else // USE(JSVALUE64) // so this is the 32-bit part
        const Instruction* instruction = baselineCodeBlock->instructions().at(codeOrigin->bytecodeIndex()).ptr();
        uint32_t locationBits = CallSiteIndex(BytecodeIndex(bitwise_cast<uint32_t>(instruction))).bits();
        frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount, TagOffset, locationBits);
        frame.setOperand<uint32_t>(inlineCallFrame->stackOffset + CallFrameSlot::callee, TagOffset, static_cast<uint32_t>(JSValue::CellTag));
        if (!inlineCallFrame->isClosureCall)
            frame.setOperand(inlineCallFrame->stackOffset + CallFrameSlot::callee, PayloadOffset, inlineCallFrame->calleeConstant());
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
    }

    // Don't need to set the toplevel code origin if we only did inline tail calls
    if (codeOrigin) {
#if USE(JSVALUE64)
        uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex()).bits();
#else
        const Instruction* instruction = outermostBaselineCodeBlock->instructions().at(codeOrigin->bytecodeIndex()).ptr();
        uint32_t locationBits = CallSiteIndex(BytecodeIndex(bitwise_cast<uint32_t>(instruction))).bits();
#endif
        frame.setOperand<uint32_t>(CallFrameSlot::argumentCount, TagOffset, locationBits);
    }
}

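// Performs the final steps of the probe-based exit: write-barriers the baseline code blocks,
// adjusts the frame pointer when exiting into an inlined frame, sets up the LLInt register
// state if the target is the interpreter, and points the probe's PC at the exit's jump target.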
static void adjustAndJumpToTarget(Context& context, VM& vm, CodeBlock* codeBlock, CodeBlock* baselineCodeBlock, OSRExit& exit)
{
    OSRExitState* exitState = exit.exitState.get();

    WTF::storeLoadFence(); // The optimizing compiler expects that the OSR exit mechanism will execute this fence.
    vm.heap.writeBarrier(baselineCodeBlock);

    // We barrier all inlined frames -- and not just the current inline stack --
    // because we don't know which inlined function owns the value profile that
    // we'll update when we exit. In the case of "f() { a(); b(); }", if both
    // a and b are inlined, we might exit inside b due to a bad value loaded
    // from a.
    // FIXME: MethodOfGettingAValueProfile should remember which CodeBlock owns
    // the value profile.
    InlineCallFrameSet* inlineCallFrames = codeBlock->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames)
            vm.heap.writeBarrier(inlineCallFrame->baselineCodeBlock.get());
    }

    auto* exitInlineCallFrame = exit.m_codeOrigin.inlineCallFrame();
    if (exitInlineCallFrame)
        context.fp() = context.fp<uint8_t*>() + exitInlineCallFrame->stackOffset * sizeof(EncodedJSValue);

    void* jumpTarget = exitState->jumpTarget;
    ASSERT(jumpTarget);

    if (exit.isExceptionHandler()) {
        // Since we're jumping to op_catch, we need to set callFrameForCatch.
        vm.callFrameForCatch = context.fp<CallFrame*>();
    }

    vm.topCallFrame = context.fp<CallFrame*>();

    if (exitState->isJumpToLLInt) {
        CodeBlock* codeBlockForExit = baselineCodeBlockForOriginAndBaselineCodeBlock(exit.m_codeOrigin, baselineCodeBlock);
        BytecodeIndex bytecodeIndex = exit.m_codeOrigin.bytecodeIndex();
        const Instruction& currentInstruction = *codeBlockForExit->instructions().at(bytecodeIndex).ptr();

        context.gpr(LLInt::Registers::metadataTableGPR) = bitwise_cast<uintptr_t>(codeBlockForExit->metadataTable());
#if USE(JSVALUE64)
        context.gpr(LLInt::Registers::pbGPR) = bitwise_cast<uintptr_t>(codeBlockForExit->instructionsRawPointer());
        context.gpr(LLInt::Registers::pcGPR) = static_cast<uintptr_t>(exit.m_codeOrigin.bytecodeIndex().offset());
#else
        context.gpr(LLInt::Registers::pcGPR) = bitwise_cast<uintptr_t>(&currentInstruction);
#endif

        if (exit.isExceptionHandler())
            vm.targetInterpreterPCForThrow = &currentInstruction;
    }

    context.pc() = untagCodePtr<JSEntryPtrTag>(jumpTarget);
}

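// Dumps a human-readable description of the exit (code block, exit kind, counters, and the full
// GPR/FPR state captured by the probe). Only used when Options::printEachOSRExit() is enabled.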
static void printOSRExit(Context& context, uint32_t osrExitIndex, const OSRExit& exit)
{
    CallFrame* callFrame = context.fp<CallFrame*>();
    CodeBlock* codeBlock = callFrame->codeBlock();
    CodeBlock* alternative = codeBlock->alternative();
    ExitKind kind = exit.m_kind;
    BytecodeIndex bytecodeOffset = exit.m_codeOrigin.bytecodeIndex();

    dataLog("Speculation failure in ", *codeBlock);
    dataLog(" @ exit #", osrExitIndex, " (", bytecodeOffset, ", ", exitKindToString(kind), ") with ");
    if (alternative) {
        dataLog(
            "executeCounter = ", alternative->jitExecuteCounter(),
            ", reoptimizationRetryCounter = ", alternative->reoptimizationRetryCounter(),
            ", optimizationDelayCounter = ", alternative->optimizationDelayCounter());
    } else
        dataLog("no alternative code block (i.e. we've been jettisoned)");
    dataLog(", osrExitCounter = ", codeBlock->osrExitCounter(), "\n");
    dataLog("    GPRs at time of exit:");
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
        GPRReg gpr = GPRInfo::toRegister(i);
        dataLog(" ", context.gprName(gpr), ":", RawPointer(context.gpr<void*>(gpr)));
    }
    dataLog("\n");
    dataLog("    FPRs at time of exit:");
    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        FPRReg fpr = FPRInfo::toRegister(i);
        dataLog(" ", context.fprName(fpr), ":");
        uint64_t bits = context.fpr<uint64_t>(fpr);
        double value = context.fpr(fpr);
        dataLogF("%llx:%lf", static_cast<long long>(bits), value);
    }
    dataLog("\n");
}

// JIT based OSR Exit.

OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, SpeculativeJIT* jit, unsigned streamIndex, unsigned recoveryIndex)
    : OSRExitBase(kind, jit->m_origin.forExit, jit->m_origin.semantic, jit->m_origin.wasHoisted)
    , m_jsValueSource(jsValueSource)
    , m_valueProfile(valueProfile)
    , m_recoveryIndex(recoveryIndex)
    , m_streamIndex(streamIndex)
{
    bool canExit = jit->m_origin.exitOK;
    if (!canExit && jit->m_currentNode) {
        ExitMode exitMode = mayExit(jit->m_jit.graph(), jit->m_currentNode);
        canExit = exitMode == ExitMode::Exits || exitMode == ExitMode::ExitsForExceptions;
    }
    DFG_ASSERT(jit->m_jit.graph(), jit->m_currentNode, canExit);
}

CodeLocationJump<JSInternalPtrTag> OSRExit::codeLocationForRepatch() const
{
    return CodeLocationJump<JSInternalPtrTag>(m_patchableJumpLocation);
}

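// JIT analogue of the probe-based argument restoration above: emits code that calls out to
// operationCreateDirectArgumentsDuringExit / operationCreateClonedArgumentsDuringExit for each
// phantom arguments object and stores the result back into the reconstructed frame.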
void OSRExit::emitRestoreArguments(CCallHelpers& jit, VM& vm, const Operands<ValueRecovery>& operands)
{
    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        if (recovery.technique() != DirectArgumentsThatWereNotCreated
            && recovery.technique() != ClonedArgumentsThatWereNotCreated)
            continue;

        MinifiedID id = recovery.nodeID();
        auto iter = alreadyAllocatedArguments.find(id);
        if (iter != alreadyAllocatedArguments.end()) {
            JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
            jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
            jit.storeValue(regs, CCallHelpers::addressFor(operand));
            continue;
        }

        InlineCallFrame* inlineCallFrame =
            jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();

        int stackOffset;
        if (inlineCallFrame)
            stackOffset = inlineCallFrame->stackOffset;
        else
            stackOffset = 0;

        if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
            jit.loadPtr(
                AssemblyHelpers::addressFor(stackOffset + CallFrameSlot::callee),
                GPRInfo::regT0);
        } else {
            jit.move(
                AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
                GPRInfo::regT0);
        }

        if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
            jit.load32(
                AssemblyHelpers::payloadFor(stackOffset + CallFrameSlot::argumentCount),
                GPRInfo::regT1);
        } else {
            jit.move(
                AssemblyHelpers::TrustedImm32(inlineCallFrame->argumentCountIncludingThis),
                GPRInfo::regT1);
        }

        static_assert(std::is_same<decltype(operationCreateDirectArgumentsDuringExit), decltype(operationCreateClonedArgumentsDuringExit)>::value, "We assume these functions have the same signature below.");
        jit.setupArguments<decltype(operationCreateDirectArgumentsDuringExit)>(
            AssemblyHelpers::TrustedImmPtr(&vm), AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);
        jit.prepareCallOperation(vm);
        switch (recovery.technique()) {
        case DirectArgumentsThatWereNotCreated:
            jit.move(AssemblyHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        case ClonedArgumentsThatWereNotCreated:
            jit.move(AssemblyHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
        jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));

        alreadyAllocatedArguments.add(id, operand);
    }
}

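// Entry point of the JIT-based OSR exit path, called when an exit is taken and its ramp has not
// yet been compiled. It reconstructs the value recoveries, compiles the exit ramp with
// OSRExit::compileExit(), links it in place of the patchable jump, and publishes the jump
// destination for the exit thunk to use.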
void JIT_OPERATION operationCompileOSRExit(CallFrame* callFrame)
{
    VM& vm = callFrame->deprecatedVM();
    auto scope = DECLARE_THROW_SCOPE(vm);

    if (validateDFGDoesGC) {
        // We're about to exit optimized code. So, there's no longer any optimized
        // code running that expects no GC.
        vm.heap.setExpectDoesGC(true);
    }

    if (vm.callFrameForCatch)
        RELEASE_ASSERT(vm.callFrameForCatch == callFrame);

    CodeBlock* codeBlock = callFrame->codeBlock();
    ASSERT(codeBlock);
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);

    // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't
    // really be profitable.
    DeferGCForAWhile deferGC(vm.heap);

    uint32_t exitIndex = vm.osrExitIndex;
    OSRExit& exit = codeBlock->jitCode()->dfg()->osrExit[exitIndex];

    ASSERT(!vm.callFrameForCatch || exit.m_kind == GenericUnwind);
    EXCEPTION_ASSERT_UNUSED(scope, !!scope.exception() || !exit.isExceptionHandler());

    // Compute the value recoveries.
    Operands<ValueRecovery> operands;
    codeBlock->jitCode()->dfg()->variableEventStream.reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->jitCode()->dfg()->minifiedDFG, exit.m_streamIndex, operands);

    SpeculationRecovery* recovery = 0;
    if (exit.m_recoveryIndex != UINT_MAX)
        recovery = &codeBlock->jitCode()->dfg()->speculationRecovery[exit.m_recoveryIndex];

    {
        CCallHelpers jit(codeBlock);

        if (exit.m_kind == GenericUnwind) {
            // We are acting as a de facto op_catch because we arrive here from genericUnwind().
            // So, we must restore our call frame and stack pointer.
            jit.restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
            jit.loadPtr(vm.addressOfCallFrameForCatch(), GPRInfo::callFrameRegister);
        }
        jit.addPtr(
            CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)),
            GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);

        jit.jitAssertHasValidCallFrame();

        if (UNLIKELY(vm.m_perBytecodeProfiler && codeBlock->jitCode()->dfgCommon()->compilation)) {
            Profiler::Database& database = *vm.m_perBytecodeProfiler;
            Profiler::Compilation* compilation = codeBlock->jitCode()->dfgCommon()->compilation.get();

            Profiler::OSRExit* profilerExit = compilation->addOSRExit(
                exitIndex, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
                exit.m_kind, exit.m_kind == UncountableInvalidation);
            jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
        }

        OSRExit::compileExit(jit, vm, exit, operands, recovery);

        LinkBuffer patchBuffer(jit, codeBlock);
        exit.m_code = FINALIZE_CODE_IF(
            shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseDFGOSRExit(),
            patchBuffer, OSRExitPtrTag,
            "DFG OSR exit #%u (%s, %s) from %s, with operands = %s",
            exitIndex, toCString(exit.m_codeOrigin).data(),
            exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
            toCString(ignoringContext<DumpContext>(operands)).data());
    }

    MacroAssembler::repatchJump(exit.codeLocationForRepatch(), CodeLocationLabel<OSRExitPtrTag>(exit.m_code.code()));

    vm.osrExitJumpDestination = exit.m_code.code().executableAddress();
}

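// Emits the actual exit ramp for one OSR exit: optional speculation recovery, array/value
// profile refinement, then a deliberately simple state transfer that spills everything to a
// scratch buffer before reboxing it into the baseline frame (see the long comment below).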
void OSRExit::compileExit(CCallHelpers& jit, VM& vm, const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    jit.jitAssertTagsInPlace();

    // Pro-forma stuff.
    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = jit.codeBlock();
        debugInfo->kind = exit.m_kind;
        debugInfo->bytecodeIndex = exit.m_codeOrigin.bytecodeIndex();

        jit.debugCall(vm, operationDebugPrintSpeculationFailure, debugInfo);
    }

    // Perform speculation recovery. This only comes into play when an operation
    // starts mutating state before verifying the speculation it has already made.

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            jit.sub32(recovery->src(), recovery->dest());
#if USE(JSVALUE64)
            jit.or64(GPRInfo::numberTagRegister, recovery->dest());
#endif
            break;

        case SpeculativeAddSelf:
            // If A + A = A (int32_t) overflows, A can be recovered by ((static_cast<int32_t>(A) >> 1) ^ 0x80000000).
            jit.rshift32(AssemblyHelpers::TrustedImm32(1), recovery->dest());
            jit.xor32(AssemblyHelpers::TrustedImm32(0x80000000), recovery->dest());
#if USE(JSVALUE64)
            jit.or64(GPRInfo::numberTagRegister, recovery->dest());
#endif
            break;

        case SpeculativeAddImmediate:
            jit.sub32(AssemblyHelpers::Imm32(recovery->immediate()), recovery->dest());
#if USE(JSVALUE64)
            jit.or64(GPRInfo::numberTagRegister, recovery->dest());
#endif
            break;

        case BooleanSpeculationCheck:
#if USE(JSVALUE64)
            jit.xor64(AssemblyHelpers::TrustedImm32(JSValue::ValueFalse), recovery->dest());
#endif
            break;

        default:
            break;
        }
    }

    // Refine some array and/or value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            CodeBlock* codeBlock = jit.baselineCodeBlockFor(codeOrigin);
            if (ArrayProfile* arrayProfile = codeBlock->getArrayProfile(codeOrigin.bytecodeIndex())) {
                const Instruction* instruction = codeBlock->instructions().at(codeOrigin.bytecodeIndex()).ptr();
                CCallHelpers::Jump skipProfile;
                if (instruction->is<OpGetById>()) {
                    auto& metadata = instruction->as<OpGetById>().metadata(codeBlock);
                    skipProfile = jit.branch8(CCallHelpers::NotEqual, CCallHelpers::AbsoluteAddress(&metadata.m_modeMetadata.mode), CCallHelpers::TrustedImm32(static_cast<uint8_t>(GetByIdMode::ArrayLength)));
                }

#if USE(JSVALUE64)
                GPRReg usedRegister;
                if (exit.m_jsValueSource.isAddress())
                    usedRegister = exit.m_jsValueSource.base();
                else
                    usedRegister = exit.m_jsValueSource.gpr();
#else
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }
#endif

                GPRReg scratch1;
                GPRReg scratch2;
#if USE(JSVALUE64)
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister, scratch1);
#else
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
#endif

                if (isARM64()) {
                    jit.pushToSave(scratch1);
                    jit.pushToSave(scratch2);
                } else {
                    jit.push(scratch1);
                    jit.push(scratch2);
                }

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();

                jit.load32(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
                jit.store32(scratch1, arrayProfile->addressOfLastSeenStructureID());

                jit.load8(AssemblyHelpers::Address(value, JSCell::typeInfoTypeOffset()), scratch2);
                jit.sub32(AssemblyHelpers::TrustedImm32(FirstTypedArrayType), scratch2);
                auto notTypedArray = jit.branch32(MacroAssembler::AboveOrEqual, scratch2, AssemblyHelpers::TrustedImm32(NumberOfTypedArrayTypesExcludingDataView));
                jit.move(AssemblyHelpers::TrustedImmPtr(typedArrayModes), scratch1);
                jit.load32(AssemblyHelpers::BaseIndex(scratch1, scratch2, AssemblyHelpers::TimesFour), scratch2);
                auto storeArrayModes = jit.jump();

                notTypedArray.link(&jit);
#if USE(JSVALUE64)
                jit.load8(AssemblyHelpers::Address(value, JSCell::indexingTypeAndMiscOffset()), scratch1);
#else
                jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingModeIncludingHistoryOffset()), scratch1);
#endif
                jit.and32(AssemblyHelpers::TrustedImm32(IndexingModeMask), scratch1);
                jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                jit.lshift32(scratch1, scratch2);
                storeArrayModes.link(&jit);
                jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

                if (isARM64()) {
                    jit.popToRestore(scratch2);
                    jit.popToRestore(scratch1);
                } else {
                    jit.pop(scratch2);
                    jit.pop(scratch1);
                }

                if (skipProfile.isSet())
                    skipProfile.link(&jit);
            }
        }

        if (MethodOfGettingAValueProfile profile = exit.m_valueProfile) {
#if USE(JSVALUE64)
            if (exit.m_jsValueSource.isAddress()) {
                // We can't be sure that we have a spare register. So use the numberTagRegister,
                // since we know how to restore it.
                jit.load64(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::numberTagRegister);
                profile.emitReportValue(jit, JSValueRegs(GPRInfo::numberTagRegister));
                jit.move(AssemblyHelpers::TrustedImm64(JSValue::NumberTag), GPRInfo::numberTagRegister);
            } else
                profile.emitReportValue(jit, JSValueRegs(exit.m_jsValueSource.gpr()));
#else // not USE(JSVALUE64)
            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
                GPRReg scratchPayload = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
                GPRReg scratchTag = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base(), scratchPayload);
                jit.pushToSave(scratchPayload);
                jit.pushToSave(scratchTag);

                JSValueRegs scratch(scratchTag, scratchPayload);

                jit.loadValue(exit.m_jsValueSource.asAddress(), scratch);
                profile.emitReportValue(jit, scratch);

                jit.popToRestore(scratchTag);
                jit.popToRestore(scratchPayload);
            } else if (exit.m_jsValueSource.hasKnownTag()) {
                GPRReg scratchTag = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.payloadGPR());
                jit.pushToSave(scratchTag);
                jit.move(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), scratchTag);
                JSValueRegs value(scratchTag, exit.m_jsValueSource.payloadGPR());
                profile.emitReportValue(jit, value);
                jit.popToRestore(scratchTag);
            } else
                profile.emitReportValue(jit, exit.m_jsValueSource.regs());
#endif // USE(JSVALUE64)
        }
    }

    // What follows is an intentionally simple OSR exit implementation that generates
    // fairly poor code but is very easy to hack. In particular, it dumps all state that
    // needs conversion into a scratch buffer so that in step 6, where we actually do the
    // conversions, we know that all temp registers are free to use and the variable is
    // definitely in a well-known spot in the scratch buffer regardless of whether it had
    // originally been in a register or spilled. This allows us to decouple "where was
    // the variable" from "how was it represented". Consider the
    // Int32DisplacedInJSStack recovery: it tells us that the value is in a
    // particular place and that that place holds an unboxed int32. We have two different
    // places that a value could be (displaced, register) and a bunch of different
    // ways of representing a value. The number of recoveries is two * a bunch. The code
    // below means that we have to have two + a bunch cases rather than two * a bunch.
    // Once we have loaded the value from wherever it was, the reboxing is the same
    // regardless of its location. Likewise, before we do the reboxing, the way we get to
    // the value (i.e. where we load it from) is the same regardless of its type. Because
    // the code below always dumps everything into a scratch buffer first, the two
    // questions become orthogonal, which simplifies adding new types and adding new
    // locations.
    //
    // This raises the question: does using such a suboptimal implementation of OSR exit,
    // where we always emit code to dump all state into a scratch buffer only to then
    // dump it right back into the stack, hurt us in any way? The answer is that OSR exits
    // are rare. Our tiering strategy ensures this. This is because if an OSR exit is
    // taken more than ~100 times, we jettison the DFG code block along with all of its
    // exits. It is impossible for an OSR exit - i.e. the code we compile below - to
    // execute frequently enough for the codegen to matter that much. It probably matters
    // enough that we don't want to turn this into some super-slow function call, but so
    // long as we're generating straight-line code, that code can be pretty bad. Also
    // because we tend to exit only along one OSR exit from any DFG code block - that's an
    // empirical result that we're extremely confident about - the code size of this
    // doesn't matter much. Hence any attempt to optimize the codegen here is just purely
    // harmful to the system: it probably won't reduce either net memory usage or net
    // execution time. It will only prevent us from cleanly decoupling "where was the
    // variable" from "how was it represented", which will make it more difficult to add
    // features in the future and it will make it harder to reason about bugs.
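    //
    // Concretely, the ramp emitted below proceeds in phases: dump every register-based
    // recovery (GPRs, then FPRs) into the scratch buffer; dump every stack-based recovery
    // into the scratch buffer as well; adjust the stack pointer and swap the DFG callee
    // saves for the baseline ones; convert each value to its baseline representation and
    // store it to the stack; then materialize any phantom arguments objects, update exit
    // counters, reify inlined call frames, and jump to the target.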

    // Save all state from GPRs into the scratch buffer.

    ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : nullptr;
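    // The buffer has one EncodedJSValue-sized slot per operand: slot `index` will hold
    // the raw bits of operands[index], whatever representation the value started in.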

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case UnboxedInt32InGPR:
        case UnboxedCellInGPR:
#if USE(JSVALUE64)
        case InGPR:
        case UnboxedInt52InGPR:
        case UnboxedStrictInt52InGPR:
            jit.store64(recovery.gpr(), scratch + index);
            break;
#else
        case UnboxedBooleanInGPR:
            jit.store32(
                recovery.gpr(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;

        case InPair:
            jit.store32(
                recovery.tagGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            jit.store32(
                recovery.payloadGPR(),
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;
#endif

        default:
            break;
        }
    }

    // And voila, all GPRs are free to reuse.

    // Save all state from FPRs into the scratch buffer.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case UnboxedDoubleInFPR:
        case InFPR:
            jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
            break;

        default:
            break;
        }
    }

    // Now, all FPRs are also free.

    // Save all state from the stack into the scratch buffer. For simplicity we
    // do this even for state that's already in the right place on the stack.
    // It makes things simpler later.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];

        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case DoubleDisplacedInJSStack:
#if USE(JSVALUE64)
        case Int52DisplacedInJSStack:
        case StrictInt52DisplacedInJSStack:
            jit.load64(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0);
            jit.store64(GPRInfo::regT0, scratch + index);
            break;
#else
            jit.load32(
                AssemblyHelpers::tagFor(recovery.virtualRegister()),
                GPRInfo::regT0);
            jit.load32(
                AssemblyHelpers::payloadFor(recovery.virtualRegister()),
                GPRInfo::regT1);
            jit.store32(
                GPRInfo::regT0,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag);
            jit.store32(
                GPRInfo::regT1,
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload);
            break;
#endif

        default:
            break;
        }
    }

    if (validateDFGDoesGC) {
        // We're about to exit optimized code. So, there's no longer any optimized
        // code running that expects no GC. We need to set this before arguments
        // materialization below (see emitRestoreArguments()).

        // Even though we set Heap::m_expectDoesGC in compileOSRExit(), we also need
        // to set it here because compileOSRExit() is only called on the first time
        // we exit from this site, but all subsequent exits will take this compiled
        // ramp without calling compileOSRExit() first.
        jit.store8(CCallHelpers::TrustedImm32(true), vm.heap.addressOfExpectDoesGC());
    }

    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
    // could toast some stack that the DFG used. We need to do it before storing to stack offsets
    // used by baseline.
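    // In effect, the addPtr below computes sp = fp - requiredRegisterCountForExit * sizeof(Register).
    // For example, with a required register count of 64 on a 64-bit target, this places the
    // stack pointer 512 bytes below the frame pointer.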
    jit.addPtr(
        CCallHelpers::TrustedImm32(
            -jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);

    // Restore the DFG callee saves and then save the ones the baseline JIT uses.
    jit.emitRestoreCalleeSaves();
    jit.emitSaveCalleeSavesFor(jit.baselineCodeBlock());

    // The tag registers are needed to materialize recoveries below.
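    // On 64-bit targets this is expected to reload the constant tag registers (e.g.
    // JSValue::NumberTag into GPRInfo::numberTagRegister), which the boxing code in the
    // conversion loop below relies on.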
    jit.emitMaterializeTagCheckRegisters();

    if (exit.isExceptionHandler())
        jit.copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);

    // Do all data format conversions and store the results into the stack.

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        VirtualRegister reg = operands.virtualRegisterForIndex(index);

        if (reg.isLocal() && reg.toLocal() < static_cast<int>(jit.baselineCodeBlock()->calleeSaveSpaceAsVirtualRegisters()))
            continue;

        int operand = reg.offset();

        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case InFPR:
#if USE(JSVALUE64)
        case InGPR:
        case UnboxedCellInGPR:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            jit.load64(scratch + index, GPRInfo::regT0);
            jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;
#else // not USE(JSVALUE64)
        case InPair:
            jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
                GPRInfo::regT0);
            jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT1);
            jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::tagFor(operand));
            jit.store32(
                GPRInfo::regT1,
                AssemblyHelpers::payloadFor(operand));
            break;

        case UnboxedCellInGPR:
        case CellDisplacedInJSStack:
            jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                AssemblyHelpers::tagFor(operand));
            jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;

        case UnboxedBooleanInGPR:
        case BooleanDisplacedInJSStack:
            jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::BooleanTag),
                AssemblyHelpers::tagFor(operand));
            jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
            break;
#endif // USE(JSVALUE64)

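        // On 64-bit targets an int32 is boxed by zero-extending the payload and OR-ing in
        // the number tag - roughly, encoded = JSValue::NumberTag | uint32_t(payload). On
        // 32-bit targets the payload is stored alongside an Int32Tag instead.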
        case UnboxedInt32InGPR:
        case Int32DisplacedInJSStack:
#if USE(JSVALUE64)
            jit.load64(scratch + index, GPRInfo::regT0);
            jit.zeroExtend32ToPtr(GPRInfo::regT0, GPRInfo::regT0);
            jit.or64(GPRInfo::numberTagRegister, GPRInfo::regT0);
            jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
#else
            jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.payload,
                GPRInfo::regT0);
            jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::Int32Tag),
                AssemblyHelpers::tagFor(operand));
            jit.store32(
                GPRInfo::regT0,
                AssemblyHelpers::payloadFor(operand));
#endif
            break;

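        // The non-strict Int52 representation keeps the value shifted left by
        // JSValue::int52ShiftAmount, so it is shifted back down before boxInt52()
        // produces a properly boxed JSValue; the strict form is already unshifted.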
#if USE(JSVALUE64)
        case UnboxedInt52InGPR:
        case Int52DisplacedInJSStack:
            jit.load64(scratch + index, GPRInfo::regT0);
            jit.rshift64(
                AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), GPRInfo::regT0);
            jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
            jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;

        case UnboxedStrictInt52InGPR:
        case StrictInt52DisplacedInJSStack:
            jit.load64(scratch + index, GPRInfo::regT0);
            jit.boxInt52(GPRInfo::regT0, GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
            jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
            break;
#endif

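        // Doubles are purified before being stored back: purifyNaN() canonicalizes NaN
        // bit patterns so that the stored bits remain a valid boxed double under the
        // 64-bit JSValue encoding.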
        case UnboxedDoubleInFPR:
        case DoubleDisplacedInJSStack:
            jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
            jit.purifyNaN(FPRInfo::fpRegT0);
#if USE(JSVALUE64)
            jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0);
            jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
#else
            jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
#endif
            break;

        case Constant:
#if USE(JSVALUE64)
            jit.store64(
                AssemblyHelpers::TrustedImm64(JSValue::encode(recovery.constant())),
                AssemblyHelpers::addressFor(operand));
#else
            jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().tag()),
                AssemblyHelpers::tagFor(operand));
            jit.store32(
                AssemblyHelpers::TrustedImm32(recovery.constant().payload()),
                AssemblyHelpers::payloadFor(operand));
#endif
            break;

        case DirectArgumentsThatWereNotCreated:
        case ClonedArgumentsThatWereNotCreated:
            // Don't do this, yet.
            break;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
    // recoveries don't recursively refer to each other. But, we don't try to assume that they only
    // refer to certain ranges of locals. Hence we need to do this here, once the stack is sensible.
    // Note that we also roughly assume that the arguments might still be materialized outside of their
    // inline call frame scope - but for now the DFG wouldn't do that.

    emitRestoreArguments(jit, vm, operands);

    // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    // that all new calls into this code will go to the new JIT, so the execute
    // counter only affects call frames that performed OSR exit and call frames
    // that were still executing the old JIT at the time of another call frame's
    // OSR exit. We want to ensure that the following is true:
    //
    // (a) Code that performs an OSR exit gets a chance to reenter optimized
    //     code eventually, since optimized code is faster. But we don't
    //     want to do such reentry too aggressively (see (c) below).
    //
    // (b) If there is code on the call stack that is still running the old
    //     JIT's code and has never OSR'd, then it should get a chance to
    //     perform OSR entry despite the fact that we've exited.
    //
    // (c) Code that performs an OSR exit should not immediately retry OSR
    //     entry, since both forms of OSR are expensive. OSR entry is
    //     particularly expensive.
    //
    // (d) Frequent OSR failures, even those that do not result in the code
    //     running in a hot loop, result in recompilation getting triggered.
    //
    // To ensure (c), we'd like to set the execute counter to
    // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    // (a) and (b), since then every OSR exit would delay the opportunity for
    // every call frame to perform OSR entry. Essentially, if OSR exit happens
    // frequently and the function has few loops, then the counter will never
    // become non-negative and OSR entry will never be triggered. OSR entry
    // will only happen if a loop gets hot in the old JIT, which does a pretty
    // good job of ensuring (a) and (b). But that doesn't take care of (d),
    // since each speculation failure would reset the execute counter.
    // So we check here if the number of speculation failures is significantly
    // larger than the number of successes (we want a 90% success rate), and if
    // there have been a large enough number of failures. If so, we set the
    // counter to 0; otherwise we set the counter to
    // counterValueForOptimizeAfterWarmUp().

    handleExitCounts(vm, jit, exit);

    // Reify inlined call frames.

    reifyInlinedCallFrames(jit, exit);

    // And finish.
    adjustAndJumpToTarget(vm, jit, exit);
}

void JIT_OPERATION operationDebugPrintSpeculationFailure(CallFrame* callFrame, void* debugInfoRaw, void* scratch)
{
    VM& vm = callFrame->deprecatedVM();
    NativeCallFrameTracer tracer(vm, callFrame);

    SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
    CodeBlock* codeBlock = debugInfo->codeBlock;
    CodeBlock* alternative = codeBlock->alternative();
    dataLog("Speculation failure in ", *codeBlock);
    dataLog(" @ exit #", vm.osrExitIndex, " (", debugInfo->bytecodeIndex, ", ", exitKindToString(debugInfo->kind), ") with ");
    if (alternative) {
        dataLog(
            "executeCounter = ", alternative->jitExecuteCounter(),
            ", reoptimizationRetryCounter = ", alternative->reoptimizationRetryCounter(),
            ", optimizationDelayCounter = ", alternative->optimizationDelayCounter());
    } else
        dataLog("no alternative code block (i.e. we've been jettisoned)");
    dataLog(", osrExitCounter = ", codeBlock->osrExitCounter(), "\n");
    dataLog("    GPRs at time of exit:");
    char* scratchPointer = static_cast<char*>(scratch);
    for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
        GPRReg gpr = GPRInfo::toRegister(i);
        dataLog(" ", GPRInfo::debugName(gpr), ":", RawPointer(*reinterpret_cast_ptr<void**>(scratchPointer)));
        scratchPointer += sizeof(EncodedJSValue);
    }
    dataLog("\n");
    dataLog("    FPRs at time of exit:");
    for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
        FPRReg fpr = FPRInfo::toRegister(i);
        dataLog(" ", FPRInfo::debugName(fpr), ":");
        uint64_t bits = *reinterpret_cast_ptr<uint64_t*>(scratchPointer);
        double value = *reinterpret_cast_ptr<double*>(scratchPointer);
        dataLogF("%llx:%lf", static_cast<unsigned long long>(bits), value);
        scratchPointer += sizeof(EncodedJSValue);
    }
    dataLog("\n");
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)