/*
 * Copyright (C) 2013-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "FTLOSRExitCompiler.h"

#if ENABLE(FTL_JIT)

#include "BytecodeStructs.h"
#include "DFGOSRExitCompilerCommon.h"
#include "FTLExitArgumentForOperand.h"
#include "FTLJITCode.h"
#include "FTLLocation.h"
#include "FTLOSRExit.h"
#include "FTLOperations.h"
#include "FTLSaveRestore.h"
#include "FTLState.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "OperandsInlines.h"

namespace JSC { namespace FTL {

using namespace DFG;

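// Turns the raw machine value in |value| (in whatever representation |format|
// describes) into a boxed JSValue in the same register. |scratch1| and |scratch2|
// may be clobbered; fpRegT0 is preserved across the cases that use it.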
static void reboxAccordingToFormat(
    DataFormat format, AssemblyHelpers& jit, GPRReg value, GPRReg scratch1, GPRReg scratch2)
{
    switch (format) {
    case DataFormatInt32: {
        jit.zeroExtend32ToPtr(value, value);
        jit.or64(GPRInfo::numberTagRegister, value);
        break;
    }

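    // boxInt52() uses fpRegT0 as its double scratch, so the two Int52 cases below
    // stash the incoming value of fpRegT0 in scratch2 and restore it afterwards.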
    case DataFormatInt52: {
        jit.rshift64(AssemblyHelpers::TrustedImm32(JSValue::int52ShiftAmount), value);
        jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch2);
        jit.boxInt52(value, value, scratch1, FPRInfo::fpRegT0);
        jit.move64ToDouble(scratch2, FPRInfo::fpRegT0);
        break;
    }

    case DataFormatStrictInt52: {
        jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch2);
        jit.boxInt52(value, value, scratch1, FPRInfo::fpRegT0);
        jit.move64ToDouble(scratch2, FPRInfo::fpRegT0);
        break;
    }

    case DataFormatBoolean: {
        jit.zeroExtend32ToPtr(value, value);
        jit.or32(MacroAssembler::TrustedImm32(JSValue::ValueFalse), value);
        break;
    }

    case DataFormatJS: {
        // Done already!
        break;
    }

    case DataFormatDouble: {
        jit.moveDoubleTo64(FPRInfo::fpRegT0, scratch1);
        jit.move64ToDouble(value, FPRInfo::fpRegT0);
        jit.purifyNaN(FPRInfo::fpRegT0);
        jit.boxDouble(FPRInfo::fpRegT0, value);
        jit.move64ToDouble(scratch1, FPRInfo::fpRegT0);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}

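// Emits code that loads the recovered value described by |value| into regT0 and
// reboxes it according to the value's data format. regT1 and regT2 serve as
// scratch for the reboxing.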
static void compileRecovery(
    CCallHelpers& jit, const ExitValue& value,
    Vector<B3::ValueRep>& valueReps,
    char* registerScratch,
    const HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*>& materializationToPointer)
{
    switch (value.kind()) {
    case ExitValueDead:
        jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsUndefined())), GPRInfo::regT0);
        break;

    case ExitValueConstant:
        jit.move(MacroAssembler::TrustedImm64(JSValue::encode(value.constant())), GPRInfo::regT0);
        break;

    case ExitValueArgument:
        Location::forValueRep(valueReps[value.exitArgument().argument()]).restoreInto(
            jit, registerScratch, GPRInfo::regT0);
        break;

    case ExitValueInJSStack:
    case ExitValueInJSStackAsInt32:
    case ExitValueInJSStackAsInt52:
    case ExitValueInJSStackAsDouble:
        jit.load64(AssemblyHelpers::addressFor(value.virtualRegister()), GPRInfo::regT0);
        break;

    case ExitValueMaterializeNewObject:
        jit.loadPtr(materializationToPointer.get(value.objectMaterialization()), GPRInfo::regT0);
        break;

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    reboxAccordingToFormat(
        value.dataFormat(), jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);
}

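// Generates the OSR exit ramp itself: saves all registers into scratch memory,
// recovers every exit value, materializes any escaped objects, rebuilds the
// baseline frame (including inlined frames), and jumps to the target bytecode.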
static void compileStub(VM& vm, unsigned exitID, JITCode* jitCode, OSRExit& exit, CodeBlock* codeBlock)
{
    // This code requires that framePointerRegister be the same as callFrameRegister.
    static_assert(MacroAssembler::framePointerRegister == GPRInfo::callFrameRegister, "MacroAssembler::framePointerRegister and GPRInfo::callFrameRegister must be the same");

    CCallHelpers jit(codeBlock);

    // The first thing we need to do is re-establish our frame in the case of an exception.
    if (exit.isGenericUnwindHandler()) {
        RELEASE_ASSERT(vm.callFrameForCatch); // The first time we hit this exit, as at all other times, this field should be non-null.
        jit.restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
        jit.loadPtr(vm.addressOfCallFrameForCatch(), MacroAssembler::framePointerRegister);
        jit.addPtr(CCallHelpers::TrustedImm32(codeBlock->stackPointerOffset() * sizeof(Register)),
            MacroAssembler::framePointerRegister, CCallHelpers::stackPointerRegister);

        // Do a pushToSave because that's what the exit compiler below expects the stack
        // to look like, since that's the last thing the ExitThunkGenerator does. The code
        // below doesn't actually use the value that was pushed, but it does rely on the
        // general shape of the stack being as it is in the non-exception OSR case.
        jit.pushToSaveImmediateWithoutTouchingRegisters(CCallHelpers::TrustedImm32(0xbadbeef));
    }

    // We need scratch space to save all registers, to build up the JS stack, to deal with
    // unwind fixup, to hold pointers to all of the objects we materialize, and to hold the
    // elements inside those objects that we materialize.

    // Figure out how much space we need for those object allocations.
    unsigned numMaterializations = 0;
    size_t maxMaterializationNumArguments = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) {
        numMaterializations++;

        maxMaterializationNumArguments = std::max(
            maxMaterializationNumArguments,
            materialization->properties().size());
    }

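    // Scratch buffer layout, in order:
    //   - one EncodedJSValue per exit value (the recovered values),
    //   - one pointer per materialization (the allocated objects),
    //   - argument slots for one materialization at a time,
    //   - the register save area (requiredScratchMemorySizeInBytes()),
    //   - unwind scratch for the FTL callee-saves.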
    ScratchBuffer* scratchBuffer = vm.scratchBufferForSize(
        sizeof(EncodedJSValue) * (
            exit.m_descriptor->m_values.size() + numMaterializations + maxMaterializationNumArguments) +
        requiredScratchMemorySizeInBytes() +
        codeBlock->calleeSaveRegisters()->size() * sizeof(uint64_t));
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : nullptr;
    EncodedJSValue* materializationPointers = scratch + exit.m_descriptor->m_values.size();
    EncodedJSValue* materializationArguments = materializationPointers + numMaterializations;
    char* registerScratch = bitwise_cast<char*>(materializationArguments + maxMaterializationNumArguments);
    uint64_t* unwindScratch = bitwise_cast<uint64_t*>(registerScratch + requiredScratchMemorySizeInBytes());

    HashMap<ExitTimeObjectMaterialization*, EncodedJSValue*> materializationToPointer;
    unsigned materializationCount = 0;
    for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) {
        materializationToPointer.add(
            materialization, materializationPointers + materializationCount++);
    }

    auto recoverValue = [&] (const ExitValue& value) {
        compileRecovery(
            jit, value,
            exit.m_valueReps,
            registerScratch, materializationToPointer);
    };

    // Note that when we come in here, the stack is as B3 left it, except that someone called
    // pushToSave(). We don't care about the value they saved, but we do appreciate that they
    // did it, because we use that slot for saveAllRegisters().

    saveAllRegisters(jit, registerScratch);

    if (validateDFGDoesGC) {
        // We're about to exit optimized code. So, there's no longer any optimized
        // code running that expects no GC. We need to set this before object
        // materialization below.

        // Even though we set Heap::m_expectDoesGC in operationCompileFTLOSRExit(), we also
        // need to set it here because operationCompileFTLOSRExit() is only called the first
        // time we exit from this site, but all subsequent exits will take this compiled
        // ramp without calling operationCompileFTLOSRExit() first.
        jit.store8(CCallHelpers::TrustedImm32(true), vm.heap.addressOfExpectDoesGC());
    }

    // Bring the stack back into a sane form and assert that it's sane.
    jit.popToRestore(GPRInfo::regT0);
    jit.checkStackPointerAlignment();

    if (UNLIKELY(vm.m_perBytecodeProfiler && jitCode->dfgCommon()->compilation)) {
        Profiler::Database& database = *vm.m_perBytecodeProfiler;
        Profiler::Compilation* compilation = jitCode->dfgCommon()->compilation.get();

        Profiler::OSRExit* profilerExit = compilation->addOSRExit(
            exitID, Profiler::OriginStack(database, codeBlock, exit.m_codeOrigin),
            exit.m_kind, exit.m_kind == UncountableInvalidation);
        jit.add64(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(profilerExit->counterAddress()));
    }

    // The remaining code assumes that SP/FP are in the same state that they were in the FTL's
    // call frame.

    // Re-establish the tag registers. The FTL code may have clobbered them, since B3 is free
    // to use them, and the code below relies on them holding their usual values.
    jit.move(MacroAssembler::TrustedImm64(JSValue::NumberTag), GPRInfo::numberTagRegister);
    jit.move(MacroAssembler::TrustedImm64(JSValue::NotCellMask), GPRInfo::notCellMaskRegister);

    // Do some value profiling.
    if (exit.m_descriptor->m_profileDataFormat != DataFormatNone) {
        Location::forValueRep(exit.m_valueReps[0]).restoreInto(jit, registerScratch, GPRInfo::regT0);
        reboxAccordingToFormat(
            exit.m_descriptor->m_profileDataFormat, jit, GPRInfo::regT0, GPRInfo::regT1, GPRInfo::regT2);

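        // For BadCache / BadIndexingType exits, feed the baseline ArrayProfile with the
        // structure and array modes of the value we just recovered, so that the next
        // compile can be less optimistic about this array access. Typed arrays get their
        // mode from the typedArrayModes table; everything else derives it from the cell's
        // indexing mode bits.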
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            CodeBlock* codeBlock = jit.baselineCodeBlockFor(codeOrigin);
            if (ArrayProfile* arrayProfile = codeBlock->getArrayProfile(codeOrigin.bytecodeIndex())) {
                const Instruction* instruction = codeBlock->instructions().at(codeOrigin.bytecodeIndex()).ptr();
                CCallHelpers::Jump skipProfile;
                if (instruction->is<OpGetById>()) {
                    auto& metadata = instruction->as<OpGetById>().metadata(codeBlock);
                    skipProfile = jit.branch8(CCallHelpers::NotEqual, CCallHelpers::AbsoluteAddress(&metadata.m_modeMetadata.mode), CCallHelpers::TrustedImm32(static_cast<uint8_t>(GetByIdMode::ArrayLength)));
                }

                jit.load32(MacroAssembler::Address(GPRInfo::regT0, JSCell::structureIDOffset()), GPRInfo::regT1);
                jit.store32(GPRInfo::regT1, arrayProfile->addressOfLastSeenStructureID());

                jit.load8(MacroAssembler::Address(GPRInfo::regT0, JSCell::typeInfoTypeOffset()), GPRInfo::regT2);
                jit.sub32(MacroAssembler::TrustedImm32(FirstTypedArrayType), GPRInfo::regT2);
                auto notTypedArray = jit.branch32(MacroAssembler::AboveOrEqual, GPRInfo::regT2, MacroAssembler::TrustedImm32(NumberOfTypedArrayTypesExcludingDataView));
                jit.move(MacroAssembler::TrustedImmPtr(typedArrayModes), GPRInfo::regT1);
                jit.load32(MacroAssembler::BaseIndex(GPRInfo::regT1, GPRInfo::regT2, MacroAssembler::TimesFour), GPRInfo::regT2);
                auto storeArrayModes = jit.jump();

                notTypedArray.link(&jit);
                jit.load8(MacroAssembler::Address(GPRInfo::regT0, JSCell::indexingTypeAndMiscOffset()), GPRInfo::regT1);
                jit.and32(MacroAssembler::TrustedImm32(IndexingModeMask), GPRInfo::regT1);
                jit.move(MacroAssembler::TrustedImm32(1), GPRInfo::regT2);
                jit.lshift32(GPRInfo::regT1, GPRInfo::regT2);
                storeArrayModes.link(&jit);
                jit.or32(GPRInfo::regT2, MacroAssembler::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

                if (skipProfile.isSet())
                    skipProfile.link(&jit);
            }
        }

        if (exit.m_descriptor->m_valueProfile)
            exit.m_descriptor->m_valueProfile.emitReportValue(jit, JSValueRegs(GPRInfo::regT0));
    }

    // Materialize all objects. Don't materialize an object until all
    // of the objects it needs have been materialized. We break cycles
    // by populating objects late - we only consider an object as
    // needing another object if the latter is needed for the
    // allocation of the former.

    HashSet<ExitTimeObjectMaterialization*> toMaterialize;
    for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations)
        toMaterialize.add(materialization);

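    // Fixpoint: repeatedly sweep the remaining materializations, emitting allocation
    // code for any whose allocation-time dependencies have all been materialized
    // already. We iterate over a worklist copy because the loop mutates toMaterialize.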
    while (!toMaterialize.isEmpty()) {
        unsigned previousToMaterializeSize = toMaterialize.size();

        Vector<ExitTimeObjectMaterialization*> worklist;
        worklist.appendRange(toMaterialize.begin(), toMaterialize.end());
        for (ExitTimeObjectMaterialization* materialization : worklist) {
            // Check if we can do anything about this right now.
            bool allGood = true;
            for (ExitPropertyValue value : materialization->properties()) {
                if (!value.value().isObjectMaterialization())
                    continue;
                if (!value.location().neededForMaterialization())
                    continue;
                if (toMaterialize.contains(value.value().objectMaterialization())) {
                    // Gotta skip this one, since it needs a
                    // materialization that hasn't been materialized.
                    allGood = false;
                    break;
                }
            }
            if (!allGood)
                continue;

            // All systems go for materializing the object. First we
            // recover the values of all of its fields and then we
            // call a function to actually allocate the beast.
            // We only recover the fields that are needed for the allocation.
            for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
                const ExitPropertyValue& property = materialization->properties()[propertyIndex];
                if (!property.location().neededForMaterialization())
                    continue;

                recoverValue(property.value());
                jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
            }

            static_assert(FunctionTraits<decltype(operationMaterializeObjectInOSR)>::arity < GPRInfo::numberOfArgumentRegisters, "This call assumes that we don't pass arguments on the stack.");
            jit.setupArguments<decltype(operationMaterializeObjectInOSR)>(
                CCallHelpers::TrustedImmPtr(codeBlock->globalObjectFor(materialization->origin())),
                CCallHelpers::TrustedImmPtr(materialization),
                CCallHelpers::TrustedImmPtr(materializationArguments));
            jit.prepareCallOperation(vm);
            jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationMaterializeObjectInOSR)), GPRInfo::nonArgGPR0);
            jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
            jit.storePtr(GPRInfo::returnValueGPR, materializationToPointer.get(materialization));

            // Let everyone know that we're done.
            toMaterialize.remove(materialization);
        }

        // We expect progress! This ensures that we crash rather than looping infinitely if there
        // is something broken about this fixpoint. Or, this could happen if we ever violate the
        // "materializations form a DAG" rule.
        RELEASE_ASSERT(toMaterialize.size() < previousToMaterializeSize);
    }

    // Now that all the objects have been allocated, we populate them
    // with the correct values. This time we can recover all the
    // fields, including those that are only needed for the allocation.
    for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations) {
        for (unsigned propertyIndex = materialization->properties().size(); propertyIndex--;) {
            recoverValue(materialization->properties()[propertyIndex].value());
            jit.storePtr(GPRInfo::regT0, materializationArguments + propertyIndex);
        }

        static_assert(FunctionTraits<decltype(operationPopulateObjectInOSR)>::arity < GPRInfo::numberOfArgumentRegisters, "This call assumes that we don't pass arguments on the stack.");
        jit.setupArguments<decltype(operationPopulateObjectInOSR)>(
            CCallHelpers::TrustedImmPtr(codeBlock->globalObjectFor(materialization->origin())),
            CCallHelpers::TrustedImmPtr(materialization),
            CCallHelpers::TrustedImmPtr(materializationToPointer.get(materialization)),
            CCallHelpers::TrustedImmPtr(materializationArguments));
        jit.prepareCallOperation(vm);
        jit.move(CCallHelpers::TrustedImmPtr(tagCFunctionPtr<OperationPtrTag>(operationPopulateObjectInOSR)), GPRInfo::nonArgGPR0);
        jit.call(GPRInfo::nonArgGPR0, OperationPtrTag);
    }

    // Save all state from wherever the exit data tells us it was, into the appropriate place in
    // the scratch buffer. This also does the reboxing.

    for (unsigned index = exit.m_descriptor->m_values.size(); index--;) {
        recoverValue(exit.m_descriptor->m_values[index]);
        jit.store64(GPRInfo::regT0, scratch + index);
    }

    // Henceforth we make it look like the exiting function was called through a register
    // preservation wrapper. This implies that FP must be nudged down by a certain amount. Then
    // we restore the various things according to either exit.m_descriptor->m_values or by copying from the
    // old frame, and finally we save the various callee-save registers into where the
    // restoration thunk would restore them from.

    // Before we start messing with the frame, we need to set aside any registers that the
    // FTL code was preserving.
    for (unsigned i = codeBlock->calleeSaveRegisters()->size(); i--;) {
        RegisterAtOffset entry = codeBlock->calleeSaveRegisters()->at(i);
        jit.load64(
            MacroAssembler::Address(MacroAssembler::framePointerRegister, entry.offset()),
            GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, unwindScratch + i);
    }

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);

    // First set up SP so that our data doesn't get clobbered by signals.
    unsigned conservativeStackDelta =
        (exit.m_descriptor->m_values.numberOfLocals() + baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters()) * sizeof(Register) +
        maxFrameExtentForSlowPathCall;
    conservativeStackDelta = WTF::roundUpToMultipleOf(
        stackAlignmentBytes(), conservativeStackDelta);
    jit.addPtr(
        MacroAssembler::TrustedImm32(-conservativeStackDelta),
        MacroAssembler::framePointerRegister, MacroAssembler::stackPointerRegister);
    jit.checkStackPointerAlignment();

    RegisterSet allFTLCalleeSaves = RegisterSet::ftlCalleeSaveRegisters();
    const RegisterAtOffsetList* baselineCalleeSaves = baselineCodeBlock->calleeSaveRegisters();
    RegisterAtOffsetList* vmCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet vmCalleeSavesToSkip = RegisterSet::stackRegisters();
    if (exit.isExceptionHandler()) {
        jit.loadPtr(&vm.topEntryFrame, GPRInfo::regT1);
        jit.addPtr(CCallHelpers::TrustedImm32(EntryFrame::calleeSaveRegistersBufferOffset()), GPRInfo::regT1);
    }

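    // For each FTL callee-save: recover its pre-FTL value, either from the register
    // scratch buffer (if the FTL code didn't preserve it) or from the unwind scratch
    // buffer (if it did), then park it wherever the baseline frame and, for exception
    // exits, the VM callee-save buffer expect it.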
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!allFTLCalleeSaves.get(reg)) {
            if (exit.isExceptionHandler())
                RELEASE_ASSERT(!vmCalleeSaves->find(reg));
            continue;
        }
        unsigned unwindIndex = codeBlock->calleeSaveRegisters()->indexOf(reg);
        const RegisterAtOffset* baselineRegisterOffset = baselineCalleeSaves->find(reg);
        RegisterAtOffset* vmCalleeSave = nullptr;
        if (exit.isExceptionHandler())
            vmCalleeSave = vmCalleeSaves->find(reg);

        if (reg.isGPR()) {
            GPRReg regToLoad = baselineRegisterOffset ? GPRInfo::regT0 : reg.gpr();
            RELEASE_ASSERT(regToLoad != GPRInfo::regT1);

            if (unwindIndex == UINT_MAX) {
                // The FTL compilation didn't preserve this register. This means that it also
                // didn't use the register. So its value at the beginning of OSR exit should be
                // preserved by the thunk. Luckily, we saved all registers into the register
                // scratch buffer, so we can restore them from there.
                jit.load64(registerScratch + offsetOfReg(reg), regToLoad);
            } else {
                // The FTL compilation preserved the register. Its new value is therefore
                // irrelevant, but we can get the value that was preserved by using the unwind
                // data. We've already copied all unwind-able preserved registers into the unwind
                // scratch buffer, so we can get it from there.
                jit.load64(unwindScratch + unwindIndex, regToLoad);
            }

            if (baselineRegisterOffset)
                jit.store64(regToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
            if (vmCalleeSave && !vmCalleeSavesToSkip.get(vmCalleeSave->reg()))
                jit.store64(regToLoad, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
        } else {
            FPRReg fpRegToLoad = baselineRegisterOffset ? FPRInfo::fpRegT0 : reg.fpr();

            if (unwindIndex == UINT_MAX)
                jit.loadDouble(MacroAssembler::TrustedImmPtr(registerScratch + offsetOfReg(reg)), fpRegToLoad);
            else
                jit.loadDouble(MacroAssembler::TrustedImmPtr(unwindScratch + unwindIndex), fpRegToLoad);

            if (baselineRegisterOffset)
                jit.storeDouble(fpRegToLoad, MacroAssembler::Address(MacroAssembler::framePointerRegister, baselineRegisterOffset->offset()));
            if (vmCalleeSave && !vmCalleeSavesToSkip.get(vmCalleeSave->reg()))
                jit.storeDouble(fpRegToLoad, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
        }
    }

    if (exit.isExceptionHandler()) {
        RegisterAtOffset* vmCalleeSave = vmCalleeSaves->find(GPRInfo::numberTagRegister);
        jit.store64(GPRInfo::numberTagRegister, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));

        vmCalleeSave = vmCalleeSaves->find(GPRInfo::notCellMaskRegister);
        jit.store64(GPRInfo::notCellMaskRegister, MacroAssembler::Address(GPRInfo::regT1, vmCalleeSave->offset()));
    }

    size_t baselineVirtualRegistersForCalleeSaves = baselineCodeBlock->calleeSaveSpaceAsVirtualRegisters();

    // Now get state out of the scratch buffer and place it back into the stack. The values are
    // already reboxed so we just move them.
    for (unsigned index = exit.m_descriptor->m_values.size(); index--;) {
        VirtualRegister reg = exit.m_descriptor->m_values.virtualRegisterForIndex(index);

        if (reg.isLocal() && reg.toLocal() < static_cast<int>(baselineVirtualRegistersForCalleeSaves))
            continue;

        jit.load64(scratch + index, GPRInfo::regT0);
        jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(reg));
    }

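    // Common DFG/FTL exit epilogue: bump the exit counters (possibly scheduling
    // reoptimization), rebuild any inlined call frames, and jump into the baseline
    // code at the exit's target bytecode.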
    handleExitCounts(vm, jit, exit);
    reifyInlinedCallFrames(jit, exit);
    adjustAndJumpToTarget(vm, jit, exit);

    LinkBuffer patchBuffer(jit, codeBlock);
    exit.m_code = FINALIZE_CODE_IF(
        shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit(),
        patchBuffer, OSRExitPtrTag,
        "FTL OSR exit #%u (%s, %s) from %s, with operands = %s",
        exitID, toCString(exit.m_codeOrigin).data(),
        exitKindToString(exit.m_kind), toCString(*codeBlock).data(),
        toCString(ignoringContext<DumpContext>(exit.m_descriptor->m_values)).data());
}

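// Called lazily, via the OSR exit thunk, the first time a given exit is taken: compiles
// the exit stub, repatches the jump at the exit site to point at it, and returns the
// stub's entry point. Subsequent exits at this site branch straight to the compiled
// stub without re-entering this operation.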
extern "C" JIT_OPERATION void* operationCompileFTLOSRExit(CallFrame* callFrame, unsigned exitID)
{
    if (shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit())
        dataLog("Compiling OSR exit with exitID = ", exitID, "\n");

    VM& vm = callFrame->deprecatedVM();

    if (validateDFGDoesGC) {
        // We're about to exit optimized code. So, there's no longer any optimized
        // code running that expects no GC.
        vm.heap.setExpectDoesGC(true);
    }

    if (vm.callFrameForCatch)
        RELEASE_ASSERT(vm.callFrameForCatch == callFrame);

    CodeBlock* codeBlock = callFrame->codeBlock();

    ASSERT(codeBlock);
    ASSERT(codeBlock->jitType() == JITType::FTLJIT);

    // It's sort of preferable that we don't GC while in here. Anyway, doing so wouldn't
    // really be profitable.
    DeferGCForAWhile deferGC(vm.heap);

    JITCode* jitCode = codeBlock->jitCode()->ftl();
    OSRExit& exit = jitCode->osrExit[exitID];

    if (shouldDumpDisassembly() || Options::verboseOSR() || Options::verboseFTLOSRExit()) {
        dataLog("    Owning block: ", pointerDump(codeBlock), "\n");
        dataLog("    Origin: ", exit.m_codeOrigin, "\n");
        if (exit.m_codeOriginForExitProfile != exit.m_codeOrigin)
            dataLog("    Origin for exit profile: ", exit.m_codeOriginForExitProfile, "\n");
        dataLog("    Current call site index: ", callFrame->callSiteIndex().bits(), "\n");
        dataLog("    Exit is exception handler: ", exit.isExceptionHandler(), "\n");
        dataLog("    Is unwind handler: ", exit.isGenericUnwindHandler(), "\n");
        dataLog("    Exit values: ", exit.m_descriptor->m_values, "\n");
        dataLog("    Value reps: ", listDump(exit.m_valueReps), "\n");
        if (!exit.m_descriptor->m_materializations.isEmpty()) {
            dataLog("    Materializations:\n");
            for (ExitTimeObjectMaterialization* materialization : exit.m_descriptor->m_materializations)
                dataLog("        ", pointerDump(materialization), "\n");
        }
    }

    compileStub(vm, exitID, jitCode, exit, codeBlock);

    MacroAssembler::repatchJump(
        exit.codeLocationForRepatch(codeBlock), CodeLocationLabel<OSRExitPtrTag>(exit.m_code.code()));

    return exit.m_code.code().executableAddress();
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)