1/*
2 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "DFGOSREntry.h"
28
29#if ENABLE(DFG_JIT)
30
31#include "CallFrame.h"
32#include "CodeBlock.h"
33#include "DFGJITCode.h"
34#include "DFGNode.h"
35#include "InterpreterInlines.h"
36#include "JIT.h"
37#include "JSCInlines.h"
38#include "VMInlines.h"
39#include <wtf/CommaPrinter.h>
40
41namespace JSC { namespace DFG {
42
43void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const
44{
45 out.print(m_bytecodeIndex, ", machine code = ", RawPointer(m_machineCode.executableAddress()));
46 out.print(", stack rules = [");
47
48 auto printOperand = [&] (VirtualRegister reg) {
49 out.print(inContext(m_expectedValues.operand(reg), context), " (");
50 VirtualRegister toReg;
51 bool overwritten = false;
52 for (OSREntryReshuffling reshuffling : m_reshufflings) {
53 if (reg == VirtualRegister(reshuffling.fromOffset)) {
54 toReg = VirtualRegister(reshuffling.toOffset);
55 break;
56 }
57 if (reg == VirtualRegister(reshuffling.toOffset))
58 overwritten = true;
59 }
60 if (!overwritten && !toReg.isValid())
61 toReg = reg;
62 if (toReg.isValid()) {
63 if (toReg.isLocal() && !m_machineStackUsed.get(toReg.toLocal()))
64 out.print("ignored");
65 else
66 out.print("maps to ", toReg);
67 } else
68 out.print("overwritten");
69 if (reg.isLocal() && m_localsForcedDouble.get(reg.toLocal()))
70 out.print(", forced double");
71 if (reg.isLocal() && m_localsForcedAnyInt.get(reg.toLocal()))
72 out.print(", forced machine int");
73 out.print(")");
74 };
75
76 CommaPrinter comma;
77 for (size_t argumentIndex = m_expectedValues.numberOfArguments(); argumentIndex--;) {
78 out.print(comma, "arg", argumentIndex, ":");
79 printOperand(virtualRegisterForArgument(argumentIndex));
80 }
81 for (size_t localIndex = 0; localIndex < m_expectedValues.numberOfLocals(); ++localIndex) {
82 out.print(comma, "loc", localIndex, ":");
83 printOperand(virtualRegisterForLocal(localIndex));
84 }
85
86 out.print("], machine stack used = ", m_machineStackUsed);
87}
88
// Convenience overload of dumpInContext() with no DumpContext.
void OSREntryData::dump(PrintStream& out) const
{
    dumpInContext(out, nullptr);
}
93
// Attempts on-stack replacement from baseline code into the optimized (DFG)
// code block at the given bytecode index. On success, returns a scratch
// buffer laid out as:
//     scratch[0]   the DFG frame size, in registers
//     scratch[1]   the (re-tagged) machine PC to jump to
//     scratch[2..] the reconstructed call frame (header followed by locals)
// On failure, returns nullptr; per the comments below, the caller must then
// patch things up so baseline execution can continue.
//
// SUPPRESS_ASAN: this function reads baseline stack slots directly via
// asanUnsafeJSValue(), which may touch memory ASan considers poisoned.
SUPPRESS_ASAN
void* prepareOSREntry(VM& vm, CallFrame* callFrame, CodeBlock* codeBlock, BytecodeIndex bytecodeIndex)
{
    ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT);
    ASSERT(!codeBlock->jitCodeMap());
    ASSERT(codeBlock->jitCode()->dfgCommon()->isStillValid);

    if (!Options::useOSREntryToDFG())
        return nullptr;

    if (Options::verboseOSR()) {
        dataLog(
            "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
            " from ", bytecodeIndex, "\n");
    }

    sanitizeStackForVM(vm);

    // A non-zero bytecode index means we are entering at a loop head rather
    // than at the start of the function.
    if (bytecodeIndex)
        codeBlock->ownerExecutable()->setDidTryToEnterInLoop(true);

    if (codeBlock->jitType() != JITType::DFGJIT) {
        RELEASE_ASSERT(codeBlock->jitType() == JITType::FTLJIT);

        // When will this happen? We could have:
        //
        // - An exit from the FTL JIT into the baseline JIT followed by an attempt
        //   to reenter. We're fine with allowing this to fail. If it happens
        //   enough we'll just reoptimize. It basically means that the OSR exit cost
        //   us dearly and so reoptimizing is the right thing to do.
        //
        // - We have recursive code with hot loops. Consider that foo has a hot loop
        //   that calls itself. We have two foo's on the stack, lets call them foo1
        //   and foo2, with foo1 having called foo2 from foo's hot loop. foo2 gets
        //   optimized all the way into the FTL. Then it returns into foo1, and then
        //   foo1 wants to get optimized. It might reach this conclusion from its
        //   hot loop and attempt to OSR enter. And we'll tell it that it can't. It
        //   might be worth addressing this case, but I just think this case will
        //   be super rare. For now, if it does happen, it'll cause some compilation
        //   thrashing.

        if (Options::verboseOSR())
            dataLog("    OSR failed because the target code block is not DFG.\n");
        return nullptr;
    }

    JITCode* jitCode = codeBlock->jitCode()->dfg();
    OSREntryData* entry = jitCode->osrEntryDataForBytecodeIndex(bytecodeIndex);

    // Only bytecode indices that were compiled as OSR entrypoints have entry
    // data; anything else cannot be entered at.
    if (!entry) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because the entrypoint was optimized out.\n");
        return nullptr;
    }

    ASSERT(entry->m_bytecodeIndex == bytecodeIndex);

    // The code below checks if it is safe to perform OSR entry. It may find
    // that it is unsafe to do so, for any number of reasons, which are documented
    // below. If the code decides not to OSR then it returns 0, and it's the caller's
    // responsibility to patch up the state in such a way as to ensure that it's
    // both safe and efficient to continue executing baseline code for now. This
    // should almost certainly include calling either codeBlock->optimizeAfterWarmUp()
    // or codeBlock->dontOptimizeAnytimeSoon().

    // 1) Verify predictions. If the predictions are inconsistent with the actual
    //    values, then OSR entry is not possible at this time. It's tempting to
    //    assume that we could somehow avoid this case. We can certainly avoid it
    //    for first-time loop OSR - that is, OSR into a CodeBlock that we have just
    //    compiled. Then we are almost guaranteed that all of the predictions will
    //    check out. It would be pretty easy to make that a hard guarantee. But
    //    then there would still be the case where two call frames with the same
    //    baseline CodeBlock are on the stack at the same time. The top one
    //    triggers compilation and OSR. In that case, we may no longer have
    //    accurate value profiles for the one deeper in the stack. Hence, when we
    //    pop into the CodeBlock that is deeper on the stack, we might OSR and
    //    realize that the predictions are wrong. Probably, in most cases, this is
    //    just an anomaly in the sense that the older CodeBlock simply went off
    //    into a less-likely path. So, the wisest course of action is to simply not
    //    OSR at this time.

    for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
        // Argument 0 is |this|; actual arguments start at index 1.
        JSValue value;
        if (!argument)
            value = callFrame->thisValue();
        else
            value = callFrame->argument(argument - 1);

        if (!entry->m_expectedValues.argument(argument).validateOSREntryValue(value, FlushedJSValue)) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because argument ", argument, " is ", value,
                    ", expected ", entry->m_expectedValues.argument(argument), ".\n");
            }
            return nullptr;
        }
    }

    for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
        int localOffset = virtualRegisterForLocal(local).offset();
        JSValue value = callFrame->registers()[localOffset].asanUnsafeJSValue();
        FlushFormat format = FlushedJSValue;

        // A local the DFG keeps as Int52 must currently hold an AnyInt value.
        if (entry->m_localsForcedAnyInt.get(local)) {
            if (!value.isAnyInt()) {
                dataLogLnIf(Options::verboseOSR(),
                    "    OSR failed because variable ", localOffset, " is ",
                    value, ", expected ",
                    "machine int.");
                return nullptr;
            }
            value = jsDoubleNumber(value.asAnyInt());
            format = FlushedInt52;
        }

        // A local the DFG keeps as a double must currently hold a number.
        if (entry->m_localsForcedDouble.get(local)) {
            if (!value.isNumber()) {
                dataLogLnIf(Options::verboseOSR(),
                    "    OSR failed because variable ", localOffset, " is ",
                    value, ", expected number.");
                return nullptr;
            }
            value = jsDoubleNumber(value.asNumber());
            format = FlushedDouble;
        }

        if (!entry->m_expectedValues.local(local).validateOSREntryValue(value, format)) {
            dataLogLnIf(Options::verboseOSR(),
                "    OSR failed because variable ", VirtualRegister(localOffset), " is ",
                value, ", expected ",
                entry->m_expectedValues.local(local), ".");
            return nullptr;
        }
    }

    // 2) Check the stack height. The DFG JIT may require a taller stack than the
    //    baseline JIT, in some cases. If we can't grow the stack, then don't do
    //    OSR right now. That's the only option we have unless we want basic block
    //    boundaries to start throwing RangeErrors. Although that would be possible,
    //    it seems silly: you'd be diverting the program to error handling when it
    //    would have otherwise just kept running albeit less quickly.

    unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
    if (UNLIKELY(!vm.ensureStackCapacityFor(&callFrame->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()]))) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because stack growth failed.\n");
        return nullptr;
    }

    if (Options::verboseOSR())
        dataLogF("    OSR should succeed.\n");

    // At this point we're committed to entering. We will do some work to set things up,
    // but we also rely on our caller recognizing that when we return a non-null pointer,
    // that means that we're already past the point of no return and we must succeed at
    // entering.

    // 3) Set up the data in the scratch buffer and perform data format conversions.

    unsigned frameSize = jitCode->common.frameRegisterCount;
    unsigned baselineFrameSize = entry->m_expectedValues.numberOfLocals();
    unsigned maxFrameSize = std::max(frameSize, baselineFrameSize);

    // Two leading slots (frame size, target PC) plus the frame header plus the
    // larger of the two frame sizes.
    Register* scratch = bitwise_cast<Register*>(vm.scratchBufferForSize(sizeof(Register) * (2 + CallFrame::headerSizeInRegisters + maxFrameSize))->dataBuffer());

    *bitwise_cast<size_t*>(scratch + 0) = frameSize;

    void* targetPC = entry->m_machineCode.executableAddress();
    RELEASE_ASSERT(codeBlock->jitCode()->contains(entry->m_machineCode.untaggedExecutableAddress()));
    if (Options::verboseOSR())
        dataLogF("    OSR using target PC %p.\n", targetPC);
    RELEASE_ASSERT(targetPC);
    // Re-tag the entry PC with the call frame pointer as the tag — presumably
    // matched by the OSR entry thunk that consumes this buffer; confirm there.
    *bitwise_cast<void**>(scratch + 1) = retagCodePtr(targetPC, OSREntryPtrTag, bitwise_cast<PtrTag>(callFrame));

    // pivot[index] holds VirtualRegister(-1 - index): negative indices are the
    // call frame header, non-negative indices are the locals.
    Register* pivot = scratch + 2 + CallFrame::headerSizeInRegisters;

    for (int index = -CallFrame::headerSizeInRegisters; index < static_cast<int>(baselineFrameSize); ++index) {
        VirtualRegister reg(-1 - index);

        if (reg.isLocal()) {
            // Convert forced-format locals now so the DFG frame sees raw
            // doubles / shifted Int52 values rather than boxed JSValues.
            if (entry->m_localsForcedDouble.get(reg.toLocal())) {
                *bitwise_cast<double*>(pivot + index) = callFrame->registers()[reg.offset()].asanUnsafeJSValue().asNumber();
                continue;
            }

            if (entry->m_localsForcedAnyInt.get(reg.toLocal())) {
                *bitwise_cast<int64_t*>(pivot + index) = callFrame->registers()[reg.offset()].asanUnsafeJSValue().asAnyInt() << JSValue::int52ShiftAmount;
                continue;
            }
        }

        pivot[index] = callFrame->registers()[reg.offset()].asanUnsafeJSValue();
    }

    // 4) Reshuffle those registers that need reshuffling.
    //    Snapshot all sources first, then write all destinations, so moves
    //    whose source and destination overlap cannot corrupt each other.
    Vector<JSValue> temporaryLocals(entry->m_reshufflings.size());
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        temporaryLocals[i] = pivot[VirtualRegister(entry->m_reshufflings[i].fromOffset).toLocal()].asanUnsafeJSValue();
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        pivot[VirtualRegister(entry->m_reshufflings[i].toOffset).toLocal()] = temporaryLocals[i];

    // 5) Clear those parts of the call frame that the DFG ain't using. This helps GC on
    //    some programs by eliminating some stale pointer pathologies.
    for (unsigned i = frameSize; i--;) {
        if (entry->m_machineStackUsed.get(i))
            continue;
        pivot[i] = JSValue();
    }

    // 6) Copy our callee saves to buffer. Note that stack registers and all
    //    FPRs are excluded via dontSaveRegisters.
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    const RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters();
    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());

    unsigned registerCount = registerSaveLocations->size();
    VMEntryRecord* record = vmEntryRecord(vm.topEntryFrame);
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset currentEntry = registerSaveLocations->at(i);
        if (dontSaveRegisters.get(currentEntry.reg()))
            continue;
        RegisterAtOffset* calleeSavesEntry = allCalleeSaves->find(currentEntry.reg());

        // pivot - 1 corresponds to VirtualRegister(0), so offsetAsIndex()
        // addresses the save slot relative to the frame base.
        *(bitwise_cast<intptr_t*>(pivot - 1) - currentEntry.offsetAsIndex()) = record->calleeSaveRegistersBuffer[calleeSavesEntry->offsetAsIndex()];
    }
#endif

    // 7) Fix the call frame to have the right code block.

    *bitwise_cast<CodeBlock**>(pivot - 1 - CallFrameSlot::codeBlock) = codeBlock;

    if (Options::verboseOSR())
        dataLogF("    OSR returning data buffer %p.\n", scratch);
    return scratch;
}
331
// Attempts OSR entry into optimized code at an op_catch handler. Returns the
// machine code pointer of the compiled catch entrypoint on success, or null
// if entry is not possible (option disabled, the op_catch was not compiled as
// an entrypoint, argument types don't match what was proven, or the stack
// can't grow). On success, the profiled values of the live locals are staged
// in dfgCommon->catchOSREntryBuffer for the optimized code to consume.
MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(VM& vm, CallFrame* callFrame, CodeBlock* baselineCodeBlock, CodeBlock* optimizedCodeBlock, BytecodeIndex bytecodeIndex)
{
    ASSERT(optimizedCodeBlock->jitType() == JITType::DFGJIT || optimizedCodeBlock->jitType() == JITType::FTLJIT);
    ASSERT(optimizedCodeBlock->jitCode()->dfgCommon()->isStillValid);

    // Catch OSR entry is gated separately per tier.
    if (!Options::useOSREntryToDFG() && optimizedCodeBlock->jitCode()->jitType() == JITType::DFGJIT)
        return nullptr;
    if (!Options::useOSREntryToFTL() && optimizedCodeBlock->jitCode()->jitType() == JITType::FTLJIT)
        return nullptr;

    CommonData* dfgCommon = optimizedCodeBlock->jitCode()->dfgCommon();
    RELEASE_ASSERT(dfgCommon);
    DFG::CatchEntrypointData* catchEntrypoint = dfgCommon->catchOSREntryDataForBytecodeIndex(bytecodeIndex);
    if (!catchEntrypoint) {
        // This can be null under some circumstances. The most common is that we didn't
        // compile this op_catch as an entrypoint since it had never executed when starting
        // the compilation.
        return nullptr;
    }

    // We're only allowed to OSR enter if we've proven we have compatible argument types.
    for (unsigned argument = 0; argument < catchEntrypoint->argumentFormats.size(); ++argument) {
        JSValue value = callFrame->uncheckedR(virtualRegisterForArgument(argument)).jsValue();
        switch (catchEntrypoint->argumentFormats[argument]) {
        case DFG::FlushedInt32:
            if (!value.isInt32())
                return nullptr;
            break;
        case DFG::FlushedCell:
            if (!value.isCell())
                return nullptr;
            break;
        case DFG::FlushedBoolean:
            if (!value.isBoolean())
                return nullptr;
            break;
        case DFG::DeadFlush:
            // This means the argument is not alive. Therefore, it's allowed to be any type.
            break;
        case DFG::FlushedJSValue:
            // An argument is trivially a JSValue.
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    // Make sure the stack is tall enough for the optimized frame.
    // NOTE(review): prepareOSREntry checks frameSizeForCheck - 1 here; this
    // checks one slot further, which is conservative — confirm intentional.
    unsigned frameSizeForCheck = dfgCommon->requiredRegisterCountForExecutionAndExit();
    if (UNLIKELY(!vm.ensureStackCapacityFor(&callFrame->registers()[virtualRegisterForLocal(frameSizeForCheck).offset()])))
        return nullptr;

    // Stage the profiled values of the live locals (recorded by the baseline
    // op_catch) into the catch OSR entry buffer, in profile order.
    auto instruction = baselineCodeBlock->instructions().at(callFrame->bytecodeIndex());
    ASSERT(instruction->is<OpCatch>());
    ValueProfileAndOperandBuffer* buffer = instruction->as<OpCatch>().metadata(baselineCodeBlock).m_buffer;
    JSValue* dataBuffer = reinterpret_cast<JSValue*>(dfgCommon->catchOSREntryBuffer->dataBuffer());
    unsigned index = 0;
    buffer->forEach([&] (ValueProfileAndOperand& profile) {
        if (!VirtualRegister(profile.m_operand).isLocal())
            return;
        dataBuffer[index] = callFrame->uncheckedR(profile.m_operand).jsValue();
        ++index;
    });

    // The active length of catchOSREntryBuffer will be zeroed by ClearCatchLocals node.
    dfgCommon->catchOSREntryBuffer->setActiveLength(sizeof(JSValue) * index);
    return catchEntrypoint->machineCode;
}
399
400} } // namespace JSC::DFG
401
402#endif // ENABLE(DFG_JIT)
403