1/*
2 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "DFGOSREntry.h"
28
29#if ENABLE(DFG_JIT)
30
31#include "CallFrame.h"
32#include "CodeBlock.h"
33#include "DFGJITCode.h"
34#include "DFGNode.h"
35#include "InterpreterInlines.h"
36#include "JIT.h"
37#include "JSCInlines.h"
38#include "VMInlines.h"
39#include <wtf/CommaPrinter.h>
40
41namespace JSC { namespace DFG {
42
43void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const
44{
45 out.print("bc#", m_bytecodeIndex, ", machine code = ", RawPointer(m_machineCode.executableAddress()));
46 out.print(", stack rules = [");
47
48 auto printOperand = [&] (VirtualRegister reg) {
49 out.print(inContext(m_expectedValues.operand(reg), context), " (");
50 VirtualRegister toReg;
51 bool overwritten = false;
52 for (OSREntryReshuffling reshuffling : m_reshufflings) {
53 if (reg == VirtualRegister(reshuffling.fromOffset)) {
54 toReg = VirtualRegister(reshuffling.toOffset);
55 break;
56 }
57 if (reg == VirtualRegister(reshuffling.toOffset))
58 overwritten = true;
59 }
60 if (!overwritten && !toReg.isValid())
61 toReg = reg;
62 if (toReg.isValid()) {
63 if (toReg.isLocal() && !m_machineStackUsed.get(toReg.toLocal()))
64 out.print("ignored");
65 else
66 out.print("maps to ", toReg);
67 } else
68 out.print("overwritten");
69 if (reg.isLocal() && m_localsForcedDouble.get(reg.toLocal()))
70 out.print(", forced double");
71 if (reg.isLocal() && m_localsForcedAnyInt.get(reg.toLocal()))
72 out.print(", forced machine int");
73 out.print(")");
74 };
75
76 CommaPrinter comma;
77 for (size_t argumentIndex = m_expectedValues.numberOfArguments(); argumentIndex--;) {
78 out.print(comma, "arg", argumentIndex, ":");
79 printOperand(virtualRegisterForArgument(argumentIndex));
80 }
81 for (size_t localIndex = 0; localIndex < m_expectedValues.numberOfLocals(); ++localIndex) {
82 out.print(comma, "loc", localIndex, ":");
83 printOperand(virtualRegisterForLocal(localIndex));
84 }
85
86 out.print("], machine stack used = ", m_machineStackUsed);
87}
88
// Convenience overload: dump with no DumpContext (values are printed
// without contextual abbreviation).
void OSREntryData::dump(PrintStream& out) const
{
    dumpInContext(out, nullptr);
}
93
// Attempt on-stack replacement from baseline code into the optimized (DFG)
// codeBlock at the given bytecode index. On success, returns a scratch buffer
// describing the target frame image; the caller is then committed to entering
// the optimized code. On failure, returns nullptr and the caller must patch up
// tiering state (e.g. optimizeAfterWarmUp / dontOptimizeAnytimeSoon).
//
// SUPPRESS_ASAN: this function reads baseline frame slots via the
// asanUnsafeJSValue accessors below; presumably some of those slots may look
// uninitialized to ASan — TODO(review) confirm that is the reason.
SUPPRESS_ASAN
void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
{
    ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT);
    ASSERT(!codeBlock->jitCodeMap());
    ASSERT(codeBlock->jitCode()->dfgCommon()->isStillValid);
    
    if (!Options::useOSREntryToDFG())
        return nullptr;
    
    if (Options::verboseOSR()) {
        dataLog(
            "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
            " from bc#", bytecodeIndex, "\n");
    }
    
    VM* vm = &exec->vm();

    sanitizeStackForVM(vm);
    
    // bytecodeIndex != 0 means we are entering at a loop head rather than at
    // the function's entrypoint; record that for tiering heuristics.
    if (bytecodeIndex)
        codeBlock->ownerExecutable()->setDidTryToEnterInLoop(true);
    
    if (codeBlock->jitType() != JITType::DFGJIT) {
        RELEASE_ASSERT(codeBlock->jitType() == JITType::FTLJIT);
        
        // When will this happen? We could have:
        //
        // - An exit from the FTL JIT into the baseline JIT followed by an attempt
        //   to reenter. We're fine with allowing this to fail. If it happens
        //   enough we'll just reoptimize. It basically means that the OSR exit cost
        //   us dearly and so reoptimizing is the right thing to do.
        //
        // - We have recursive code with hot loops. Consider that foo has a hot loop
        //   that calls itself. We have two foo's on the stack, lets call them foo1
        //   and foo2, with foo1 having called foo2 from foo's hot loop. foo2 gets
        //   optimized all the way into the FTL. Then it returns into foo1, and then
        //   foo1 wants to get optimized. It might reach this conclusion from its
        //   hot loop and attempt to OSR enter. And we'll tell it that it can't. It
        //   might be worth addressing this case, but I just think this case will
        //   be super rare. For now, if it does happen, it'll cause some compilation
        //   thrashing.
        
        if (Options::verboseOSR())
            dataLog("    OSR failed because the target code block is not DFG.\n");
        return nullptr;
    }

    JITCode* jitCode = codeBlock->jitCode()->dfg();
    OSREntryData* entry = jitCode->osrEntryDataForBytecodeIndex(bytecodeIndex);
    
    if (!entry) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because the entrypoint was optimized out.\n");
        return nullptr;
    }
    
    ASSERT(entry->m_bytecodeIndex == bytecodeIndex);
    
    // The code below checks if it is safe to perform OSR entry. It may find
    // that it is unsafe to do so, for any number of reasons, which are documented
    // below. If the code decides not to OSR then it returns 0, and it's the caller's
    // responsibility to patch up the state in such a way as to ensure that it's
    // both safe and efficient to continue executing baseline code for now. This
    // should almost certainly include calling either codeBlock->optimizeAfterWarmUp()
    // or codeBlock->dontOptimizeAnytimeSoon().
    
    // 1) Verify predictions. If the predictions are inconsistent with the actual
    //    values, then OSR entry is not possible at this time. It's tempting to
    //    assume that we could somehow avoid this case. We can certainly avoid it
    //    for first-time loop OSR - that is, OSR into a CodeBlock that we have just
    //    compiled. Then we are almost guaranteed that all of the predictions will
    //    check out. It would be pretty easy to make that a hard guarantee. But
    //    then there would still be the case where two call frames with the same
    //    baseline CodeBlock are on the stack at the same time. The top one
    //    triggers compilation and OSR. In that case, we may no longer have
    //    accurate value profiles for the one deeper in the stack. Hence, when we
    //    pop into the CodeBlock that is deeper on the stack, we might OSR and
    //    realize that the predictions are wrong. Probably, in most cases, this is
    //    just an anomaly in the sense that the older CodeBlock simply went off
    //    into a less-likely path. So, the wisest course of action is to simply not
    //    OSR at this time.
    
    // Argument 0 is |this|; arguments 1..n-1 map to exec->argument(i - 1).
    for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
        JSValue value;
        if (!argument)
            value = exec->thisValue();
        else
            value = exec->argument(argument - 1);
        
        if (!entry->m_expectedValues.argument(argument).validateOSREntryValue(value, FlushedJSValue)) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because argument ", argument, " is ", value,
                    ", expected ", entry->m_expectedValues.argument(argument), ".\n");
            }
            return nullptr;
        }
    }
    
    for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
        int localOffset = virtualRegisterForLocal(local).offset();
        JSValue value = exec->registers()[localOffset].asanUnsafeJSValue();
        FlushFormat format = FlushedJSValue;

        // A local the DFG flushed as Int52 must currently hold an AnyInt value;
        // it is validated below boxed as a double, with the Int52 format.
        if (entry->m_localsForcedAnyInt.get(local)) {
            if (!value.isAnyInt()) {
                dataLogLnIf(Options::verboseOSR(),
                    "    OSR failed because variable ", localOffset, " is ",
                    value, ", expected ",
                    "machine int.");
                return nullptr;
            }
            value = jsDoubleNumber(value.asAnyInt());
            format = FlushedInt52;
        }

        // A local the DFG flushed as double must currently hold a number.
        if (entry->m_localsForcedDouble.get(local)) {
            if (!value.isNumber()) {
                dataLogLnIf(Options::verboseOSR(),
                    "    OSR failed because variable ", localOffset, " is ",
                    value, ", expected number.");
                return nullptr;
            }
            value = jsDoubleNumber(value.asNumber());
            format = FlushedDouble;
        }

        if (!entry->m_expectedValues.local(local).validateOSREntryValue(value, format)) {
            dataLogLnIf(Options::verboseOSR(),
                "    OSR failed because variable ", VirtualRegister(localOffset), " is ",
                value, ", expected ",
                entry->m_expectedValues.local(local), ".");
            return nullptr;
        }
    }

    // 2) Check the stack height. The DFG JIT may require a taller stack than the
    //    baseline JIT, in some cases. If we can't grow the stack, then don't do
    //    OSR right now. That's the only option we have unless we want basic block
    //    boundaries to start throwing RangeErrors. Although that would be possible,
    //    it seems silly: you'd be diverting the program to error handling when it
    //    would have otherwise just kept running albeit less quickly.
    
    // frameSizeForCheck - 1 is the index of the deepest local slot the DFG
    // frame will touch; ensure the stack reaches that address.
    unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
    if (UNLIKELY(!vm->ensureStackCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()]))) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because stack growth failed.\n");
        return nullptr;
    }
    
    if (Options::verboseOSR())
        dataLogF("    OSR should succeed.\n");

    // At this point we're committed to entering. We will do some work to set things up,
    // but we also rely on our caller recognizing that when we return a non-null pointer,
    // that means that we're already past the point of no return and we must succeed at
    // entering.
    
    // 3) Set up the data in the scratch buffer and perform data format conversions.

    unsigned frameSize = jitCode->common.frameRegisterCount;
    unsigned baselineFrameSize = entry->m_expectedValues.numberOfLocals();
    unsigned maxFrameSize = std::max(frameSize, baselineFrameSize);

    // Scratch buffer layout: [0] = frameSize, [1] = tagged target PC, then a
    // CallFrame header followed by maxFrameSize local slots. "pivot" points
    // just past the header, so locals are addressed at negative indices from
    // it is wrong — locals are at pivot[0..], header at pivot[-header..-1].
    Register* scratch = bitwise_cast<Register*>(vm->scratchBufferForSize(sizeof(Register) * (2 + CallFrame::headerSizeInRegisters + maxFrameSize))->dataBuffer());
    
    *bitwise_cast<size_t*>(scratch + 0) = frameSize;

    // The target PC is re-signed so that it is diversified by the frame
    // pointer (exec) that the OSR entry thunk will authenticate with.
    void* targetPC = entry->m_machineCode.executableAddress();
    RELEASE_ASSERT(codeBlock->jitCode()->contains(entry->m_machineCode.untaggedExecutableAddress()));
    if (Options::verboseOSR())
        dataLogF("    OSR using target PC %p.\n", targetPC);
    RELEASE_ASSERT(targetPC);
    *bitwise_cast<void**>(scratch + 1) = retagCodePtr(targetPC, OSREntryPtrTag, bitwise_cast<PtrTag>(exec));
    
    Register* pivot = scratch + 2 + CallFrame::headerSizeInRegisters;
    
    // Copy the CallFrame header and all baseline locals from the live frame
    // into the buffer, applying the forced representation conversions chosen
    // by the DFG (raw double or left-shifted Int52 instead of a boxed JSValue).
    for (int index = -CallFrame::headerSizeInRegisters; index < static_cast<int>(baselineFrameSize); ++index) {
        VirtualRegister reg(-1 - index);
        
        if (reg.isLocal()) {
            if (entry->m_localsForcedDouble.get(reg.toLocal())) {
                *bitwise_cast<double*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asNumber();
                continue;
            }
            
            if (entry->m_localsForcedAnyInt.get(reg.toLocal())) {
                *bitwise_cast<int64_t*>(pivot + index) = exec->registers()[reg.offset()].asanUnsafeJSValue().asAnyInt() << JSValue::int52ShiftAmount;
                continue;
            }
        }
        
        pivot[index] = exec->registers()[reg.offset()].asanUnsafeJSValue();
    }
    
    // 4) Reshuffle those registers that need reshuffling.
    //    Copy all sources out first, then write all targets, so overlapping
    //    moves cannot clobber a source before it has been read.
    Vector<JSValue> temporaryLocals(entry->m_reshufflings.size());
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        temporaryLocals[i] = pivot[VirtualRegister(entry->m_reshufflings[i].fromOffset).toLocal()].asanUnsafeJSValue();
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        pivot[VirtualRegister(entry->m_reshufflings[i].toOffset).toLocal()] = temporaryLocals[i];
    
    // 5) Clear those parts of the call frame that the DFG ain't using. This helps GC on
    //    some programs by eliminating some stale pointer pathologies.
    for (unsigned i = frameSize; i--;) {
        if (entry->m_machineStackUsed.get(i))
            continue;
        pivot[i] = JSValue();
    }

    // 6) Copy our callee saves to buffer.
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    const RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters();
    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());

    // For each callee-save the DFG code expects in the frame, fetch its value
    // from the VM entry record's buffer and place it at the frame offset the
    // codeBlock assigned to it.
    unsigned registerCount = registerSaveLocations->size();
    VMEntryRecord* record = vmEntryRecord(vm->topEntryFrame);
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset currentEntry = registerSaveLocations->at(i);
        if (dontSaveRegisters.get(currentEntry.reg()))
            continue;
        RegisterAtOffset* calleeSavesEntry = allCalleeSaves->find(currentEntry.reg());
        
        *(bitwise_cast<intptr_t*>(pivot - 1) - currentEntry.offsetAsIndex()) = record->calleeSaveRegistersBuffer[calleeSavesEntry->offsetAsIndex()];
    }
#endif
    
    // 7) Fix the call frame to have the right code block.
    
    *bitwise_cast<CodeBlock**>(pivot - 1 - CallFrameSlot::codeBlock) = codeBlock;
    
    if (Options::verboseOSR())
        dataLogF("    OSR returning data buffer %p.\n", scratch);
    return scratch;
}
333
// Attempt OSR entry into optimized code at an op_catch (i.e. when unwinding an
// exception lands in a frame whose optimized replacement compiled the catch
// block as an entrypoint). Returns the machine code address to jump to, or a
// null code pointer if entry is not possible.
MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT || codeBlock->jitType() == JITType::FTLJIT);
    ASSERT(codeBlock->jitCode()->dfgCommon()->isStillValid);

    if (!Options::useOSREntryToDFG() && codeBlock->jitCode()->jitType() == JITType::DFGJIT)
        return nullptr;
    if (!Options::useOSREntryToFTL() && codeBlock->jitCode()->jitType() == JITType::FTLJIT)
        return nullptr;

    VM& vm = exec->vm();

    CommonData* dfgCommon = codeBlock->jitCode()->dfgCommon();
    RELEASE_ASSERT(dfgCommon);
    DFG::CatchEntrypointData* catchEntrypoint = dfgCommon->catchOSREntryDataForBytecodeIndex(bytecodeIndex);
    if (!catchEntrypoint) {
        // This can be null under some circumstances. The most common is that we didn't
        // compile this op_catch as an entrypoint since it had never executed when starting
        // the compilation.
        return nullptr;
    }

    // We're only allowed to OSR enter if we've proven we have compatible argument types.
    for (unsigned argument = 0; argument < catchEntrypoint->argumentFormats.size(); ++argument) {
        JSValue value = exec->uncheckedR(virtualRegisterForArgument(argument)).jsValue();
        switch (catchEntrypoint->argumentFormats[argument]) {
        case DFG::FlushedInt32:
            if (!value.isInt32())
                return nullptr;
            break;
        case DFG::FlushedCell:
            if (!value.isCell())
                return nullptr;
            break;
        case DFG::FlushedBoolean:
            if (!value.isBoolean())
                return nullptr;
            break;
        case DFG::DeadFlush:
            // This means the argument is not alive. Therefore, it's allowed to be any type.
            break;
        case DFG::FlushedJSValue:
            // An argument is trivially a JSValue.
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    // The optimized code may need a taller stack than the baseline frame
    // provides; bail if we cannot grow to cover it.
    unsigned frameSizeForCheck = dfgCommon->requiredRegisterCountForExecutionAndExit();
    if (UNLIKELY(!vm.ensureStackCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck).offset()])))
        return nullptr;

    // Snapshot the live local values the op_catch profiled into the catch OSR
    // entry buffer; the optimized entrypoint reads them from there. Only
    // locals are copied — arguments were validated in place above.
    auto instruction = exec->codeBlock()->instructions().at(exec->bytecodeOffset());
    ASSERT(instruction->is<OpCatch>());
    ValueProfileAndOperandBuffer* buffer = instruction->as<OpCatch>().metadata(exec).m_buffer;
    JSValue* dataBuffer = reinterpret_cast<JSValue*>(dfgCommon->catchOSREntryBuffer->dataBuffer());
    unsigned index = 0;
    buffer->forEach([&] (ValueProfileAndOperand& profile) {
        if (!VirtualRegister(profile.m_operand).isLocal())
            return;
        dataBuffer[index] = exec->uncheckedR(profile.m_operand).jsValue();
        ++index;
    });

    // The active length of catchOSREntryBuffer will be zeroed by ClearCatchLocals node.
    dfgCommon->catchOSREntryBuffer->setActiveLength(sizeof(JSValue) * index);
    return catchEntrypoint->machineCode;
}
403
404} } // namespace JSC::DFG
405
406#endif // ENABLE(DFG_JIT)
407