/*
 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExit.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCInlines.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "StructureStubInfo.h"
#include "ThunkGenerators.h"
#include "VM.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode()))
    , m_blockHeads(dfg.numBlocks())
    , m_pcToCodeOriginMapBuilder(dfg.m_vm)
{
    if (UNLIKELY(shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler))
        m_disassembler = makeUnique<Disassembler>(dfg);
#if ENABLE(FTL_JIT)
    m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy());
    for (BytecodeIndex tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes())
        m_jitCode->tierUpEntryTriggers.add(tierUpBytecode, JITCode::TriggerReason::DontTrigger);
#endif
}

JITCompiler::~JITCompiler()
{
}

void JITCompiler::linkOSRExits()
{
    ASSERT(m_jitCode->osrExit.size() == m_exitCompilationInfo.size());
    if (UNLIKELY(m_graph.compilation())) {
        for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;
            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

    MacroAssemblerCodeRef<JITThunkPtrTag> osrExitThunk = vm().getCTIStub(osrExitThunkGenerator);
    auto osrExitThunkLabel = CodeLocationLabel<JITThunkPtrTag>(osrExitThunk.code());
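    // For each exit, link up its failure jumps (or, when there are none, record the label that a
    // jump replacement will land on), store the exit's index where the exit thunk can find it, and
    // then either jump straight to the probe-based OSR exit thunk or plant a patchable jump that
    // link() will later point at the exit generation thunk.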
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();

        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm().osrExitIndex);
        if (Options::useProbeOSRExit()) {
            Jump target = jump();
            addLinkTask([target, osrExitThunkLabel] (LinkBuffer& linkBuffer) {
                linkBuffer.link(target, osrExitThunkLabel);
            });
        } else
            info.m_patchableJump = patchableJump();
    }
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save the return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check), which will be dependent on stack layout. (We'd need to account for this in
    // both the normal return code and when jumping to an exception handler.)
    emitFunctionPrologue();
    emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
}

void JITCompiler::compileSetupRegistersForEntry()
{
    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();
}

void JITCompiler::compileEntryExecutionFlag()
{
#if ENABLE(FTL_JIT)
    if (m_graph.m_plan.canTierUpAndOSREnter())
        store8(TrustedImm32(0), &m_jitCode->neverExecutedEntry);
#endif // ENABLE(FTL_JIT)
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

void JITCompiler::compileExceptionHandlers()
{
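    // Two flavors of exception checks get emitted. Checks appended to
    // m_exceptionChecksWithCallFrameRollback come from the call-frame-rollback operations (in this
    // file, the stack overflow and arity check calls), so their handler is looked up starting from
    // the caller's frame. Ordinary checks on m_exceptionChecks look up the handler for this frame.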
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);

        // operationLookupExceptionHandlerFromCallerFrame is passed one argument, the VM*.
        move(TrustedImmPtr(&vm()), GPRInfo::argumentGPR0);
        prepareCallOperation(vm());
        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);

        m_calls.append(CallLinkRecord(call(OperationPtrTag), FunctionPtr<OperationPtrTag>(operationLookupExceptionHandlerFromCallerFrame)));

        jumpToExceptionHandler(vm());
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);

        copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);

        // operationLookupExceptionHandler is passed one argument, the VM*.
        move(TrustedImmPtr(&vm()), GPRInfo::argumentGPR0);
        prepareCallOperation(vm());

        m_calls.append(CallLinkRecord(call(OperationPtrTag), FunctionPtr<OperationPtrTag>(operationLookupExceptionHandler)));

        jumpToExceptionHandler(vm());
    }
}

void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames()->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames();

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

    BitVector usedJumpTables;
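    // For each numeric or character switch that was compiled to a jump table, point every slot at
    // the default (fall-through) block first, then overwrite the slots for explicit cases with the
    // addresses of their target blocks. Tables that turn out to be unused are cleared below.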
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[myCase.target.block->index]);
        }
    }

    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and cannot deref StringImpls, and (2) it would be weird to deref them anyway,
    // since we still refer to them.
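    // String switches get the same treatment: every entry in the offset table defaults to the
    // fall-through target, and the entries with explicit cases are then pointed at their blocks.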
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);

        table.ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    finalizeInlineCaches(m_getByIds, linkBuffer);
    finalizeInlineCaches(m_getByIdsWithThis, linkBuffer);
    finalizeInlineCaches(m_getByVals, linkBuffer);
    finalizeInlineCaches(m_putByIds, linkBuffer);
    finalizeInlineCaches(m_inByIds, linkBuffer);
    finalizeInlineCaches(m_instanceOfs, linkBuffer);

    auto linkCallThunk = FunctionPtr<NoPtrTag>(vm().getCTIStub(linkCallThunkGenerator).retaggedCode<NoPtrTag>());
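    // Point each JS call's slow path at the generic call link thunk, and record the locations of
    // the slow and fast near calls (and of the callee check) on the CallLinkInfo so the call can be
    // linked to a real callee at run time.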
    for (auto& record : m_jsCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.slowCall, linkCallThunk);
        info.setCallLocations(
            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOfNearCall<JSInternalPtrTag>(record.slowCall)),
            CodeLocationLabel<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(record.targetToCheck)),
            linkBuffer.locationOfNearCall<JSInternalPtrTag>(record.fastCall));
    }

    for (JSDirectCallRecord& record : m_jsDirectCalls) {
        CallLinkInfo& info = *record.info;
        linkBuffer.link(record.call, linkBuffer.locationOf<NoPtrTag>(record.slowPath));
        info.setCallLocations(
            CodeLocationLabel<JSInternalPtrTag>(),
            linkBuffer.locationOf<JSInternalPtrTag>(record.slowPath),
            linkBuffer.locationOfNearCall<JSInternalPtrTag>(record.call));
    }

    for (JSDirectTailCallRecord& record : m_jsDirectTailCalls) {
        CallLinkInfo& info = *record.info;
        info.setCallLocations(
            linkBuffer.locationOf<JSInternalPtrTag>(record.patchableJump),
            linkBuffer.locationOf<JSInternalPtrTag>(record.slowPath),
            linkBuffer.locationOfNearCall<JSInternalPtrTag>(record.call));
    }

    MacroAssemblerCodeRef<JITThunkPtrTag> osrExitThunk = vm().getCTIStub(osrExitGenerationThunkGenerator);
    auto target = CodeLocationLabel<JITThunkPtrTag>(osrExitThunk.code());
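    // Unless probe-based OSR exits are in use, point each exit's patchable jump at the exit
    // generation thunk and remember where that jump lives so the exit can later be repatched to its
    // compiled code. Exits that originate from a jump replacement instead register a
    // (source, destination) pair on the jump replacement list.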
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        if (!Options::useProbeOSRExit()) {
            linkBuffer.link(info.m_patchableJump.m_jump, target);
            OSRExit& exit = m_jitCode->osrExit[i];
            exit.m_patchableJumpLocation = linkBuffer.locationOf<JSInternalPtrTag>(info.m_patchableJump);
        }
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf<JSInternalPtrTag>(info.m_replacementSource),
                linkBuffer.locationOf<OSRExitPtrTag>(info.m_replacementDestination)));
        }
    }

    if (UNLIKELY(m_graph.compilation())) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<MacroAssemblerCodePtr<JSInternalPtrTag>> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf<JSInternalPtrTag>(labels[j]));
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();

    // Link new DFG exception handlers and remove baseline JIT handlers.
    m_codeBlock->clearExceptionHandlers();
    for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
        OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
        if (info.m_replacementDestination.isSet()) {
            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow,
            // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
            // If this *is* set, it means we will be landing at this code location from genericUnwind from an
            // exception thrown in a child call frame.
            CodeLocationLabel<ExceptionHandlerPtrTag> catchLabel = linkBuffer.locationOf<ExceptionHandlerPtrTag>(info.m_replacementDestination);
            HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
            CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
            newExceptionHandler.start = callSite.bits();
            newExceptionHandler.end = callSite.bits() + 1;
            newExceptionHandler.nativeCode = catchLabel;
            m_codeBlock->appendExceptionHandler(newExceptionHandler);
        }
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(makeUnique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
}

static void emitStackOverflowCheck(JITCompiler& jit, MacroAssembler::JumpList& stackOverflow)
{
    int frameTopOffset = virtualRegisterForLocal(jit.graph().requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register);
    unsigned maxFrameSize = -frameTopOffset;

    jit.addPtr(MacroAssembler::TrustedImm32(frameTopOffset), GPRInfo::callFrameRegister, GPRInfo::regT1);
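    // regT1 now holds the lowest address the new frame may touch (frameTopOffset is negative). If
    // the frame is larger than the reserved zone, that addition may have wrapped around, so first
    // catch the case where regT1 ended up above the call frame pointer; otherwise we overflow when
    // regT1 has dipped below the soft stack limit, since the stack grows down.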
    if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
        stackOverflow.append(jit.branchPtr(MacroAssembler::Above, GPRInfo::regT1, GPRInfo::callFrameRegister));
    stackOverflow.append(jit.branchPtr(MacroAssembler::Above, MacroAssembler::AbsoluteAddress(jit.vm().addressOfSoftStackLimit()), GPRInfo::regT1));
}

void JITCompiler::compile()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    compileEntry();
    m_speculative = makeUnique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

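    // Move the stack pointer down to cover the frame's locals, optionally zeroing the new frame,
    // and make sure the result is properly aligned before generating the main path.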
    addPtr(TrustedImm32(-(m_graph.frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister, stackPointerRegister);
    if (Options::zeroStackFrame())
        clearStackFrame(GPRInfo::callFrameRegister, stackPointerRegister, GPRInfo::regT0, m_graph.frameRegisterCount() * sizeof(Register));
    checkStackPointerAlignment();
    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = makeUnique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.setFinalizer(makeUnique<FailedFinalizer>(m_graph.m_plan));
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    disassemble(*linkBuffer);

    m_graph.m_plan.setFinalizer(makeUnique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer)));
}

void JITCompiler::compileFunction()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    Label entryLabel(this);
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

    // Move the stack pointer down to accommodate locals
    addPtr(TrustedImm32(-(m_graph.frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister, stackPointerRegister);
    if (Options::zeroStackFrame())
        clearStackFrame(GPRInfo::callFrameRegister, stackPointerRegister, GPRInfo::regT0, m_graph.frameRegisterCount() * sizeof(Register));
    checkStackPointerAlignment();

    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();

    // === Function body code generation ===
    m_speculative = makeUnique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check that the correct number of arguments
    // has been passed to the call (we only use the fast entry point where we can statically
    // determine that the correct number of arguments has been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
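    // The arity check entry re-runs the prologue, reloads the argument count from the call frame
    // header, and falls through to fromArityCheck when enough arguments were supplied. Otherwise it
    // calls the arity check operation (rolling back the frame on exception); a zero result means
    // the frame can be used as-is, while a non-zero result is passed to the arity fixup thunk so
    // the frame can be adjusted before entering at fromArityCheck.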
    Call callArityFixup;
    Label arityCheck;
    bool requiresArityFixup = m_codeBlock->numParameters() != 1;
    if (requiresArityFixup) {
        arityCheck = label();
        compileEntry();

        load32(AssemblyHelpers::payloadFor((VirtualRegister)CallFrameSlot::argumentCount), GPRInfo::regT1);
        branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
        emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
        m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0, m_codeBlock->globalObject());
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
        emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));
        move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
        callArityFixup = nearCall();
        jump(fromArityCheck);
    } else
        arityCheck = entryLabel;

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = makeUnique<LinkBuffer>(*this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.setFinalizer(makeUnique<FailedFinalizer>(m_graph.m_plan));
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    if (requiresArityFixup)
        linkBuffer->link(callArityFixup, FunctionPtr<JITThunkPtrTag>(vm().getCTIStub(arityFixupGenerator).code()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = linkBuffer->locationOf<JSEntryPtrTag>(arityCheck);

    m_graph.m_plan.setFinalizer(makeUnique<JITFinalizer>(
        m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck));
}

void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldDumpDisassembly()) {
        m_disassembler->dump(linkBuffer);
        linkBuffer.didAlreadyDisassemble();
    }

    if (UNLIKELY(m_graph.m_plan.compilation()))
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation(), linkBuffer);
}

#if USE(JSVALUE32_64)
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
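    // Double constants are interned by bit pattern: reuse the constant-pool slot for a value we
    // have already seen, otherwise allocate a fresh slot in the out-of-line Bag and remember it in
    // the map.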
    double value = node->asNumber();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    auto it = m_graph.m_doubleConstantsMap.find(valueBits);
    if (it != m_graph.m_doubleConstantsMap.end())
        return it->second;

    if (!m_graph.m_doubleConstants)
        m_graph.m_doubleConstants = makeUnique<Bag<double>>();

    double* addressInConstantPool = m_graph.m_doubleConstants->add();
    *addressInConstantPool = value;
    m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
    return addressInConstantPool;
}
#endif

void JITCompiler::noticeCatchEntrypoint(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer, Vector<FlushFormat>&& argumentFormats)
{
    RELEASE_ASSERT(basicBlock.isCatchEntrypoint);
    RELEASE_ASSERT(basicBlock.intersectionOfCFAHasVisited); // An entrypoint is reachable by definition.
    m_jitCode->common.appendCatchEntrypoint(basicBlock.bytecodeBegin, linkBuffer.locationOf<ExceptionHandlerPtrTag>(blockHead), WTFMove(argumentFormats));
}

void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
    RELEASE_ASSERT(!basicBlock.isCatchEntrypoint);

    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
    if (!basicBlock.intersectionOfCFAHasVisited)
        return;

    OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.locationOf<OSREntryPtrTag>(blockHead));

    entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;

    // Fix the expected values: in our protocol, a dead variable will have an expected
    // value of (None, []). But the old JIT may stash some values there. So we really
    // need (Top, TOP).
    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
        Node* node = basicBlock.variablesAtHead.argument(argument);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.argument(argument).makeBytecodeTop();
    }
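    // For live locals, record which machine stack slots OSR entry has to populate, note locals
    // that must arrive as doubles or as Int52/AnyInt values, and record a reshuffling for any
    // variable whose bytecode slot differs from its machine slot.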
    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
        Node* node = basicBlock.variablesAtHead.local(local);
        if (!node || !node->shouldGenerate())
            entry->m_expectedValues.local(local).makeBytecodeTop();
        else {
            VariableAccessData* variable = node->variableAccessData();
            entry->m_machineStackUsed.set(variable->machineLocal().toLocal());

            switch (variable->flushFormat()) {
            case FlushedDouble:
                entry->m_localsForcedDouble.set(local);
                break;
            case FlushedInt52:
                entry->m_localsForcedAnyInt.set(local);
                break;
            default:
                break;
            }

            if (variable->local() != variable->machineLocal()) {
                entry->m_reshufflings.append(
                    OSREntryReshuffling(
                        variable->local().offset(), variable->machineLocal().offset()));
            }
        }
    }

    entry->m_reshufflings.shrinkToFit();
}

void JITCompiler::appendExceptionHandlingOSRExit(ExitKind kind, unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
{
    OSRExit exit(kind, JSValueRegs(), MethodOfGettingAValueProfile(), m_speculative.get(), eventStreamIndex);
    exit.m_codeOrigin = opCatchOrigin;
    exit.m_exceptionHandlerCallSiteIndex = callSite;
    OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
    jitCode()->appendOSRExit(exit);
    m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
}

void JITCompiler::exceptionCheck()
{
    // It's important that we use origin.forExit here. Consider the case where we hoist a string
    // concatenation out of a loop, and we then OSR exit at the point of that concatenation
    // because of an out-of-memory exception.
    // If the original loop had a try/catch around the string concatenation and we "catch"
    // that exception inside the loop, then the loop's induction variable will be undefined
    // in the OSR exit value recovery. It's more defensible for the string concatenation,
    // then, to not be caught by the for loop's try/catch.
    // Here is the program I'm speaking about:
    //
    // >>>> let's presume "c = a + b" gets hoisted here.
    // for (var i = 0; i < length; i++) {
    //     try {
    //         c = a + b
    //     } catch(e) {
    //         If we threw an out-of-memory error, and we caught the exception
    //         right here, then "i" would almost certainly be undefined, which
    //         would make no sense.
    //         ...
    //     }
    // }
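    // If the exception will be caught in this machine frame, emit a non-patchable check that OSR
    // exits to the corresponding catch handler; otherwise just remember the check so
    // compileExceptionHandlers() can route it to the generic handler lookup.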
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
    if (willCatchException) {
        unsigned streamIndex = m_speculative->m_outOfLineStreamIndex ? *m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
        MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck(vm());
        // We assume here that this is called after callOperation()/appendCall() is called.
        appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException);
    } else
        m_exceptionChecks.append(emitExceptionCheck(vm()));
}

CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
{
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler);
    CallSiteIndex callSite = addCallSite(callSiteCodeOrigin);
    if (willCatchException)
        appendExceptionHandlingOSRExit(GenericUnwind, eventStreamIndex, opCatchOrigin, exceptionHandler, callSite);
    return callSite;
}

void JITCompiler::setEndOfMainPath()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), m_speculative->m_origin.semantic);
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints());
}

void JITCompiler::setEndOfCode()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfCode(labelIgnoringWatchpoints());
}

void JITCompiler::makeCatchOSREntryBuffer()
{
    if (m_graph.m_maxLocalsForCatchOSREntry) {
        uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
        m_jitCode->common.catchOSREntryBuffer = vm().scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
    }
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)