1 | /* |
2 | * Copyright (C) 2011-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #pragma once |
27 | |
28 | #if ENABLE(DFG_JIT) |
29 | |
30 | #include "CCallHelpers.h" |
31 | #include "CodeBlock.h" |
32 | #include "DFGDisassembler.h" |
33 | #include "DFGGraph.h" |
34 | #include "DFGInlineCacheWrapper.h" |
35 | #include "DFGJITCode.h" |
36 | #include "DFGOSRExitCompilationInfo.h" |
37 | #include "GPRInfo.h" |
38 | #include "HandlerInfo.h" |
39 | #include "JITCode.h" |
40 | #include "JITInlineCacheGenerator.h" |
41 | #include "LinkBuffer.h" |
42 | #include "MacroAssembler.h" |
43 | #include "PCToCodeOriginMap.h" |
44 | |
45 | namespace JSC { |
46 | |
47 | class AbstractSamplingCounter; |
48 | class CodeBlock; |
49 | class VM; |
50 | |
51 | namespace DFG { |
52 | |
53 | class JITCodeGenerator; |
54 | class NodeToRegisterMap; |
55 | class OSRExitJumpPlaceholder; |
56 | class SlowPathGenerator; |
57 | class SpeculativeJIT; |
58 | class SpeculationRecovery; |
59 | |
60 | struct EntryLocation; |
61 | struct OSRExit; |
62 | |
63 | // === CallLinkRecord === |
64 | // |
65 | // A record of a call out from JIT code that needs linking to a helper function. |
66 | // Every CallLinkRecord contains a reference to the call instruction & the function |
67 | // that it needs to be linked to. |
68 | struct CallLinkRecord { |
69 | CallLinkRecord(MacroAssembler::Call call, FunctionPtr<OperationPtrTag> function) |
70 | : m_call(call) |
71 | , m_function(function) |
72 | { |
73 | } |
74 | |
75 | MacroAssembler::Call m_call; |
76 | FunctionPtr<OperationPtrTag> m_function; |
77 | }; |
78 | |
79 | // === JITCompiler === |
80 | // |
81 | // DFG::JITCompiler is responsible for generating JIT code from the dataflow graph. |
// It does so by delegating to the SpeculativeJIT, which generates code into a
// MacroAssembler (which the JITCompiler owns through an inheritance
// relationship). The JITCompiler holds references to information required during
// compilation, and also records information used in linking (e.g. a list of all
// calls to be linked).
87 | class JITCompiler : public CCallHelpers { |
88 | public: |
89 | JITCompiler(Graph& dfg); |
90 | ~JITCompiler(); |
91 | |
92 | void compile(); |
93 | void compileFunction(); |
94 | |
95 | // Accessors for properties. |
96 | Graph& graph() { return m_graph; } |
97 | |
98 | // Methods to set labels for the disassembler. |
99 | void setStartOfCode() |
100 | { |
101 | m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), CodeOrigin(BytecodeIndex(0))); |
102 | if (LIKELY(!m_disassembler)) |
103 | return; |
104 | m_disassembler->setStartOfCode(labelIgnoringWatchpoints()); |
105 | } |
106 | |
107 | void setForBlockIndex(BlockIndex blockIndex) |
108 | { |
109 | if (LIKELY(!m_disassembler)) |
110 | return; |
111 | m_disassembler->setForBlockIndex(blockIndex, labelIgnoringWatchpoints()); |
112 | } |
113 | |
114 | void setForNode(Node* node) |
115 | { |
116 | if (LIKELY(!m_disassembler)) |
117 | return; |
118 | m_disassembler->setForNode(node, labelIgnoringWatchpoints()); |
119 | } |
120 | |
121 | void setEndOfMainPath(); |
122 | void setEndOfCode(); |
123 | |
124 | CallSiteIndex addCallSite(CodeOrigin codeOrigin) |
125 | { |
126 | return m_jitCode->common.addCodeOrigin(codeOrigin); |
127 | } |
128 | |
129 | CallSiteIndex emitStoreCodeOrigin(CodeOrigin codeOrigin) |
130 | { |
131 | CallSiteIndex callSite = addCallSite(codeOrigin); |
132 | emitStoreCallSiteIndex(callSite); |
133 | return callSite; |
134 | } |
135 | |
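    // The call site index is stored into the tag half of the call frame's
    // argument-count header slot, where the runtime can later read it back
    // to recover the CodeOrigin of the active call site.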
136 | void emitStoreCallSiteIndex(CallSiteIndex callSite) |
137 | { |
138 | store32(TrustedImm32(callSite.bits()), tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount))); |
139 | } |
140 | |
141 | // Add a call out from JIT code, without an exception check. |
142 | Call appendCall(const FunctionPtr<CFunctionPtrTag> function) |
143 | { |
144 | Call functionCall = call(OperationPtrTag); |
145 | m_calls.append(CallLinkRecord(functionCall, function.retagged<OperationPtrTag>())); |
146 | return functionCall; |
147 | } |
148 | |
149 | void exceptionCheck(); |
150 | |
151 | void exceptionCheckWithCallFrameRollback() |
152 | { |
153 | m_exceptionChecksWithCallFrameRollback.append(emitExceptionCheck(vm())); |
154 | } |
155 | |
    // Fast exception check for calls whose failure mode is returning zero:
    // branches to the exception handler if the return value register is zero.
157 | void fastExceptionCheck() |
158 | { |
159 | callExceptionFuzz(vm()); |
160 | m_exceptionChecks.append(branchTestPtr(Zero, GPRInfo::returnValueGPR)); |
161 | } |
162 | |
163 | OSRExitCompilationInfo& appendExitInfo(MacroAssembler::JumpList jumpsToFail = MacroAssembler::JumpList()) |
164 | { |
165 | OSRExitCompilationInfo info; |
166 | info.m_failureJumps = jumpsToFail; |
167 | m_exitCompilationInfo.append(info); |
168 | return m_exitCompilationInfo.last(); |
169 | } |
170 | |
171 | #if USE(JSVALUE32_64) |
172 | void* addressOfDoubleConstant(Node*); |
173 | #endif |
174 | |
175 | void addGetById(const JITGetByIdGenerator& gen, SlowPathGenerator* slowPath) |
176 | { |
177 | m_getByIds.append(InlineCacheWrapper<JITGetByIdGenerator>(gen, slowPath)); |
178 | } |
179 | |
180 | void addGetByIdWithThis(const JITGetByIdWithThisGenerator& gen, SlowPathGenerator* slowPath) |
181 | { |
182 | m_getByIdsWithThis.append(InlineCacheWrapper<JITGetByIdWithThisGenerator>(gen, slowPath)); |
183 | } |
184 | |
185 | void addGetByVal(const JITGetByValGenerator& gen, SlowPathGenerator* slowPath) |
186 | { |
187 | m_getByVals.append(InlineCacheWrapper<JITGetByValGenerator>(gen, slowPath)); |
188 | } |
189 | |
190 | void addPutById(const JITPutByIdGenerator& gen, SlowPathGenerator* slowPath) |
191 | { |
192 | m_putByIds.append(InlineCacheWrapper<JITPutByIdGenerator>(gen, slowPath)); |
193 | } |
194 | |
195 | void addInstanceOf(const JITInstanceOfGenerator& gen, SlowPathGenerator* slowPath) |
196 | { |
197 | m_instanceOfs.append(InlineCacheWrapper<JITInstanceOfGenerator>(gen, slowPath)); |
198 | } |
199 | |
200 | void addInById(const JITInByIdGenerator& gen, SlowPathGenerator* slowPath) |
201 | { |
202 | m_inByIds.append(InlineCacheWrapper<JITInByIdGenerator>(gen, slowPath)); |
203 | } |
204 | |
205 | void addJSCall(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo* info) |
206 | { |
207 | m_jsCalls.append(JSCallRecord(fastCall, slowCall, targetToCheck, info)); |
208 | } |
209 | |
210 | void addJSDirectCall(Call call, Label slowPath, CallLinkInfo* info) |
211 | { |
212 | m_jsDirectCalls.append(JSDirectCallRecord(call, slowPath, info)); |
213 | } |
214 | |
215 | void addJSDirectTailCall(PatchableJump patchableJump, Call call, Label slowPath, CallLinkInfo* info) |
216 | { |
217 | m_jsDirectTailCalls.append(JSDirectTailCallRecord(patchableJump, call, slowPath, info)); |
218 | } |
219 | |
220 | void addWeakReference(JSCell* target) |
221 | { |
222 | m_graph.m_plan.weakReferences().addLazily(target); |
223 | } |
224 | |
225 | void addWeakReferences(const StructureSet& structureSet) |
226 | { |
227 | for (unsigned i = structureSet.size(); i--;) |
228 | addWeakReference(structureSet[i]); |
229 | } |
230 | |
231 | template<typename T> |
232 | Jump branchWeakPtr(RelationalCondition cond, T left, JSCell* weakPtr) |
233 | { |
234 | Jump result = branchPtr(cond, left, TrustedImmPtr(weakPtr)); |
235 | addWeakReference(weakPtr); |
236 | return result; |
237 | } |
238 | |
239 | template<typename T> |
240 | Jump branchWeakStructure(RelationalCondition cond, T left, RegisteredStructure weakStructure) |
241 | { |
242 | Structure* structure = weakStructure.get(); |
243 | #if USE(JSVALUE64) |
244 | Jump result = branch32(cond, left, TrustedImm32(structure->id())); |
245 | return result; |
246 | #else |
247 | return branchPtr(cond, left, TrustedImmPtr(structure)); |
248 | #endif |
249 | } |
250 | |
251 | void noticeOSREntry(BasicBlock&, JITCompiler::Label blockHead, LinkBuffer&); |
252 | void noticeCatchEntrypoint(BasicBlock&, JITCompiler::Label blockHead, LinkBuffer&, Vector<FlushFormat>&& argumentFormats); |
253 | |
254 | RefPtr<JITCode> jitCode() { return m_jitCode; } |
255 | |
256 | Vector<Label>& blockHeads() { return m_blockHeads; } |
257 | |
258 | CallSiteIndex recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin&, unsigned eventStreamIndex); |
259 | |
260 | PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder() { return m_pcToCodeOriginMapBuilder; } |
261 | |
262 | VM& vm() { return m_graph.m_vm; } |
263 | |
264 | private: |
265 | friend class OSRExitJumpPlaceholder; |
266 | |
267 | // Internal implementation to compile. |
268 | void compileEntry(); |
269 | void compileSetupRegistersForEntry(); |
270 | void compileEntryExecutionFlag(); |
271 | void compileBody(); |
272 | void link(LinkBuffer&); |
273 | |
274 | void exitSpeculativeWithOSR(const OSRExit&, SpeculationRecovery*); |
275 | void compileExceptionHandlers(); |
276 | void linkOSRExits(); |
277 | void disassemble(LinkBuffer&); |
278 | |
279 | void appendExceptionHandlingOSRExit(ExitKind, unsigned eventStreamIndex, CodeOrigin, HandlerInfo* exceptionHandler, CallSiteIndex, MacroAssembler::JumpList jumpsToFail = MacroAssembler::JumpList()); |
280 | |
281 | void makeCatchOSREntryBuffer(); |
282 | |
283 | // The dataflow graph currently being generated. |
284 | Graph& m_graph; |
285 | |
286 | std::unique_ptr<Disassembler> m_disassembler; |
287 | |
288 | RefPtr<JITCode> m_jitCode; |
289 | |
    // Vector of calls out from JIT code that need linking to their helper
    // functions, together with the accumulated exception-check jumps that get
    // linked to the exception handling machinery.
292 | Vector<CallLinkRecord> m_calls; |
293 | JumpList m_exceptionChecks; |
294 | JumpList m_exceptionChecksWithCallFrameRollback; |
295 | |
296 | Vector<Label> m_blockHeads; |
297 | |
299 | struct JSCallRecord { |
300 | JSCallRecord(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, CallLinkInfo* info) |
301 | : fastCall(fastCall) |
302 | , slowCall(slowCall) |
303 | , targetToCheck(targetToCheck) |
304 | , info(info) |
305 | { |
306 | ASSERT(fastCall.isFlagSet(Call::Near)); |
307 | ASSERT(slowCall.isFlagSet(Call::Near)); |
308 | } |
309 | |
310 | Call fastCall; |
311 | Call slowCall; |
312 | DataLabelPtr targetToCheck; |
313 | CallLinkInfo* info; |
314 | }; |
315 | |
316 | struct JSDirectCallRecord { |
317 | JSDirectCallRecord(Call call, Label slowPath, CallLinkInfo* info) |
318 | : call(call) |
319 | , slowPath(slowPath) |
320 | , info(info) |
321 | { |
322 | ASSERT(call.isFlagSet(Call::Near)); |
323 | } |
324 | |
325 | Call call; |
326 | Label slowPath; |
327 | CallLinkInfo* info; |
328 | }; |
329 | |
330 | struct JSDirectTailCallRecord { |
331 | JSDirectTailCallRecord(PatchableJump patchableJump, Call call, Label slowPath, CallLinkInfo* info) |
332 | : patchableJump(patchableJump) |
333 | , call(call) |
334 | , slowPath(slowPath) |
335 | , info(info) |
336 | { |
337 | ASSERT(call.isFlagSet(Call::Near) && call.isFlagSet(Call::Tail)); |
338 | } |
339 | |
340 | PatchableJump patchableJump; |
341 | Call call; |
342 | Label slowPath; |
343 | CallLinkInfo* info; |
344 | }; |
345 | |
347 | Vector<InlineCacheWrapper<JITGetByIdGenerator>, 4> m_getByIds; |
348 | Vector<InlineCacheWrapper<JITGetByIdWithThisGenerator>, 4> m_getByIdsWithThis; |
349 | Vector<InlineCacheWrapper<JITGetByValGenerator>, 4> m_getByVals; |
350 | Vector<InlineCacheWrapper<JITPutByIdGenerator>, 4> m_putByIds; |
351 | Vector<InlineCacheWrapper<JITInByIdGenerator>, 4> m_inByIds; |
352 | Vector<InlineCacheWrapper<JITInstanceOfGenerator>, 4> m_instanceOfs; |
353 | Vector<JSCallRecord, 4> m_jsCalls; |
354 | Vector<JSDirectCallRecord, 4> m_jsDirectCalls; |
355 | Vector<JSDirectTailCallRecord, 4> m_jsDirectTailCalls; |
356 | SegmentedVector<OSRExitCompilationInfo, 4> m_exitCompilationInfo; |
357 | Vector<Vector<Label>> m_exitSiteLabels; |
358 | |
359 | struct ExceptionHandlingOSRExitInfo { |
360 | OSRExitCompilationInfo& exitInfo; |
361 | HandlerInfo baselineExceptionHandler; |
362 | CallSiteIndex callSiteIndex; |
363 | }; |
364 | Vector<ExceptionHandlingOSRExitInfo> m_exceptionHandlerOSRExitCallSites; |
365 | |
366 | std::unique_ptr<SpeculativeJIT> m_speculative; |
367 | PCToCodeOriginMapBuilder m_pcToCodeOriginMapBuilder; |
368 | }; |
369 | |
370 | } } // namespace JSC::DFG |
371 | |
#endif // ENABLE(DFG_JIT)
373 | |