/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE64)
#include "JIT.h"
32#include "CallFrameShuffler.h"
33#include "CodeBlock.h"
34#include "JITInlines.h"
35#include "JSArray.h"
36#include "JSFunction.h"
37#include "Interpreter.h"
38#include "JSCInlines.h"
39#include "LinkBuffer.h"
40#include "OpcodeInlines.h"
41#include "ResultType.h"
42#include "SetupVarargsFrame.h"
43#include "StackAlignment.h"
44#include "ThunkGenerators.h"
45#include <wtf/StringPrintStream.h>
46
47
48namespace JSC {
49
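// Feeds the call's result (left in regT0 by the call sequences below) to the
// value profile for this bytecode, then stores it to the dst virtual register.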
template<typename Op>
void JIT::emitPutCallResult(const Op& bytecode)
{
    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitPutVirtualRegister(bytecode.m_dst.offset());
}

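// Sets up the callee frame for the fixed-argument-count call opcodes. The
// frame's location is statically known from the bytecode's argv, so this just
// points SP at the new frame and stores the argument count; for op_call it
// also records the structure of |this| in the CallLinkInfo's array profile.
// The new frame is laid out roughly as follows (stack grows down):
//
//     argN ... arg0, |this|            <- higher addresses
//     ArgumentCount (payload: argc)
//     Callee
//     CodeBlock                        <- SP is left pointing here
//     ReturnPC    \
//     CallerFrame / CallerFrameAndPC
//
// Stores into the new frame are SP-relative, hence the
// "- sizeof(CallerFrameAndPC)" adjustment below.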
template<typename Op>
std::enable_if_t<
    Op::opcodeID != op_call_varargs && Op::opcodeID != op_construct_varargs
    && Op::opcodeID != op_tail_call_varargs && Op::opcodeID != op_tail_call_forward_arguments
, void>
JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo*)
{
    auto& metadata = bytecode.metadata(m_codeBlock);
    int argCount = bytecode.m_argc;
    int registerOffset = -static_cast<int>(bytecode.m_argv);

    if (Op::opcodeID == op_call && shouldEmitProfiling()) {
        emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
        Jump done = branchIfNotCell(regT0);
        load32(Address(regT0, JSCell::structureIDOffset()), regT0);
        store32(regT0, metadata.m_callLinkInfo.m_arrayProfile.addressOfLastSeenStructureID());
        done.link(this);
    }

    addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
    store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
}

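// Sets up the callee frame for the varargs call opcodes. Here the frame's size
// is only known at runtime, so we call out to C++ twice: a size operation that
// measures the arguments (and checks stack capacity), and a setup operation
// that copies them into the new frame. The forward-arguments variant reuses
// the current frame's own arguments rather than reading an arguments object.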
template<typename Op>
std::enable_if_t<
    Op::opcodeID == op_call_varargs || Op::opcodeID == op_construct_varargs
    || Op::opcodeID == op_tail_call_varargs || Op::opcodeID == op_tail_call_forward_arguments
, void>
JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo* info)
{
    int thisValue = bytecode.m_thisValue.offset();
    int arguments = bytecode.m_arguments.offset();
    int firstFreeRegister = bytecode.m_firstFree.offset();
    int firstVarArgOffset = bytecode.m_firstVarArg;

    emitGetVirtualRegister(arguments, regT1);
    Z_JITOperation_EJZZ sizeOperation;
    if (Op::opcodeID == op_tail_call_forward_arguments)
        sizeOperation = operationSizeFrameForForwardArguments;
    else
        sizeOperation = operationSizeFrameForVarargs;
    callOperation(sizeOperation, regT1, -firstFreeRegister, firstVarArgOffset);
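    // The size operation returns the argument count for the call (checking
    // stack capacity for the new frame as a side effect). emitSetVarargsFrame
    // turns that count into the new frame's address in regT1; SP is then
    // parked below the frame, with stack-aligned headroom, so the C++ setup
    // call that fills in the arguments does not clobber the frame being built.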
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
    emitGetVirtualRegister(arguments, regT2);
    F_JITOperation_EFJZZ setupOperation;
    if (Op::opcodeID == op_tail_call_forward_arguments)
        setupOperation = operationSetupForwardArgumentsFrame;
    else
        setupOperation = operationSetupVarargsFrame;
    callOperation(setupOperation, regT1, regT2, firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    // Profile the argument count.
    load32(Address(regT1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load32(info->addressOfMaxNumArguments(), regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    store32(regT2, info->addressOfMaxNumArguments());
    notBiggest.link(this);

    // Initialize 'this'.
    emitGetVirtualRegister(thisValue, regT0);
    store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}

template<typename Op>
bool JIT::compileCallEval(const Op&)
{
    return false;
}

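// op_call_eval gets bespoke treatment: rather than a patchable JS call, it
// calls operationCallEval, which performs the eval if the callee really is the
// eval function. An empty JSValue result means the operation declined, and the
// slow case below falls back to an ordinary virtual call. The generic overload
// above returns false so that compileOpCall emits the normal call sequence for
// every other opcode.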
132template<>
133bool JIT::compileCallEval(const OpCallEval& bytecode)
134{
135 addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
136 storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
137
138 addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
139 checkStackPointerAlignment();
140
141 callOperation(operationCallEval, regT1);
142
143 addSlowCase(branchIfEmpty(regT0));
144
145 sampleCodeBlock(m_codeBlock);
146
147 emitPutCallResult(bytecode);
148
149 return true;
150}
151
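// Slow case for op_call_eval: the callee was not the eval function, so re-point
// SP at the already-populated callee frame, reload the callee from it, and make
// a plain virtual call through a freshly allocated, unlinked CallLinkInfo.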
void JIT::compileCallEvalSlowCase(const Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = instruction->as<OpCallEval>();
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);

    int registerOffset = -bytecode.m_argv;

    addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);

    load64(Address(stackPointerRegister, sizeof(Register) * CallFrameSlot::callee - sizeof(CallerFrameAndPC)), regT0);
    emitDumbVirtualCall(*vm(), info);
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(bytecode);
}

template<typename Op>
bool JIT::compileTailCall(const Op&, CallLinkInfo*, unsigned)
{
    return false;
}

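// op_tail_call replaces the caller's frame with the callee's instead of
// pushing a new one. CallFrameShuffleData describes where the callee and each
// argument currently live; CallFrameShuffler then moves them into the slots
// the callee expects and restores callee-save registers before the naked tail
// call. The generic overload above returns false so every other opcode takes
// the regular call path.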
180template<>
181bool JIT::compileTailCall(const OpTailCall& bytecode, CallLinkInfo* info, unsigned callLinkInfoIndex)
182{
183 CallFrameShuffleData shuffleData;
184 shuffleData.numPassedArgs = bytecode.m_argc;
185 shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
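    // Note: division binds tighter than subtraction here, so numLocals is
    // m_argv - (sizeof(CallerFrameAndPC) / sizeof(Register)); argv includes
    // the two slots that the CallerFrameAndPC header occupies.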
    shuffleData.numLocals =
        bytecode.m_argv - sizeof(CallerFrameAndPC) / sizeof(Register);
    shuffleData.args.resize(bytecode.m_argc);
    for (unsigned i = 0; i < bytecode.m_argc; ++i) {
        shuffleData.args[i] =
            ValueRecovery::displacedInJSStack(
                virtualRegisterForArgument(i) - bytecode.m_argv,
                DataFormatJS);
    }
    shuffleData.callee =
        ValueRecovery::inGPR(regT0, DataFormatJS);
    shuffleData.setupCalleeSaveRegisters(m_codeBlock);
    info->setFrameShuffleData(shuffleData);
    CallFrameShuffler(*this, shuffleData).prepareForTailCall();
    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
    return true;
}

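// Emits the common call sequence shared by all of the call-shaped opcodes:
// set up the callee frame, record the call-site index, store the callee, then
// take the eval path, the tail-call path, or a patchable near call. For a
// plain op_call the fast path comes out roughly like this pseudo-assembly
// (illustrative only; cfr is the callFrameRegister, and the exact code depends
// on the MacroAssembler backend):
//
//     lea  sp, [cfr + registerOffset*8 + sizeof(CallerFrameAndPC)]
//     mov  [sp + argumentCountPayloadOffset], argc      ; compileSetupFrame
//     mov  [cfr + argumentCountTagOffset], callSiteIndex
//     mov  regT0, [cfr + callee*8]
//     mov  [sp + calleeOffset], regT0
//     cmp  regT0, <patchable: last linked callee>       ; inline call cache
//     jne  <slow path>
//     call <patchable: linked target>                   ; emitNakedCall
//     lea  sp, [cfr + stackPointerOffsetFor(codeBlock)*8]
//     mov  [cfr + dst*8], regT0                         ; emitPutCallResult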
template<typename Op>
void JIT::compileOpCall(const Instruction* instruction, unsigned callLinkInfoIndex)
{
    OpcodeID opcodeID = Op::opcodeID;
    auto bytecode = instruction->as<Op>();
    int callee = bytecode.m_callee.offset();

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    CallLinkInfo* info = nullptr;
    if (opcodeID != op_call_eval)
        info = m_codeBlock->addCallLinkInfo();
    compileSetupFrame(bytecode, info);

    // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
    uint32_t bytecodeOffset = m_codeBlock->bytecodeOffset(instruction);
    uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits();
    store32(TrustedImm32(locationBits), Address(callFrameRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + TagOffset));

    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
    store64(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));
    if (compileCallEval(bytecode))
        return;

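    // The linked-function check: compare the callee against a patchable
    // pointer that starts out null, so the first execution always takes the
    // slow path, which links the call and patches in both the expected callee
    // and the call target.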
    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(nullptr));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    if (compileTailCall(bytecode, info, callLinkInfoIndex))
        return;

    if (opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
        emitRestoreCalleeSaves();
        prepareForTailCallSlow();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(bytecode);
}

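// Slow path for unlinked or mispredicted calls: pass the CallLinkInfo in regT2
// and call the link-call thunk, which resolves the callee and links the fast
// path for future calls. op_tail_call and op_tail_call_varargs must never
// return here, hence the abort right after the call.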
template<typename Op>
void JIT::compileOpCallSlowCase(const Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
    OpcodeID opcodeID = Op::opcodeID;
    ASSERT(opcodeID != op_call_eval);

    linkAllSlowCases(iter);

    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
        emitRestoreCalleeSaves();

    move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);

    m_callCompilationInfo[callLinkInfoIndex].callReturnLocation =
        emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).retaggedCode<NoPtrTag>());

    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
        abortWithReason(JITDidReturnFromTailCall);
        return;
    }

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    auto bytecode = instruction->as<Op>();
    emitPutCallResult(bytecode);
}

void JIT::emit_op_call(const Instruction* currentInstruction)
{
    compileOpCall<OpCall>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_tail_call(const Instruction* currentInstruction)
{
    compileOpCall<OpTailCall>(currentInstruction, m_callLinkInfoIndex++);
}

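// Note: op_call_eval does not allocate a CallLinkInfo on the fast path and so
// does not bump m_callLinkInfoIndex; compileCallEvalSlowCase creates one if
// the eval fast path declines.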
void JIT::emit_op_call_eval(const Instruction* currentInstruction)
{
    compileOpCall<OpCallEval>(currentInstruction, m_callLinkInfoIndex);
}

void JIT::emit_op_call_varargs(const Instruction* currentInstruction)
{
    compileOpCall<OpCallVarargs>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_tail_call_varargs(const Instruction* currentInstruction)
{
    compileOpCall<OpTailCallVarargs>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_tail_call_forward_arguments(const Instruction* currentInstruction)
{
    compileOpCall<OpTailCallForwardArguments>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct_varargs(const Instruction* currentInstruction)
{
    compileOpCall<OpConstructVarargs>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct(const Instruction* currentInstruction)
{
    compileOpCall<OpConstruct>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_call(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpCall>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_tail_call(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpTailCall>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_call_eval(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileCallEvalSlowCase(currentInstruction, iter);
}

void JIT::emitSlow_op_call_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpCallVarargs>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_tail_call_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpTailCallVarargs>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_tail_call_forward_arguments(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpTailCallForwardArguments>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpConstructVarargs>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpConstruct>(currentInstruction, iter, m_callLinkInfoIndex++);
}

} // namespace JSC

#endif // USE(JSVALUE64)
#endif // ENABLE(JIT)