/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

#include "ArithProfile.h"
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITAddGenerator.h"
#include "JITBitAndGenerator.h"
#include "JITBitOrGenerator.h"
#include "JITBitXorGenerator.h"
#include "JITDivGenerator.h"
#include "JITInlines.h"
#include "JITLeftShiftGenerator.h"
#include "JITMathIC.h"
#include "JITMulGenerator.h"
#include "JITNegGenerator.h"
#include "JITOperations.h"
#include "JITRightShiftGenerator.h"
#include "JITSubGenerator.h"
#include "JSArray.h"
#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "ResultType.h"
#include "SlowPathCall.h"

namespace JSC {

void JIT::emit_op_jless(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJless>(currentInstruction, LessThan);
}

void JIT::emit_op_jlesseq(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJlesseq>(currentInstruction, LessThanOrEqual);
}

void JIT::emit_op_jgreater(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJgreater>(currentInstruction, GreaterThan);
}

void JIT::emit_op_jgreatereq(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJgreatereq>(currentInstruction, GreaterThanOrEqual);
}

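// The negated compare ops (op_jnless etc.) jump when the comparison is false,
// so the integer fast path tests the complementary condition (op_jnless tests
// GreaterThanOrEqual, and so on). NaN handling is deferred to the slow path.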
void JIT::emit_op_jnless(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJnless>(currentInstruction, GreaterThanOrEqual);
}

void JIT::emit_op_jnlesseq(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJnlesseq>(currentInstruction, GreaterThan);
}

void JIT::emit_op_jngreater(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJngreater>(currentInstruction, LessThanOrEqual);
}

void JIT::emit_op_jngreatereq(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJngreatereq>(currentInstruction, LessThan);
}

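// The slow paths compare doubles. The negated ops pass an "...OrUnordered"
// double condition together with invert == true: an unordered (NaN) operand
// must make op_jnless jump, while op_jless must fall through.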
void JIT::emitSlow_op_jless(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJless>(currentInstruction, DoubleLessThan, operationCompareLess, false, iter);
}

void JIT::emitSlow_op_jlesseq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJlesseq>(currentInstruction, DoubleLessThanOrEqual, operationCompareLessEq, false, iter);
}

void JIT::emitSlow_op_jgreater(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJgreater>(currentInstruction, DoubleGreaterThan, operationCompareGreater, false, iter);
}

void JIT::emitSlow_op_jgreatereq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJgreatereq>(currentInstruction, DoubleGreaterThanOrEqual, operationCompareGreaterEq, false, iter);
}

void JIT::emitSlow_op_jnless(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJnless>(currentInstruction, DoubleGreaterThanOrEqualOrUnordered, operationCompareLess, true, iter);
}

void JIT::emitSlow_op_jnlesseq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJnlesseq>(currentInstruction, DoubleGreaterThanOrUnordered, operationCompareLessEq, true, iter);
}

void JIT::emitSlow_op_jngreater(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJngreater>(currentInstruction, DoubleLessThanOrEqualOrUnordered, operationCompareGreater, true, iter);
}

void JIT::emitSlow_op_jngreatereq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJngreatereq>(currentInstruction, DoubleLessThanOrUnordered, operationCompareGreaterEq, true, iter);
}

void JIT::emit_op_below(const Instruction* currentInstruction)
{
    emit_compareUnsigned<OpBelow>(currentInstruction, Below);
}

void JIT::emit_op_beloweq(const Instruction* currentInstruction)
{
    emit_compareUnsigned<OpBeloweq>(currentInstruction, BelowOrEqual);
}

void JIT::emit_op_jbelow(const Instruction* currentInstruction)
{
    emit_compareUnsignedAndJump<OpJbelow>(currentInstruction, Below);
}

void JIT::emit_op_jbeloweq(const Instruction* currentInstruction)
{
    emit_compareUnsignedAndJump<OpJbeloweq>(currentInstruction, BelowOrEqual);
}

#if USE(JSVALUE64)

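// op_unsigned re-tags the int32 result of an unsigned operation. A
// non-negative int32 reads the same whether interpreted as signed or
// unsigned, so the fast path only needs to bail out when the sign bit is
// set; the value then needs a double representation, produced on the slow
// path.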
void JIT::emit_op_unsigned(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpUnsigned>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_operand.offset();

    emitGetVirtualRegister(op1, regT0);
    emitJumpSlowCaseIfNotInt(regT0);
    addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    boxInt32(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(result, regT0);
}

template<typename Op>
void JIT::emit_compareAndJump(const Instruction* instruction, RelationalCondition condition)
{
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    auto bytecode = instruction->as<Op>();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();
    unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
    bool disallowAllocation = false;
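    // Comparisons against a single-character string constant are also done
    // inline: load the first character of the non-constant operand and
    // compare it to the constant's character, bailing to the slow path (via
    // the failure list of emitLoadCharacterString()) for anything that is
    // not a flat single-character string.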
    if (isOperandConstantChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(branchIfNotCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue(disallowAllocation)[0])), target);
        return;
    }
    if (isOperandConstantChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(branchIfNotCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue(disallowAllocation)[0])), target);
        return;
    }
    if (isOperandConstantInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotInt(regT0);
        int32_t op2imm = getOperandConstantInt(op2);
        addJump(branch32(condition, regT0, Imm32(op2imm)), target);
        return;
    }
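    // With the constant on the left, compare with the operands swapped and
    // the condition commuted: "1 < x" is emitted as "x > 1".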
    if (isOperandConstantInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotInt(regT1);
        int32_t op1imm = getOperandConstantInt(op1);
        addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
        return;
    }

    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotInt(regT0);
    emitJumpSlowCaseIfNotInt(regT1);

    addJump(branch32(condition, regT0, regT1), target);
}

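// op_below/op_beloweq and their jump forms are only emitted by the bytecode
// generator for operands it knows are int32 (such as internal loop
// counters), so unlike the signed compares they need no type checks and no
// slow path.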
template<typename Op>
void JIT::emit_compareUnsignedAndJump(const Instruction* instruction, RelationalCondition condition)
{
    auto bytecode = instruction->as<Op>();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();
    unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
    if (isOperandConstantInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        int32_t op2imm = getOperandConstantInt(op2);
        addJump(branch32(condition, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        int32_t op1imm = getOperandConstantInt(op1);
        addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        addJump(branch32(condition, regT0, regT1), target);
    }
}

template<typename Op>
void JIT::emit_compareUnsigned(const Instruction* instruction, RelationalCondition condition)
{
    auto bytecode = instruction->as<Op>();
    int dst = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();
    if (isOperandConstantInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        int32_t op2imm = getOperandConstantInt(op2);
        compare32(condition, regT0, Imm32(op2imm), regT0);
    } else if (isOperandConstantInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        int32_t op1imm = getOperandConstantInt(op1);
        compare32(commute(condition), regT0, Imm32(op1imm), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        compare32(condition, regT0, regT1, regT0);
    }
    boxBoolean(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(dst);
}

template<typename Op>
void JIT::emit_compareAndJumpSlow(const Instruction* instruction, DoubleCondition condition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    auto bytecode = instruction->as<Op>();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();
    unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantChar(op1) || isOperandConstantChar(op2)) {
        linkAllSlowCases(iter);

        emitGetVirtualRegister(op1, argumentGPR0);
        emitGetVirtualRegister(op2, argumentGPR1);
        callOperation(operation, argumentGPR0, argumentGPR1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

    if (isOperandConstantInt(op2)) {
        linkAllSlowCases(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = branchIfNotNumber(regT0);
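            // On JSVALUE64 a boxed double is the raw IEEE-754 bit pattern
            // plus 2^48; tagTypeNumberRegister holds 0xffff000000000000,
            // i.e. -2^48 modulo 2^64, so adding it recovers the raw bits.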
            add64(tagTypeNumberRegister, regT0);
            move64ToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), instruction->size());

            fail1.link(this);
        }

        emitGetVirtualRegister(op2, regT1);
        callOperation(operation, regT0, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

    if (isOperandConstantInt(op1)) {
        linkAllSlowCases(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = branchIfNotNumber(regT1);
            add64(tagTypeNumberRegister, regT1);
            move64ToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), instruction->size());

            fail1.link(this);
        }

        emitGetVirtualRegister(op1, regT2);
        callOperation(operation, regT2, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

    linkSlowCase(iter); // LHS is not Int.

    if (supportsFloatingPoint()) {
        Jump fail1 = branchIfNotNumber(regT0);
        Jump fail2 = branchIfNotNumber(regT1);
        Jump fail3 = branchIfInt32(regT1);
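        // The inline path below compares two boxed doubles; if the RHS is
        // still an int32 (only the LHS type check failed), fall through to
        // the C++ comparison rather than converting it here.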
        add64(tagTypeNumberRegister, regT0);
        add64(tagTypeNumberRegister, regT1);
        move64ToDouble(regT0, fpRegT0);
        move64ToDouble(regT1, fpRegT1);

        emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

        emitJumpSlowToHot(jump(), instruction->size());

        fail1.link(this);
        fail2.link(this);
        fail3.link(this);
    }

    linkSlowCase(iter); // RHS is not Int.
    callOperation(operation, regT0, regT1);
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
}

void JIT::emit_op_inc(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpInc>();
    int srcDst = bytecode.m_srcDst.offset();

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotInt(regT0);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    boxInt32(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(srcDst);
}

void JIT::emit_op_dec(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpDec>();
    int srcDst = bytecode.m_srcDst.offset();

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotInt(regT0);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    boxInt32(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(srcDst);
}

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86_64)

void JIT::emit_op_mod(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpMod>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    auto edx = X86Registers::edx;
    auto ecx = X86Registers::ecx;
    ASSERT(regT4 != edx);
    ASSERT(regT4 != ecx);

    emitGetVirtualRegisters(op1, regT4, op2, ecx);
    emitJumpSlowCaseIfNotInt(regT4);
    emitJumpSlowCaseIfNotInt(ecx);

    move(regT4, regT0);
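    // idiv faults on a zero divisor and overflows on INT32_MIN / -1, so both
    // cases take the slow path.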
    addSlowCase(branchTest32(Zero, ecx));
    Jump denominatorNotNeg1 = branch32(NotEqual, ecx, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    x86ConvertToDoubleWord32();
    x86Div32(ecx);
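    // A zero remainder with a negative numerator means the result is -0,
    // which has no int32 representation; take the slow path so it can be
    // produced as a double.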
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT4, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, edx));
    numeratorPositive.link(this);
    boxInt32(edx, JSValueRegs { regT0 });
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_mod(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
}

#else // CPU(X86_64)

void JIT::emit_op_mod(const Instruction* currentInstruction)
{
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
}

void JIT::emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    UNREACHABLE_FOR_PLATFORM();
}

#endif // CPU(X86_64)

/* ------------------------------ END: OP_MOD ------------------------------ */

#endif // USE(JSVALUE64)

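// Negate, add, mul and sub go through a JITMathIC: the IC emits an inline
// fast path specialized by the op's ArithProfile, and its slow-path entry
// points (the *Optimize operations) can repatch that inline code as new
// operand types are observed.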
void JIT::emit_op_negate(const Instruction* currentInstruction)
{
    ArithProfile* arithProfile = &currentInstruction->as<OpNegate>().metadata(m_codeBlock).m_arithProfile;
    JITNegIC* negateIC = m_codeBlock->addJITNegIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, negateIC);
    emitMathICFast<OpNegate>(negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate);
}

void JIT::emitSlow_op_negate(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITNegIC* negIC = bitwise_cast<JITNegIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow<OpNegate>(negIC, currentInstruction, operationArithNegateProfiledOptimize, operationArithNegateProfiled, operationArithNegateOptimize);
}

template<typename Op, typename SnippetGenerator>
void JIT::emitBitBinaryOpFastPath(const Instruction* currentInstruction, ProfilingPolicy profilingPolicy)
{
    auto bytecode = currentInstruction->as<Op>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT0);
    JSValueRegs rightRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT2;
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
#endif

    SnippetOperand leftOperand;
    SnippetOperand rightOperand;

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);

    gen.generateFastPath(*this);

    ASSERT(gen.didEmitFastPath());
    gen.endJumpList().link(this);
    if (profilingPolicy == ProfilingPolicy::ShouldEmitProfiling)
        emitValueProfilingSiteIfProfiledOpcode(bytecode);
    emitPutVirtualRegister(result, resultRegs);

    addSlowCase(gen.slowPathJumpList());
}

void JIT::emit_op_bitnot(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpBitnot>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_operand.offset();

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT0);
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
#endif

    emitGetVirtualRegister(op1, leftRegs);

    addSlowCase(branchIfNotInt32(leftRegs));
    not32(leftRegs.payloadGPR());
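    // On JSVALUE64 the int32 result must be re-boxed; on JSVALUE32_64 the
    // tag word still says int32, so only the payload changed.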
#if USE(JSVALUE64)
    boxInt32(leftRegs.payloadGPR(), leftRegs);
#endif

    emitValueProfilingSiteIfProfiledOpcode(bytecode);

    emitPutVirtualRegister(result, leftRegs);
}

void JIT::emit_op_bitand(const Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpBitand, JITBitAndGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling);
}

void JIT::emit_op_bitor(const Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpBitor, JITBitOrGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling);
}

void JIT::emit_op_bitxor(const Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpBitxor, JITBitXorGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling);
}

void JIT::emit_op_lshift(const Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpLshift, JITLeftShiftGenerator>(currentInstruction);
}

void JIT::emitRightShiftFastPath(const Instruction* currentInstruction, OpcodeID opcodeID)
{
    ASSERT(opcodeID == op_rshift || opcodeID == op_urshift);
    switch (opcodeID) {
    case op_rshift:
        emitRightShiftFastPath<OpRshift>(currentInstruction, JITRightShiftGenerator::SignedShift);
        break;
    case op_urshift:
        emitRightShiftFastPath<OpUrshift>(currentInstruction, JITRightShiftGenerator::UnsignedShift);
        break;
    default:
        ASSERT_NOT_REACHED();
    }
}

template<typename Op>
void JIT::emitRightShiftFastPath(const Instruction* currentInstruction, JITRightShiftGenerator::ShiftType snippetShiftType)
{
    auto bytecode = currentInstruction->as<Op>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT0);
    JSValueRegs rightRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT2;
    FPRReg scratchFPR = InvalidFPRReg;
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
    FPRReg scratchFPR = fpRegT2;
#endif

    SnippetOperand leftOperand;
    SnippetOperand rightOperand;

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
        fpRegT0, scratchGPR, scratchFPR, snippetShiftType);

    gen.generateFastPath(*this);

    ASSERT(gen.didEmitFastPath());
    gen.endJumpList().link(this);
    emitPutVirtualRegister(result, resultRegs);

    addSlowCase(gen.slowPathJumpList());
}

void JIT::emit_op_rshift(const Instruction* currentInstruction)
{
    emitRightShiftFastPath(currentInstruction, op_rshift);
}

void JIT::emit_op_urshift(const Instruction* currentInstruction)
{
    emitRightShiftFastPath(currentInstruction, op_urshift);
}

ALWAYS_INLINE static OperandTypes getOperandTypes(const ArithProfile& arithProfile)
{
    return OperandTypes(arithProfile.lhsResultType(), arithProfile.rhsResultType());
}

void JIT::emit_op_add(const Instruction* currentInstruction)
{
    ArithProfile* arithProfile = &currentInstruction->as<OpAdd>().metadata(m_codeBlock).m_arithProfile;
    JITAddIC* addIC = m_codeBlock->addJITAddIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, addIC);
    emitMathICFast<OpAdd>(addIC, currentInstruction, operationValueAddProfiled, operationValueAdd);
}

void JIT::emitSlow_op_add(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITAddIC* addIC = bitwise_cast<JITAddIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow<OpAdd>(addIC, currentInstruction, operationValueAddProfiledOptimize, operationValueAddProfiled, operationValueAddOptimize);
}

template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
void JIT::emitMathICFast(JITUnaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction)
{
    auto bytecode = currentInstruction->as<Op>();
    int result = bytecode.m_dst.offset();
    int operand = bytecode.m_operand.offset();

#if USE(JSVALUE64)
    // ArithNegate benefits from using the same register for src and dst.
    // Since regT1 == argumentGPR1, using regT1 avoids shuffling registers when calling the slow path.
    JSValueRegs srcRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = JSValueRegs(regT1);
    GPRReg scratchGPR = regT2;
#else
    JSValueRegs srcRegs = JSValueRegs(regT1, regT0);
    JSValueRegs resultRegs = JSValueRegs(regT3, regT2);
    GPRReg scratchGPR = regT4;
#endif

#if ENABLE(MATH_IC_STATS)
    auto inlineStart = label();
#endif

    mathIC->m_generator = Generator(resultRegs, srcRegs, scratchGPR);

    emitGetVirtualRegister(operand, srcRegs);

    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.add(currentInstruction, MathICGenerationState()).iterator->value;

    bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
    if (!generatedInlineCode) {
        ArithProfile* arithProfile = mathIC->arithProfile();
        if (arithProfile && shouldEmitProfiling())
            callOperationWithResult(profiledFunction, resultRegs, srcRegs, arithProfile);
        else
            callOperationWithResult(nonProfiledFunction, resultRegs, srcRegs);
    } else
        addSlowCase(mathICGenerationState.slowPathJumps);

#if ENABLE(MATH_IC_STATS)
    auto inlineEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);
}

template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction)
{
    auto bytecode = currentInstruction->as<Op>();
    OperandTypes types = getOperandTypes(copiedArithProfile(bytecode));
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT1);
    JSValueRegs rightRegs = JSValueRegs(regT2);
    JSValueRegs resultRegs = JSValueRegs(regT0);
    GPRReg scratchGPR = regT3;
    FPRReg scratchFPR = fpRegT2;
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
    FPRReg scratchFPR = fpRegT2;
#endif

    SnippetOperand leftOperand(types.first());
    SnippetOperand rightOperand(types.second());

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, fpRegT0, fpRegT1, scratchGPR, scratchFPR);

    ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));

    if (!Generator::isLeftOperandValidConstant(leftOperand))
        emitGetVirtualRegister(op1, leftRegs);
    if (!Generator::isRightOperandValidConstant(rightOperand))
        emitGetVirtualRegister(op2, rightRegs);

#if ENABLE(MATH_IC_STATS)
    auto inlineStart = label();
#endif

    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.add(currentInstruction, MathICGenerationState()).iterator->value;

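    // generateInline() can decline to emit a fast path (for example when the
    // generator does not support this combination of operand types). In that
    // case any constant operand still has to be materialized before calling
    // the C++ operation.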
    bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
    if (!generatedInlineCode) {
        if (leftOperand.isConst())
            emitGetVirtualRegister(op1, leftRegs);
        else if (rightOperand.isConst())
            emitGetVirtualRegister(op2, rightRegs);
        ArithProfile* arithProfile = mathIC->arithProfile();
        if (arithProfile && shouldEmitProfiling())
            callOperationWithResult(profiledFunction, resultRegs, leftRegs, rightRegs, arithProfile);
        else
            callOperationWithResult(nonProfiledFunction, resultRegs, leftRegs, rightRegs);
    } else
        addSlowCase(mathICGenerationState.slowPathJumps);

#if ENABLE(MATH_IC_STATS)
    auto inlineEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);
}

template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
void JIT::emitMathICSlow(JITUnaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction)
{
    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
    mathICGenerationState.slowPathStart = label();

    auto bytecode = currentInstruction->as<Op>();
    int result = bytecode.m_dst.offset();

#if USE(JSVALUE64)
    JSValueRegs srcRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = JSValueRegs(regT0);
#else
    JSValueRegs srcRegs = JSValueRegs(regT1, regT0);
    JSValueRegs resultRegs = JSValueRegs(regT3, regT2);
#endif

#if ENABLE(MATH_IC_STATS)
    auto slowPathStart = label();
#endif

    ArithProfile* arithProfile = mathIC->arithProfile();
    if (arithProfile && shouldEmitProfiling()) {
        if (mathICGenerationState.shouldSlowPathRepatch)
            mathICGenerationState.slowPathCall = callOperationWithResult(reinterpret_cast<J_JITOperation_EJMic>(profiledRepatchFunction), resultRegs, srcRegs, TrustedImmPtr(mathIC));
        else
            mathICGenerationState.slowPathCall = callOperationWithResult(profiledFunction, resultRegs, srcRegs, arithProfile);
    } else
        mathICGenerationState.slowPathCall = callOperationWithResult(reinterpret_cast<J_JITOperation_EJMic>(repatchFunction), resultRegs, srcRegs, TrustedImmPtr(mathIC));

#if ENABLE(MATH_IC_STATS)
    auto slowPathEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);

    addLinkTask([=] (LinkBuffer& linkBuffer) {
        MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
        mathIC->finalizeInlineCode(mathICGenerationState, linkBuffer);
    });
}

template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction)
{
    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
    mathICGenerationState.slowPathStart = label();

    auto bytecode = currentInstruction->as<Op>();
    OperandTypes types = getOperandTypes(copiedArithProfile(bytecode));
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT1);
    JSValueRegs rightRegs = JSValueRegs(regT2);
    JSValueRegs resultRegs = JSValueRegs(regT0);
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
#endif

    SnippetOperand leftOperand(types.first());
    SnippetOperand rightOperand(types.second());

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));

    if (Generator::isLeftOperandValidConstant(leftOperand))
        emitGetVirtualRegister(op1, leftRegs);
    else if (Generator::isRightOperandValidConstant(rightOperand))
        emitGetVirtualRegister(op2, rightRegs);

#if ENABLE(MATH_IC_STATS)
    auto slowPathStart = label();
#endif

    ArithProfile* arithProfile = mathIC->arithProfile();
    if (arithProfile && shouldEmitProfiling()) {
        if (mathICGenerationState.shouldSlowPathRepatch)
            mathICGenerationState.slowPathCall = callOperationWithResult(bitwise_cast<J_JITOperation_EJJMic>(profiledRepatchFunction), resultRegs, leftRegs, rightRegs, TrustedImmPtr(mathIC));
        else
            mathICGenerationState.slowPathCall = callOperationWithResult(profiledFunction, resultRegs, leftRegs, rightRegs, arithProfile);
    } else
        mathICGenerationState.slowPathCall = callOperationWithResult(bitwise_cast<J_JITOperation_EJJMic>(repatchFunction), resultRegs, leftRegs, rightRegs, TrustedImmPtr(mathIC));

#if ENABLE(MATH_IC_STATS)
    auto slowPathEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);

    addLinkTask([=] (LinkBuffer& linkBuffer) {
        MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
        mathIC->finalizeInlineCode(mathICGenerationState, linkBuffer);
    });
}

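// Division does not use a JITMathIC: the snippet generator either emits the
// whole fast path inline or declines entirely, in which case we fall back to
// the generic slow-path call (and assert that no jumps were emitted).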
void JIT::emit_op_div(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpDiv>();
    auto& metadata = bytecode.metadata(m_codeBlock);
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

#if USE(JSVALUE64)
    OperandTypes types = getOperandTypes(metadata.m_arithProfile);
    JSValueRegs leftRegs = JSValueRegs(regT0);
    JSValueRegs rightRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT2;
#else
    OperandTypes types = getOperandTypes(metadata.m_arithProfile);
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
#endif
    FPRReg scratchFPR = fpRegT2;

    ArithProfile* arithProfile = nullptr;
    if (shouldEmitProfiling())
        arithProfile = &metadata.m_arithProfile;

    SnippetOperand leftOperand(types.first());
    SnippetOperand rightOperand(types.second());

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
#if USE(JSVALUE64)
    else if (isOperandConstantDouble(op1))
        leftOperand.setConstDouble(getOperandConstantDouble(op1));
#endif
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));
#if USE(JSVALUE64)
    else if (isOperandConstantDouble(op2))
        rightOperand.setConstDouble(getOperandConstantDouble(op2));
#endif

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    JITDivGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
        fpRegT0, fpRegT1, scratchGPR, scratchFPR, arithProfile);

    gen.generateFastPath(*this);

    if (gen.didEmitFastPath()) {
        gen.endJumpList().link(this);
        emitPutVirtualRegister(result, resultRegs);

        addSlowCase(gen.slowPathJumpList());
    } else {
        ASSERT(gen.endJumpList().empty());
        ASSERT(gen.slowPathJumpList().empty());
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
        slowPathCall.call();
    }
}

void JIT::emit_op_mul(const Instruction* currentInstruction)
{
    ArithProfile* arithProfile = &currentInstruction->as<OpMul>().metadata(m_codeBlock).m_arithProfile;
    JITMulIC* mulIC = m_codeBlock->addJITMulIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, mulIC);
    emitMathICFast<OpMul>(mulIC, currentInstruction, operationValueMulProfiled, operationValueMul);
}

void JIT::emitSlow_op_mul(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITMulIC* mulIC = bitwise_cast<JITMulIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow<OpMul>(mulIC, currentInstruction, operationValueMulProfiledOptimize, operationValueMulProfiled, operationValueMulOptimize);
}

void JIT::emit_op_sub(const Instruction* currentInstruction)
{
    ArithProfile* arithProfile = &currentInstruction->as<OpSub>().metadata(m_codeBlock).m_arithProfile;
    JITSubIC* subIC = m_codeBlock->addJITSubIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, subIC);
    emitMathICFast<OpSub>(subIC, currentInstruction, operationValueSubProfiled, operationValueSub);
}

void JIT::emitSlow_op_sub(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITSubIC* subIC = bitwise_cast<JITSubIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow<OpSub>(subIC, currentInstruction, operationValueSubProfiledOptimize, operationValueSubProfiled, operationValueSubOptimize);
}

/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL, OP_POW ------------------------------ */

} // namespace JSC

#endif // ENABLE(JIT)