/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

#include "ArithProfile.h"
#include "BytecodeGenerator.h"
#include "CodeBlock.h"
#include "JITAddGenerator.h"
#include "JITBitAndGenerator.h"
#include "JITBitOrGenerator.h"
#include "JITBitXorGenerator.h"
#include "JITDivGenerator.h"
#include "JITInlines.h"
#include "JITLeftShiftGenerator.h"
#include "JITMathIC.h"
#include "JITMulGenerator.h"
#include "JITNegGenerator.h"
#include "JITOperations.h"
#include "JITSubGenerator.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "JSCInlines.h"
#include "LinkBuffer.h"
#include "ResultType.h"
#include "SlowPathCall.h"

namespace JSC {

void JIT::emit_op_jless(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJless>(currentInstruction, LessThan);
}

void JIT::emit_op_jlesseq(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJlesseq>(currentInstruction, LessThanOrEqual);
}

void JIT::emit_op_jgreater(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJgreater>(currentInstruction, GreaterThan);
}

void JIT::emit_op_jgreatereq(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJgreatereq>(currentInstruction, GreaterThanOrEqual);
}

void JIT::emit_op_jnless(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJnless>(currentInstruction, GreaterThanOrEqual);
}

void JIT::emit_op_jnlesseq(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJnlesseq>(currentInstruction, GreaterThan);
}

void JIT::emit_op_jngreater(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJngreater>(currentInstruction, LessThanOrEqual);
}

void JIT::emit_op_jngreatereq(const Instruction* currentInstruction)
{
    emit_compareAndJump<OpJngreatereq>(currentInstruction, LessThan);
}

void JIT::emitSlow_op_jless(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJless>(currentInstruction, DoubleLessThan, operationCompareLess, false, iter);
}

void JIT::emitSlow_op_jlesseq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJlesseq>(currentInstruction, DoubleLessThanOrEqual, operationCompareLessEq, false, iter);
}

void JIT::emitSlow_op_jgreater(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJgreater>(currentInstruction, DoubleGreaterThan, operationCompareGreater, false, iter);
}

void JIT::emitSlow_op_jgreatereq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJgreatereq>(currentInstruction, DoubleGreaterThanOrEqual, operationCompareGreaterEq, false, iter);
}

void JIT::emitSlow_op_jnless(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJnless>(currentInstruction, DoubleGreaterThanOrEqualOrUnordered, operationCompareLess, true, iter);
}

void JIT::emitSlow_op_jnlesseq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJnlesseq>(currentInstruction, DoubleGreaterThanOrUnordered, operationCompareLessEq, true, iter);
}

void JIT::emitSlow_op_jngreater(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJngreater>(currentInstruction, DoubleLessThanOrEqualOrUnordered, operationCompareGreater, true, iter);
}

void JIT::emitSlow_op_jngreatereq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJngreatereq>(currentInstruction, DoubleLessThanOrUnordered, operationCompareGreaterEq, true, iter);
}

void JIT::emit_op_below(const Instruction* currentInstruction)
{
    emit_compareUnsigned<OpBelow>(currentInstruction, Below);
}

void JIT::emit_op_beloweq(const Instruction* currentInstruction)
{
    emit_compareUnsigned<OpBeloweq>(currentInstruction, BelowOrEqual);
}

void JIT::emit_op_jbelow(const Instruction* currentInstruction)
{
    emit_compareUnsignedAndJump<OpJbelow>(currentInstruction, Below);
}

void JIT::emit_op_jbeloweq(const Instruction* currentInstruction)
{
    emit_compareUnsignedAndJump<OpJbeloweq>(currentInstruction, BelowOrEqual);
}

#if USE(JSVALUE64)

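// op_unsigned reinterprets an int32 as an unsigned 32-bit value (e.g. the result
// of an unsigned right shift). Non-negative values can be re-boxed as-is; a
// negative bit pattern means the uint32 value is >= 2^31, which cannot be
// represented as an int32, so it is handled by the slow path instead.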
void JIT::emit_op_unsigned(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpUnsigned>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_operand.offset();

    emitGetVirtualRegister(op1, regT0);
    emitJumpSlowCaseIfNotInt(regT0);
    addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    boxInt32(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(result, regT0);
}

template<typename Op>
void JIT::emit_compareAndJump(const Instruction* instruction, RelationalCondition condition)
{
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

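    // In addition, when one operand is a constant single-character string, the
    // other operand's first character is loaded (emitLoadCharacterString) and
    // compared against it directly.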
    auto bytecode = instruction->as<Op>();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();
    unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
    bool disallowAllocation = false;
    if (isOperandConstantChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(branchIfNotCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue(disallowAllocation)[0])), target);
        return;
    }
    if (isOperandConstantChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(branchIfNotCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue(disallowAllocation)[0])), target);
        return;
    }
    if (isOperandConstantInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotInt(regT0);
        int32_t op2imm = getOperandConstantInt(op2);
        addJump(branch32(condition, regT0, Imm32(op2imm)), target);
        return;
    }
    if (isOperandConstantInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotInt(regT1);
        int32_t op1imm = getOperandConstantInt(op1);
        addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
        return;
    }

    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotInt(regT0);
    emitJumpSlowCaseIfNotInt(regT1);

    addJump(branch32(condition, regT0, regT1), target);
}

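// op_below/op_beloweq and their jump forms are unsigned int32 comparisons. The
// bytecode generator only emits them for operands it knows are Int32 (e.g. loop
// indices), which is why no type checks or slow cases are emitted here.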
template<typename Op>
void JIT::emit_compareUnsignedAndJump(const Instruction* instruction, RelationalCondition condition)
{
    auto bytecode = instruction->as<Op>();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();
    unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
    if (isOperandConstantInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        int32_t op2imm = getOperandConstantInt(op2);
        addJump(branch32(condition, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        int32_t op1imm = getOperandConstantInt(op1);
        addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        addJump(branch32(condition, regT0, regT1), target);
    }
}

template<typename Op>
void JIT::emit_compareUnsigned(const Instruction* instruction, RelationalCondition condition)
{
    auto bytecode = instruction->as<Op>();
    int dst = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();
    if (isOperandConstantInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        int32_t op2imm = getOperandConstantInt(op2);
        compare32(condition, regT0, Imm32(op2imm), regT0);
    } else if (isOperandConstantInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        int32_t op1imm = getOperandConstantInt(op1);
        compare32(commute(condition), regT0, Imm32(op1imm), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        compare32(condition, regT0, regT1, regT0);
    }
    boxBoolean(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(dst);
}

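// Slow path for the relational jumps above. If an operand turns out to be a
// double, it is unboxed into an FPR and the comparison is redone with
// branchDouble; any remaining combination falls back to calling the C++
// comparison operation. `invert` is true for the jn* opcodes, which must branch
// when the operation returns false.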
template<typename Op>
void JIT::emit_compareAndJumpSlow(const Instruction* instruction, DoubleCondition condition, size_t (JIT_OPERATION *operation)(JSGlobalObject*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    auto bytecode = instruction->as<Op>();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();
    unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantChar(op1) || isOperandConstantChar(op2)) {
        linkAllSlowCases(iter);

        emitGetVirtualRegister(op1, argumentGPR0);
        emitGetVirtualRegister(op2, argumentGPR1);
        callOperation(operation, TrustedImmPtr(m_codeBlock->globalObject()), argumentGPR0, argumentGPR1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

    if (isOperandConstantInt(op2)) {
        linkAllSlowCases(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = branchIfNotNumber(regT0);
            add64(numberTagRegister, regT0);
            move64ToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), instruction->size());

            fail1.link(this);
        }

        emitGetVirtualRegister(op2, regT1);
        callOperation(operation, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

    if (isOperandConstantInt(op1)) {
        linkAllSlowCases(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = branchIfNotNumber(regT1);
            add64(numberTagRegister, regT1);
            move64ToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), instruction->size());

            fail1.link(this);
        }

        emitGetVirtualRegister(op1, regT2);
        callOperation(operation, TrustedImmPtr(m_codeBlock->globalObject()), regT2, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

    linkSlowCase(iter); // LHS is not Int.

    if (supportsFloatingPoint()) {
        Jump fail1 = branchIfNotNumber(regT0);
        Jump fail2 = branchIfNotNumber(regT1);
        Jump fail3 = branchIfInt32(regT1);
        add64(numberTagRegister, regT0);
        add64(numberTagRegister, regT1);
        move64ToDouble(regT0, fpRegT0);
        move64ToDouble(regT1, fpRegT1);

        emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

        emitJumpSlowToHot(jump(), instruction->size());

        fail1.link(this);
        fail2.link(this);
        fail3.link(this);
    }

    linkSlowCase(iter); // RHS is not Int.
    callOperation(operation, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1);
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
}

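// op_inc and op_dec only have a fast path for Int32 operands; a non-int value or
// an increment/decrement that overflows the int32 range takes the slow path.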
void JIT::emit_op_inc(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpInc>();
    int srcDst = bytecode.m_srcDst.offset();

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotInt(regT0);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    boxInt32(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(srcDst);
}

void JIT::emit_op_dec(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpDec>();
    int srcDst = bytecode.m_srcDst.offset();

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotInt(regT0);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    boxInt32(regT0, JSValueRegs { regT0 });
    emitPutVirtualRegister(srcDst);
}

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86_64)

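// On x86-64, op_mod is implemented with idiv, which requires the dividend in
// eax (sign-extended into edx) and leaves the remainder in edx. The slow cases
// below cover a zero divisor, INT_MIN / -1 (which would fault), and a zero
// remainder with a negative numerator (the result must be -0, which is not
// representable as an int32).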
void JIT::emit_op_mod(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpMod>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    auto edx = X86Registers::edx;
    auto ecx = X86Registers::ecx;
    ASSERT(regT4 != edx);
    ASSERT(regT4 != ecx);

    emitGetVirtualRegisters(op1, regT4, op2, ecx);
    emitJumpSlowCaseIfNotInt(regT4);
    emitJumpSlowCaseIfNotInt(ecx);

    move(regT4, regT0);
    addSlowCase(branchTest32(Zero, ecx));
    Jump denominatorNotNeg1 = branch32(NotEqual, ecx, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    x86ConvertToDoubleWord32();
    x86Div32(ecx);
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT4, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, edx));
    numeratorPositive.link(this);
    boxInt32(edx, JSValueRegs { regT0 });
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_mod(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
}

#else // CPU(X86_64)

void JIT::emit_op_mod(const Instruction* currentInstruction)
{
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
}

void JIT::emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    UNREACHABLE_FOR_PLATFORM();
}

#endif // CPU(X86_64)

/* ------------------------------ END: OP_MOD ------------------------------ */

#endif // USE(JSVALUE64)

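// Negate, add, mul, and sub are compiled through math ICs (JITNegIC, JITAddIC,
// JITMulIC, JITSubIC): per-instruction inline caches that emit a fast path based
// on the profiled operand types and can later be repatched from the *Optimize
// slow-path operations if those assumptions turn out to be wrong.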
void JIT::emit_op_negate(const Instruction* currentInstruction)
{
    UnaryArithProfile* arithProfile = &currentInstruction->as<OpNegate>().metadata(m_codeBlock).m_arithProfile;
    JITNegIC* negateIC = m_codeBlock->addJITNegIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, negateIC);
    emitMathICFast<OpNegate>(negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate);
}

void JIT::emitSlow_op_negate(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITNegIC* negIC = bitwise_cast<JITNegIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow<OpNegate>(negIC, currentInstruction, operationArithNegateProfiledOptimize, operationArithNegateProfiled, operationArithNegateOptimize);
}

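// Shared fast-path emitter for the bitwise binary ops and lshift. The actual code
// is produced by a snippet generator (e.g. JITBitAndGenerator); whatever the
// snippet cannot handle inline ends up on its slowPathJumpList and is registered
// as this instruction's slow cases.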
template<typename Op, typename SnippetGenerator>
void JIT::emitBitBinaryOpFastPath(const Instruction* currentInstruction, ProfilingPolicy profilingPolicy)
{
    auto bytecode = currentInstruction->as<Op>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT0);
    JSValueRegs rightRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT2;
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
#endif

    SnippetOperand leftOperand;
    SnippetOperand rightOperand;

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);

    gen.generateFastPath(*this);

    ASSERT(gen.didEmitFastPath());
    gen.endJumpList().link(this);
    if (profilingPolicy == ProfilingPolicy::ShouldEmitProfiling)
        emitValueProfilingSiteIfProfiledOpcode(bytecode);
    emitPutVirtualRegister(result, resultRegs);

    addSlowCase(gen.slowPathJumpList());
}

void JIT::emit_op_bitnot(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpBitnot>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_operand.offset();

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT0);
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
#endif

    emitGetVirtualRegister(op1, leftRegs);

    addSlowCase(branchIfNotInt32(leftRegs));
    not32(leftRegs.payloadGPR());
#if USE(JSVALUE64)
    boxInt32(leftRegs.payloadGPR(), leftRegs);
#endif

    emitValueProfilingSiteIfProfiledOpcode(bytecode);

    emitPutVirtualRegister(result, leftRegs);
}

void JIT::emit_op_bitand(const Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpBitand, JITBitAndGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling);
}

void JIT::emit_op_bitor(const Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpBitor, JITBitOrGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling);
}

void JIT::emit_op_bitxor(const Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpBitxor, JITBitXorGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling);
}

void JIT::emit_op_lshift(const Instruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpLshift, JITLeftShiftGenerator>(currentInstruction);
}

void JIT::emitRightShiftFastPath(const Instruction* currentInstruction, OpcodeID opcodeID)
{
    ASSERT(opcodeID == op_rshift || opcodeID == op_urshift);
    switch (opcodeID) {
    case op_rshift:
        emitRightShiftFastPath<OpRshift>(currentInstruction, JITRightShiftGenerator::SignedShift);
        break;
    case op_urshift:
        emitRightShiftFastPath<OpUrshift>(currentInstruction, JITRightShiftGenerator::UnsignedShift);
        break;
    default:
        ASSERT_NOT_REACHED();
    }
}

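// Fast path for op_rshift / op_urshift. The JITRightShiftGenerator snippet
// handles int32 left operands inline and, where supported, a double left operand
// by truncating it to int32 (hence fpRegT0 and, on 32-bit, an extra FPR scratch);
// anything else goes to the slow path.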
template<typename Op>
void JIT::emitRightShiftFastPath(const Instruction* currentInstruction, JITRightShiftGenerator::ShiftType snippetShiftType)
{
    auto bytecode = currentInstruction->as<Op>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT0);
    JSValueRegs rightRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT2;
    FPRReg scratchFPR = InvalidFPRReg;
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
    FPRReg scratchFPR = fpRegT2;
#endif

    SnippetOperand leftOperand;
    SnippetOperand rightOperand;

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
        fpRegT0, scratchGPR, scratchFPR, snippetShiftType);

    gen.generateFastPath(*this);

    ASSERT(gen.didEmitFastPath());
    gen.endJumpList().link(this);
    emitPutVirtualRegister(result, resultRegs);

    addSlowCase(gen.slowPathJumpList());
}

void JIT::emit_op_rshift(const Instruction* currentInstruction)
{
    emitRightShiftFastPath(currentInstruction, op_rshift);
}

void JIT::emit_op_urshift(const Instruction* currentInstruction)
{
    emitRightShiftFastPath(currentInstruction, op_urshift);
}

void JIT::emit_op_add(const Instruction* currentInstruction)
{
    BinaryArithProfile* arithProfile = &currentInstruction->as<OpAdd>().metadata(m_codeBlock).m_arithProfile;
    JITAddIC* addIC = m_codeBlock->addJITAddIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, addIC);
    emitMathICFast<OpAdd>(addIC, currentInstruction, operationValueAddProfiled, operationValueAdd);
}

void JIT::emitSlow_op_add(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITAddIC* addIC = bitwise_cast<JITAddIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow<OpAdd>(addIC, currentInstruction, operationValueAddProfiledOptimize, operationValueAddProfiled, operationValueAddOptimize);
}

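// Fast-path emitter shared by the math ICs (this overload handles unary ICs such
// as negate; the one below handles binary ICs). mathIC->generateInline() tries to
// emit type-specialized inline code; if it cannot, we simply call the (profiled)
// C++ operation here, otherwise the snippet's slow-path jumps become this
// instruction's slow cases.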
template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
void JIT::emitMathICFast(JITUnaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction)
{
    auto bytecode = currentInstruction->as<Op>();
    int result = bytecode.m_dst.offset();
    int operand = bytecode.m_operand.offset();

#if USE(JSVALUE64)
    // ArithNegate benefits from using the same register as src and dst.
    // Since regT1 == argumentGPR1, using regT1 avoids shuffling registers when calling the slow path.
    JSValueRegs srcRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = JSValueRegs(regT1);
    GPRReg scratchGPR = regT2;
#else
    JSValueRegs srcRegs = JSValueRegs(regT1, regT0);
    JSValueRegs resultRegs = JSValueRegs(regT3, regT2);
    GPRReg scratchGPR = regT4;
#endif

#if ENABLE(MATH_IC_STATS)
    auto inlineStart = label();
#endif

    mathIC->m_generator = Generator(resultRegs, srcRegs, scratchGPR);

    emitGetVirtualRegister(operand, srcRegs);

    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.add(currentInstruction, MathICGenerationState()).iterator->value;

    bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
    if (!generatedInlineCode) {
        UnaryArithProfile* arithProfile = mathIC->arithProfile();
        if (arithProfile && shouldEmitProfiling())
            callOperationWithResult(profiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs, arithProfile);
        else
            callOperationWithResult(nonProfiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs);
    } else
        addSlowCase(mathICGenerationState.slowPathJumps);

#if ENABLE(MATH_IC_STATS)
    auto inlineEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);
}

template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction)
{
    auto bytecode = currentInstruction->as<Op>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT1);
    JSValueRegs rightRegs = JSValueRegs(regT2);
    JSValueRegs resultRegs = JSValueRegs(regT0);
    GPRReg scratchGPR = regT3;
    FPRReg scratchFPR = fpRegT2;
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
    FPRReg scratchFPR = fpRegT2;
#endif

    SnippetOperand leftOperand(bytecode.m_operandTypes.first());
    SnippetOperand rightOperand(bytecode.m_operandTypes.second());

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, fpRegT0, fpRegT1, scratchGPR, scratchFPR);

    ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));

    if (!Generator::isLeftOperandValidConstant(leftOperand))
        emitGetVirtualRegister(op1, leftRegs);
    if (!Generator::isRightOperandValidConstant(rightOperand))
        emitGetVirtualRegister(op2, rightRegs);

#if ENABLE(MATH_IC_STATS)
    auto inlineStart = label();
#endif

    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.add(currentInstruction, MathICGenerationState()).iterator->value;

    bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
    if (!generatedInlineCode) {
        if (leftOperand.isConst())
            emitGetVirtualRegister(op1, leftRegs);
        else if (rightOperand.isConst())
            emitGetVirtualRegister(op2, rightRegs);
        BinaryArithProfile* arithProfile = mathIC->arithProfile();
        if (arithProfile && shouldEmitProfiling())
            callOperationWithResult(profiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs, arithProfile);
        else
            callOperationWithResult(nonProfiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs);
    } else
        addSlowCase(mathICGenerationState.slowPathJumps);

#if ENABLE(MATH_IC_STATS)
    auto inlineEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);
}

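// Slow-path emitter for the math ICs. With profiling enabled, the call goes to
// the profiled repatching operation when the generation state requested
// repatching, and to the plain profiled operation otherwise; without profiling it
// goes to the repatching operation. finalizeInlineCode() is deferred to a link
// task so the IC can record the final locations of the code it may repatch.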
template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
void JIT::emitMathICSlow(JITUnaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction)
{
    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
    mathICGenerationState.slowPathStart = label();

    auto bytecode = currentInstruction->as<Op>();
    int result = bytecode.m_dst.offset();

#if USE(JSVALUE64)
    JSValueRegs srcRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = JSValueRegs(regT0);
#else
    JSValueRegs srcRegs = JSValueRegs(regT1, regT0);
    JSValueRegs resultRegs = JSValueRegs(regT3, regT2);
#endif

#if ENABLE(MATH_IC_STATS)
    auto slowPathStart = label();
#endif

    UnaryArithProfile* arithProfile = mathIC->arithProfile();
    if (arithProfile && shouldEmitProfiling()) {
        if (mathICGenerationState.shouldSlowPathRepatch)
            mathICGenerationState.slowPathCall = callOperationWithResult(reinterpret_cast<J_JITOperation_GJMic>(profiledRepatchFunction), resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs, TrustedImmPtr(mathIC));
        else
            mathICGenerationState.slowPathCall = callOperationWithResult(profiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs, arithProfile);
    } else
        mathICGenerationState.slowPathCall = callOperationWithResult(reinterpret_cast<J_JITOperation_GJMic>(repatchFunction), resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), srcRegs, TrustedImmPtr(mathIC));

#if ENABLE(MATH_IC_STATS)
    auto slowPathEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);

    addLinkTask([=] (LinkBuffer& linkBuffer) {
        MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
        mathIC->finalizeInlineCode(mathICGenerationState, linkBuffer);
    });
}

template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction)
{
    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
    mathICGenerationState.slowPathStart = label();

    auto bytecode = currentInstruction->as<Op>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT1);
    JSValueRegs rightRegs = JSValueRegs(regT2);
    JSValueRegs resultRegs = JSValueRegs(regT0);
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
#endif

    SnippetOperand leftOperand(bytecode.m_operandTypes.first());
    SnippetOperand rightOperand(bytecode.m_operandTypes.second());

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));

    if (Generator::isLeftOperandValidConstant(leftOperand))
        emitGetVirtualRegister(op1, leftRegs);
    else if (Generator::isRightOperandValidConstant(rightOperand))
        emitGetVirtualRegister(op2, rightRegs);

#if ENABLE(MATH_IC_STATS)
    auto slowPathStart = label();
#endif

    BinaryArithProfile* arithProfile = mathIC->arithProfile();
    if (arithProfile && shouldEmitProfiling()) {
        if (mathICGenerationState.shouldSlowPathRepatch)
            mathICGenerationState.slowPathCall = callOperationWithResult(bitwise_cast<J_JITOperation_GJJMic>(profiledRepatchFunction), resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs, TrustedImmPtr(mathIC));
        else
            mathICGenerationState.slowPathCall = callOperationWithResult(profiledFunction, resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs, arithProfile);
    } else
        mathICGenerationState.slowPathCall = callOperationWithResult(bitwise_cast<J_JITOperation_GJJMic>(repatchFunction), resultRegs, TrustedImmPtr(m_codeBlock->globalObject()), leftRegs, rightRegs, TrustedImmPtr(mathIC));

#if ENABLE(MATH_IC_STATS)
    auto slowPathEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);

    addLinkTask([=] (LinkBuffer& linkBuffer) {
        MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
        mathIC->finalizeInlineCode(mathICGenerationState, linkBuffer);
    });
}

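// op_div does not use a math IC; it emits a JITDivGenerator snippet directly,
// passing the arith profile so the snippet can record the observed result type.
// If the snippet declines to emit a fast path, the whole operation is compiled
// as a call to slow_path_div.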
void JIT::emit_op_div(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpDiv>();
    int result = bytecode.m_dst.offset();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

#if USE(JSVALUE64)
    JSValueRegs leftRegs = JSValueRegs(regT0);
    JSValueRegs rightRegs = JSValueRegs(regT1);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT2;
#else
    JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
    JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
    JSValueRegs resultRegs = leftRegs;
    GPRReg scratchGPR = regT4;
#endif
    FPRReg scratchFPR = fpRegT2;

    BinaryArithProfile* arithProfile = nullptr;
    if (shouldEmitProfiling())
        arithProfile = &currentInstruction->as<OpDiv>().metadata(m_codeBlock).m_arithProfile;

    SnippetOperand leftOperand(bytecode.m_operandTypes.first());
    SnippetOperand rightOperand(bytecode.m_operandTypes.second());

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
#if USE(JSVALUE64)
    else if (isOperandConstantDouble(op1))
        leftOperand.setConstDouble(getOperandConstantDouble(op1));
#endif
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));
#if USE(JSVALUE64)
    else if (isOperandConstantDouble(op2))
        rightOperand.setConstDouble(getOperandConstantDouble(op2));
#endif

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    JITDivGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
        fpRegT0, fpRegT1, scratchGPR, scratchFPR, arithProfile);

    gen.generateFastPath(*this);

    if (gen.didEmitFastPath()) {
        gen.endJumpList().link(this);
        emitPutVirtualRegister(result, resultRegs);

        addSlowCase(gen.slowPathJumpList());
    } else {
        ASSERT(gen.endJumpList().empty());
        ASSERT(gen.slowPathJumpList().empty());
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
        slowPathCall.call();
    }
}

void JIT::emit_op_mul(const Instruction* currentInstruction)
{
    BinaryArithProfile* arithProfile = &currentInstruction->as<OpMul>().metadata(m_codeBlock).m_arithProfile;
    JITMulIC* mulIC = m_codeBlock->addJITMulIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, mulIC);
    emitMathICFast<OpMul>(mulIC, currentInstruction, operationValueMulProfiled, operationValueMul);
}

void JIT::emitSlow_op_mul(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITMulIC* mulIC = bitwise_cast<JITMulIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow<OpMul>(mulIC, currentInstruction, operationValueMulProfiledOptimize, operationValueMulProfiled, operationValueMulOptimize);
}

void JIT::emit_op_sub(const Instruction* currentInstruction)
{
    BinaryArithProfile* arithProfile = &currentInstruction->as<OpSub>().metadata(m_codeBlock).m_arithProfile;
    JITSubIC* subIC = m_codeBlock->addJITSubIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, subIC);
    emitMathICFast<OpSub>(subIC, currentInstruction, operationValueSubProfiled, operationValueSub);
}

void JIT::emitSlow_op_sub(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITSubIC* subIC = bitwise_cast<JITSubIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow<OpSub>(subIC, currentInstruction, operationValueSubProfiledOptimize, operationValueSubProfiled, operationValueSubOptimize);
}

/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL, OP_POW ------------------------------ */

} // namespace JSC

#endif // ENABLE(JIT)