1 | /* |
2 | * Copyright (C) 2008-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #include "config.h" |
27 | |
28 | #if ENABLE(JIT) |
29 | #if USE(JSVALUE32_64) |
30 | #include "JIT.h" |
31 | |
32 | #include "CodeBlock.h" |
33 | #include "JITInlines.h" |
34 | #include "JSArray.h" |
35 | #include "JSFunction.h" |
36 | #include "Interpreter.h" |
37 | #include "JSCInlines.h" |
38 | #include "ResultType.h" |
39 | #include "SlowPathCall.h" |
40 | |
41 | |
42 | namespace JSC { |
43 | |
template <typename Op>
void JIT::emit_compareAndJump(const Instruction* instruction, RelationalCondition condition)
{
    // Emits a fused compare-and-branch for the relational jump opcodes
    // (op_jless and friends). Inline fast paths cover single-character
    // string constants and int32 operands; double operands fall through to
    // emitBinaryDoubleOp, and everything else becomes a slow case.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    auto bytecode = instruction->as<Op>();
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();
    unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);

    // Character less.
    if (isOperandConstantChar(op1)) {
        // lhs is a constant one-character string: compare the first character
        // of the rhs string against it. A non-cell rhs, or one that is not a
        // single-character string, takes the slow case. The condition is
        // commuted because the constant appears on the opposite side here.
        emitLoad(op2, regT1, regT0);
        addSlowCase(branchIfNotCell(regT1));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantChar(op2)) {
        // Mirror image: rhs is the constant one-character string.
        emitLoad(op1, regT1, regT0);
        addSlowCase(branchIfNotCell(regT1));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantInt(op1)) {
        // Constant int32 lhs: load only rhs (tag in regT3, payload in regT2)
        // and commute the condition so the immediate sits on the right.
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branchIfNotInt32(regT3));
        addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantInt(op2)) {
        // Constant int32 rhs: load only lhs (tag in regT1, payload in regT0).
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branchIfNotInt32(regT1));
        addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        // General case: both operands loaded; either may turn out non-int32.
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branchIfNotInt32(regT1));
        notInt32Op2.append(branchIfNotInt32(regT3));
        addJump(branch32(condition, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        // No FPU: anything non-int32 must go to the slow path.
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    // The register-availability flags tell emitBinaryDoubleOp which operands
    // were already loaded above (a constant-int operand was never loaded).
    emitBinaryDoubleOp<Op>(instruction, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantInt(op1), isOperandConstantInt(op1) || !isOperandConstantInt(op2));
    end.link(this);
}
100 | |
101 | template <typename Op> |
102 | void JIT::emit_compareUnsignedAndJump(const Instruction* instruction, RelationalCondition condition) |
103 | { |
104 | auto bytecode = instruction->as<Op>(); |
105 | int op1 = bytecode.m_lhs.offset(); |
106 | int op2 = bytecode.m_rhs.offset(); |
107 | unsigned target = jumpTarget(instruction, bytecode.m_targetLabel); |
108 | |
109 | if (isOperandConstantInt(op1)) { |
110 | emitLoad(op2, regT3, regT2); |
111 | addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target); |
112 | } else if (isOperandConstantInt(op2)) { |
113 | emitLoad(op1, regT1, regT0); |
114 | addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target); |
115 | } else { |
116 | emitLoad2(op1, regT1, regT0, op2, regT3, regT2); |
117 | addJump(branch32(condition, regT0, regT2), target); |
118 | } |
119 | } |
120 | |
121 | template <typename Op> |
122 | void JIT::emit_compareUnsigned(const Instruction* instruction, RelationalCondition condition) |
123 | { |
124 | auto bytecode = instruction->as<Op>(); |
125 | int dst = bytecode.m_dst.offset(); |
126 | int op1 = bytecode.m_lhs.offset(); |
127 | int op2 = bytecode.m_rhs.offset(); |
128 | |
129 | if (isOperandConstantInt(op1)) { |
130 | emitLoad(op2, regT3, regT2); |
131 | compare32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32()), regT0); |
132 | } else if (isOperandConstantInt(op2)) { |
133 | emitLoad(op1, regT1, regT0); |
134 | compare32(condition, regT0, Imm32(getConstantOperand(op2).asInt32()), regT0); |
135 | } else { |
136 | emitLoad2(op1, regT1, regT0, op2, regT3, regT2); |
137 | compare32(condition, regT0, regT2, regT0); |
138 | } |
139 | emitStoreBool(dst, regT0); |
140 | } |
141 | |
142 | template <typename Op> |
143 | void JIT::emit_compareAndJumpSlow(const Instruction *instruction, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter) |
144 | { |
145 | auto bytecode = instruction->as<Op>(); |
146 | int op1 = bytecode.m_lhs.offset(); |
147 | int op2 = bytecode.m_rhs.offset(); |
148 | unsigned target = jumpTarget(instruction, bytecode.m_targetLabel); |
149 | |
150 | linkAllSlowCases(iter); |
151 | |
152 | emitLoad(op1, regT1, regT0); |
153 | emitLoad(op2, regT3, regT2); |
154 | callOperation(operation, JSValueRegs(regT1, regT0), JSValueRegs(regT3, regT2)); |
155 | emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target); |
156 | } |
157 | |
158 | void JIT::emit_op_unsigned(const Instruction* currentInstruction) |
159 | { |
160 | auto bytecode = currentInstruction->as<OpUnsigned>(); |
161 | int result = bytecode.m_dst.offset(); |
162 | int op1 = bytecode.m_operand.offset(); |
163 | |
164 | emitLoad(op1, regT1, regT0); |
165 | |
166 | addSlowCase(branchIfNotInt32(regT1)); |
167 | addSlowCase(branch32(LessThan, regT0, TrustedImm32(0))); |
168 | emitStoreInt32(result, regT0, result == op1); |
169 | } |
170 | |
171 | void JIT::emit_op_inc(const Instruction* currentInstruction) |
172 | { |
173 | auto bytecode = currentInstruction->as<OpInc>(); |
174 | int srcDst = bytecode.m_srcDst.offset(); |
175 | |
176 | emitLoad(srcDst, regT1, regT0); |
177 | |
178 | addSlowCase(branchIfNotInt32(regT1)); |
179 | addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0)); |
180 | emitStoreInt32(srcDst, regT0, true); |
181 | } |
182 | |
183 | void JIT::emit_op_dec(const Instruction* currentInstruction) |
184 | { |
185 | auto bytecode = currentInstruction->as<OpDec>(); |
186 | int srcDst = bytecode.m_srcDst.offset(); |
187 | |
188 | emitLoad(srcDst, regT1, regT0); |
189 | |
190 | addSlowCase(branchIfNotInt32(regT1)); |
191 | addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0)); |
192 | emitStoreInt32(srcDst, regT0, true); |
193 | } |
194 | |
template <typename Op>
void JIT::emitBinaryDoubleOp(const Instruction *instruction, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    // Double-operand fallback for the compare-and-jump opcodes. The int32
    // fast path appends to notInt32Op1/notInt32Op2 when a tag check fails;
    // this function links those jumps and handles the two mixed cases:
    //   case 1: op1 is not int32 (op2 unknown);
    //   case 2: op1 is int32, op2 is not int32.
    // op1IsInRegisters/op2IsInRegisters say whether the caller already
    // loaded that operand into (regT1:regT0)/(regT3:regT2).
    JumpList end;

    auto bytecode = instruction->as<Op>();
    int opcodeID = Op::opcodeID;
    int target = jumpTarget(instruction, bytecode.m_targetLabel);
    int op1 = bytecode.m_lhs.offset();
    int op2 = bytecode.m_rhs.offset();

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        // On JSVALUE32_64 a tag above LowestTag is a non-number; numbers
        // that reach here with a tag at or below LowestTag are doubles.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));

        // op2's tag equals LowestTag here: it must be int32, else slow-case.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branchIfNotInt32(regT3));

        // op2 is int32: promote it to double for the comparison.
        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math.
        // The negated opcodes (op_jn*) branch on the *unordered* form with
        // swapped operands so that NaN comparisons take the jump, matching
        // the semantics of "not less/greater".
        doTheMath.link(this);
        switch (opcodeID) {
        case op_jless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), target);
            break;
        case op_jlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), target);
            break;
        case op_jgreater:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), target);
            break;
        case op_jgreatereq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), target);
            break;
        case op_jnless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), target);
            break;
        case op_jnlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), target);
            break;
        case op_jngreater:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), target);
            break;
        case op_jngreatereq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), target);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        // If case 2 will also emit code, jump over it when case 1 finishes.
        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        // op1 is known int32 in this case: promote it to double.
        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));

        // Do the math.
        // Same NaN-aware handling as above, with operand roles flipped.
        switch (opcodeID) {
        case op_jless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
            break;
        case op_jlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), target);
            break;
        case op_jgreater:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), target);
            break;
        case op_jgreatereq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), target);
            break;
        case op_jnless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
            break;
        case op_jnlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
            break;
        case op_jngreater:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
            break;
        case op_jngreatereq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), target);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}
330 | |
331 | // Mod (%) |
332 | |
333 | /* ------------------------------ BEGIN: OP_MOD ------------------------------ */ |
334 | |
335 | void JIT::emit_op_mod(const Instruction* currentInstruction) |
336 | { |
337 | #if CPU(X86) |
338 | auto bytecode = instruction->as<OpMod>(); |
339 | int dst = bytecode.m_dst.offset(); |
340 | int op1 = bytecode.m_lhs.offset(); |
341 | int op2 = bytecode.m_rhs.offset(); |
342 | |
343 | // Make sure registers are correct for x86 IDIV instructions. |
344 | ASSERT(regT0 == X86Registers::eax); |
345 | ASSERT(regT1 == X86Registers::edx); |
346 | ASSERT(regT2 == X86Registers::ecx); |
347 | ASSERT(regT3 == X86Registers::ebx); |
348 | |
349 | emitLoad2(op1, regT0, regT3, op2, regT1, regT2); |
350 | addSlowCase(branchIfNotInt32(regT1)); |
351 | addSlowCase(branchIfNotInt32(regT0)); |
352 | |
353 | move(regT3, regT0); |
354 | addSlowCase(branchTest32(Zero, regT2)); |
355 | Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1)); |
356 | addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1))); |
357 | denominatorNotNeg1.link(this); |
358 | x86ConvertToDoubleWord32(); |
359 | x86Div32(regT2); |
360 | Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0)); |
361 | addSlowCase(branchTest32(Zero, regT1)); |
362 | numeratorPositive.link(this); |
363 | emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst)); |
364 | #else |
365 | JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod); |
366 | slowPathCall.call(); |
367 | #endif |
368 | } |
369 | |
void JIT::emitSlow_op_mod(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if CPU(X86)
    // Slow path for the inline x86 modulo: any failed check (non-int32
    // operands, zero divisor, INT_MIN % -1, negative-zero result) lands
    // here and re-does the operation in C++.
    linkAllSlowCases(iter);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
#else
    // Non-x86 targets emit no inline path in emit_op_mod, so no slow cases
    // can ever be recorded and this function must never emit code.
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
    // We would have really useful assertions here if it wasn't for the compiler's
    // insistence on attribute noreturn.
    // RELEASE_ASSERT_NOT_REACHED();
#endif
}
385 | |
386 | /* ------------------------------ END: OP_MOD ------------------------------ */ |
387 | |
388 | } // namespace JSC |
389 | |
390 | #endif // USE(JSVALUE32_64) |
391 | #endif // ENABLE(JIT) |
392 | |