1 | /* |
2 | * Copyright (C) 2008-2019 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #pragma once |
27 | |
28 | #if ENABLE(ASSEMBLER) && CPU(X86_64) |
29 | |
30 | #include "MacroAssemblerX86Common.h" |
31 | |
32 | #define REPATCH_OFFSET_CALL_R11 3 |
33 | |
// Returns true if the value can be represented as a sign-extended 32-bit
// immediate, which is the widest immediate form most x86-64 instructions accept.
inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }
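// For example, 0x7fffffff and -0x80000000 sign-extend losslessly, while
// 0x80000000 does not: (int32_t)0x80000000 re-extends to 0xffffffff80000000.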
35 | |
36 | namespace JSC { |
37 | |
38 | class MacroAssemblerX86_64 : public MacroAssemblerX86Common { |
39 | public: |
40 | static constexpr unsigned numGPRs = 16; |
41 | static constexpr unsigned numFPRs = 16; |
42 | |
43 | static constexpr Scale ScalePtr = TimesEight; |
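    // Pointers are eight bytes wide on x86-64, so pointer-scaled BaseIndex
    // addressing multiplies the index register by eight.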
44 | |
45 | using MacroAssemblerX86Common::add32; |
46 | using MacroAssemblerX86Common::and32; |
47 | using MacroAssemblerX86Common::branch32; |
48 | using MacroAssemblerX86Common::branchAdd32; |
49 | using MacroAssemblerX86Common::or32; |
50 | using MacroAssemblerX86Common::or16; |
51 | using MacroAssemblerX86Common::sub32; |
52 | using MacroAssemblerX86Common::load8; |
53 | using MacroAssemblerX86Common::load32; |
54 | using MacroAssemblerX86Common::store32; |
55 | using MacroAssemblerX86Common::store8; |
56 | using MacroAssemblerX86Common::call; |
57 | using MacroAssemblerX86Common::jump; |
58 | using MacroAssemblerX86Common::farJump; |
59 | using MacroAssemblerX86Common::addDouble; |
60 | using MacroAssemblerX86Common::loadDouble; |
61 | using MacroAssemblerX86Common::convertInt32ToDouble; |
62 | |
63 | void add32(TrustedImm32 imm, AbsoluteAddress address) |
64 | { |
65 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
66 | add32(imm, Address(scratchRegister())); |
67 | } |
68 | |
69 | void and32(TrustedImm32 imm, AbsoluteAddress address) |
70 | { |
71 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
72 | and32(imm, Address(scratchRegister())); |
73 | } |
74 | |
75 | void add32(AbsoluteAddress address, RegisterID dest) |
76 | { |
77 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
78 | add32(Address(scratchRegister()), dest); |
79 | } |
80 | |
81 | void or32(TrustedImm32 imm, AbsoluteAddress address) |
82 | { |
83 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
84 | or32(imm, Address(scratchRegister())); |
85 | } |
86 | |
87 | void or32(RegisterID reg, AbsoluteAddress address) |
88 | { |
89 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
90 | or32(reg, Address(scratchRegister())); |
91 | } |
92 | |
93 | void or16(TrustedImm32 imm, AbsoluteAddress address) |
94 | { |
95 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
96 | or16(imm, Address(scratchRegister())); |
97 | } |
98 | |
99 | void sub32(TrustedImm32 imm, AbsoluteAddress address) |
100 | { |
101 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
102 | sub32(imm, Address(scratchRegister())); |
103 | } |
104 | |
105 | void load8(const void* address, RegisterID dest) |
106 | { |
107 | move(TrustedImmPtr(address), dest); |
108 | load8(dest, dest); |
109 | } |
110 | |
111 | void load16(ExtendedAddress address, RegisterID dest) |
112 | { |
113 | TrustedImmPtr addr(reinterpret_cast<void*>(address.offset)); |
114 | MacroAssemblerX86Common::move(addr, scratchRegister()); |
115 | MacroAssemblerX86Common::load16(BaseIndex(scratchRegister(), address.base, TimesTwo), dest); |
116 | } |
117 | |
118 | void load16(BaseIndex address, RegisterID dest) |
119 | { |
120 | MacroAssemblerX86Common::load16(address, dest); |
121 | } |
122 | |
123 | void load16(Address address, RegisterID dest) |
124 | { |
125 | MacroAssemblerX86Common::load16(address, dest); |
126 | } |
127 | |
128 | void load32(const void* address, RegisterID dest) |
129 | { |
130 | if (dest == X86Registers::eax) |
131 | m_assembler.movl_mEAX(address); |
132 | else { |
133 | move(TrustedImmPtr(address), dest); |
134 | load32(dest, dest); |
135 | } |
136 | } |
137 | |
138 | void addDouble(AbsoluteAddress address, FPRegisterID dest) |
139 | { |
140 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
141 | m_assembler.addsd_mr(0, scratchRegister(), dest); |
142 | } |
143 | |
144 | void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest) |
145 | { |
146 | move(imm, scratchRegister()); |
147 | m_assembler.cvtsi2sd_rr(scratchRegister(), dest); |
148 | } |
149 | |
150 | void store32(TrustedImm32 imm, void* address) |
151 | { |
152 | move(TrustedImmPtr(address), scratchRegister()); |
153 | store32(imm, scratchRegister()); |
154 | } |
155 | |
156 | void store32(RegisterID source, void* address) |
157 | { |
158 | if (source == X86Registers::eax) |
159 | m_assembler.movl_EAXm(address); |
160 | else { |
161 | move(TrustedImmPtr(address), scratchRegister()); |
162 | store32(source, scratchRegister()); |
163 | } |
164 | } |
165 | |
166 | void store8(TrustedImm32 imm, void* address) |
167 | { |
168 | TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); |
169 | move(TrustedImmPtr(address), scratchRegister()); |
170 | store8(imm8, Address(scratchRegister())); |
171 | } |
172 | |
173 | void store8(RegisterID reg, void* address) |
174 | { |
175 | move(TrustedImmPtr(address), scratchRegister()); |
176 | store8(reg, Address(scratchRegister())); |
177 | } |
178 | |
179 | #if OS(WINDOWS) |
180 | Call callWithSlowPathReturnType(PtrTag) |
181 | { |
        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
        // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right:
        // rdx should contain the first argument, r8 the second, and r9 the third.
        // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
        // We then need to copy the 16-byte return value into rax and rdx, since the JIT expects the return value to be split between the two.
        // It is assumed that the parameters are already shifted to the right when entering this method.
        // Note: this implementation supports up to 3 parameters.
189 | |
        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to copy the frame pointer to the stack manually, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp may be used as a frame pointer.
193 | store64(X86Registers::ebp, Address(X86Registers::esp, -16)); |
194 | |
        // Allocate stack space for the 32-byte shadow space for the four parameter registers,
        // 16 bytes for the return value, and 16 bytes for the frame pointer and the
        // return address slot (not populated).
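        // A sketch of the resulting layout relative to the post-sub rsp (byte offsets):
        //   rsp + 0x00..0x1f: shadow space for the four register parameters
        //   rsp + 0x20..0x2f: the 16-byte return value (rcx points here)
        //   rsp + 0x30:       saved rbp; rsp + 0x38: return address slot (not populated)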
198 | sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); |
199 | |
200 | // The first parameter register should contain a pointer to the stack allocated space for the return value. |
201 | move(X86Registers::esp, X86Registers::ecx); |
202 | add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx); |
203 | |
204 | DataLabelPtr label = moveWithPatch(TrustedImmPtr(nullptr), scratchRegister()); |
205 | Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable); |
206 | |
207 | add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); |
208 | |
209 | // Copy the return value into rax and rdx. |
210 | load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx); |
211 | load64(Address(X86Registers::eax), X86Registers::eax); |
212 | |
213 | ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11); |
214 | return result; |
215 | } |
216 | #endif |
217 | |
218 | Call call(PtrTag) |
219 | { |
220 | #if OS(WINDOWS) |
        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to copy the frame pointer to the stack manually, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp may be used as a frame pointer.
224 | store64(X86Registers::ebp, Address(X86Registers::esp, -16)); |
225 | |
        // On Windows we need to copy the arguments that don't fit in registers to the stack locations where the callee expects to find them.
        // We don't know the number of arguments at this point, so arguments 5 and 6 are always copied.
228 | |
229 | // Copy argument 5 |
230 | load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister()); |
231 | store64(scratchRegister(), Address(X86Registers::esp, -4 * static_cast<int32_t>(sizeof(int64_t)))); |
232 | |
233 | // Copy argument 6 |
234 | load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister()); |
235 | store64(scratchRegister(), Address(X86Registers::esp, -3 * static_cast<int32_t>(sizeof(int64_t)))); |
236 | |
        // Allocate stack space for the 32-byte shadow space for the four parameter registers,
        // 16 bytes for the frame pointer and the return address slot (not populated), and
        // 16 bytes for the fifth and sixth arguments, since the call can take up to six parameters.
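        // A sketch of the resulting layout relative to the post-sub rsp (byte offsets):
        //   rsp + 0x00..0x1f: shadow space for the four register parameters
        //   rsp + 0x20..0x2f: copies of arguments 5 and 6
        //   rsp + 0x30:       saved rbp; rsp + 0x38: return address slot (not populated)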
240 | sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); |
241 | #endif |
242 | DataLabelPtr label = moveWithPatch(TrustedImmPtr(nullptr), scratchRegister()); |
243 | Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable); |
244 | #if OS(WINDOWS) |
245 | add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); |
246 | #endif |
247 | ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11); |
248 | return result; |
249 | } |
250 | |
251 | void callOperation(const FunctionPtr<OperationPtrTag> operation) |
252 | { |
253 | move(TrustedImmPtr(operation.executableAddress()), scratchRegister()); |
254 | m_assembler.call(scratchRegister()); |
255 | } |
256 | |
257 | ALWAYS_INLINE Call call(RegisterID callTag) { return UNUSED_PARAM(callTag), call(NoPtrTag); } |
258 | |
259 | // Address is a memory location containing the address to jump to |
260 | void farJump(AbsoluteAddress address, PtrTag tag) |
261 | { |
262 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
263 | farJump(Address(scratchRegister()), tag); |
264 | } |
265 | |
266 | ALWAYS_INLINE void farJump(AbsoluteAddress address, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), farJump(address, NoPtrTag); } |
267 | |
268 | Call threadSafePatchableNearCall() |
269 | { |
270 | const size_t nearCallOpcodeSize = 1; |
271 | const size_t nearCallRelativeLocationSize = sizeof(int32_t); |
272 | // We want to make sure the 32-bit near call immediate is 32-bit aligned. |
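        // The near call encodes as E8 followed by a 4-byte relative displacement; padding
        // with nops so the displacement starts on a 4-byte boundary lets it be repatched
        // with a single aligned 32-bit store, which is what makes the call thread-safe.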
273 | size_t codeSize = m_assembler.codeSize(); |
274 | size_t alignedSize = WTF::roundUpToMultipleOf<nearCallRelativeLocationSize>(codeSize + nearCallOpcodeSize); |
275 | emitNops(alignedSize - (codeSize + nearCallOpcodeSize)); |
276 | DataLabelPtr label = DataLabelPtr(this); |
277 | Call result = nearCall(); |
278 | ASSERT_UNUSED(label, differenceBetween(label, result) == (nearCallOpcodeSize + nearCallRelativeLocationSize)); |
279 | return result; |
280 | } |
281 | |
282 | Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest) |
283 | { |
284 | move(TrustedImmPtr(dest.m_ptr), scratchRegister()); |
285 | add32(src, Address(scratchRegister())); |
286 | return Jump(m_assembler.jCC(x86Condition(cond))); |
287 | } |
288 | |
289 | void add64(RegisterID src, RegisterID dest) |
290 | { |
291 | m_assembler.addq_rr(src, dest); |
292 | } |
293 | |
294 | void add64(Address src, RegisterID dest) |
295 | { |
296 | m_assembler.addq_mr(src.offset, src.base, dest); |
297 | } |
298 | |
299 | void add64(BaseIndex src, RegisterID dest) |
300 | { |
301 | m_assembler.addq_mr(src.offset, src.base, src.index, src.scale, dest); |
302 | } |
303 | |
304 | void add64(RegisterID src, Address dest) |
305 | { |
306 | m_assembler.addq_rm(src, dest.offset, dest.base); |
307 | } |
308 | |
309 | void add64(RegisterID src, BaseIndex dest) |
310 | { |
311 | m_assembler.addq_rm(src, dest.offset, dest.base, dest.index, dest.scale); |
312 | } |
313 | |
314 | void add64(AbsoluteAddress src, RegisterID dest) |
315 | { |
316 | move(TrustedImmPtr(src.m_ptr), scratchRegister()); |
317 | add64(Address(scratchRegister()), dest); |
318 | } |
319 | |
320 | void add64(TrustedImm32 imm, RegisterID srcDest) |
321 | { |
322 | if (imm.m_value == 1) |
323 | m_assembler.incq_r(srcDest); |
324 | else |
325 | m_assembler.addq_ir(imm.m_value, srcDest); |
326 | } |
327 | |
328 | void add64(TrustedImm64 imm, RegisterID dest) |
329 | { |
330 | if (imm.m_value == 1) |
331 | m_assembler.incq_r(dest); |
332 | else { |
333 | move(imm, scratchRegister()); |
334 | add64(scratchRegister(), dest); |
335 | } |
336 | } |
337 | |
338 | void add64(TrustedImm32 imm, RegisterID src, RegisterID dest) |
339 | { |
340 | m_assembler.leaq_mr(imm.m_value, src, dest); |
341 | } |
342 | |
343 | void add64(TrustedImm32 imm, Address address) |
344 | { |
345 | if (imm.m_value == 1) |
346 | m_assembler.incq_m(address.offset, address.base); |
347 | else |
348 | m_assembler.addq_im(imm.m_value, address.offset, address.base); |
349 | } |
350 | |
351 | void add64(TrustedImm32 imm, BaseIndex address) |
352 | { |
353 | if (imm.m_value == 1) |
354 | m_assembler.incq_m(address.offset, address.base, address.index, address.scale); |
355 | else |
356 | m_assembler.addq_im(imm.m_value, address.offset, address.base, address.index, address.scale); |
357 | } |
358 | |
359 | void add64(TrustedImm32 imm, AbsoluteAddress address) |
360 | { |
361 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
362 | add64(imm, Address(scratchRegister())); |
363 | } |
364 | |
365 | void add64(RegisterID a, RegisterID b, RegisterID dest) |
366 | { |
367 | x86Lea64(BaseIndex(a, b, TimesOne), dest); |
368 | } |
369 | |
370 | void x86Lea64(BaseIndex index, RegisterID dest) |
371 | { |
372 | if (!index.scale && !index.offset) { |
373 | if (index.base == dest) { |
374 | add64(index.index, dest); |
375 | return; |
376 | } |
377 | if (index.index == dest) { |
378 | add64(index.base, dest); |
379 | return; |
380 | } |
381 | } |
382 | m_assembler.leaq_mr(index.offset, index.base, index.index, index.scale, dest); |
383 | } |
384 | |
385 | void getEffectiveAddress(BaseIndex address, RegisterID dest) |
386 | { |
387 | return x86Lea64(address, dest); |
388 | } |
389 | |
390 | void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest) |
391 | { |
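        // lea performs the addition without updating condition flags, unlike add.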
392 | m_assembler.leaq_mr(imm.m_value, srcDest, srcDest); |
393 | } |
394 | |
395 | void and64(RegisterID src, RegisterID dest) |
396 | { |
397 | m_assembler.andq_rr(src, dest); |
398 | } |
399 | |
400 | void and64(RegisterID src, Address dest) |
401 | { |
402 | m_assembler.andq_rm(src, dest.offset, dest.base); |
403 | } |
404 | |
405 | void and64(RegisterID src, BaseIndex dest) |
406 | { |
407 | m_assembler.andq_rm(src, dest.offset, dest.base, dest.index, dest.scale); |
408 | } |
409 | |
410 | void and64(Address src, RegisterID dest) |
411 | { |
412 | m_assembler.andq_mr(src.offset, src.base, dest); |
413 | } |
414 | |
415 | void and64(BaseIndex src, RegisterID dest) |
416 | { |
417 | m_assembler.andq_mr(src.offset, src.base, src.index, src.scale, dest); |
418 | } |
419 | |
420 | void and64(TrustedImm32 imm, RegisterID srcDest) |
421 | { |
422 | m_assembler.andq_ir(imm.m_value, srcDest); |
423 | } |
424 | |
425 | void and64(TrustedImm32 imm, Address dest) |
426 | { |
427 | m_assembler.andq_im(imm.m_value, dest.offset, dest.base); |
428 | } |
429 | |
430 | void and64(TrustedImm32 imm, BaseIndex dest) |
431 | { |
432 | m_assembler.andq_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale); |
433 | } |
434 | |
435 | void and64(TrustedImmPtr imm, RegisterID srcDest) |
436 | { |
437 | intptr_t intValue = imm.asIntptr(); |
438 | if (intValue <= std::numeric_limits<int32_t>::max() |
439 | && intValue >= std::numeric_limits<int32_t>::min()) { |
440 | and64(TrustedImm32(static_cast<int32_t>(intValue)), srcDest); |
441 | return; |
442 | } |
443 | move(imm, scratchRegister()); |
444 | and64(scratchRegister(), srcDest); |
445 | } |
446 | |
447 | void and64(RegisterID op1, RegisterID op2, RegisterID dest) |
448 | { |
449 | if (op1 == op2 && op1 != dest && op2 != dest) |
450 | move(op1, dest); |
451 | else if (op1 == dest) |
452 | and64(op2, dest); |
453 | else { |
454 | move(op2, dest); |
455 | and64(op1, dest); |
456 | } |
457 | } |
458 | |
459 | void countLeadingZeros64(RegisterID src, RegisterID dst) |
460 | { |
461 | if (supportsLZCNT()) { |
462 | m_assembler.lzcntq_rr(src, dst); |
463 | return; |
464 | } |
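        // BSR yields the index of the highest set bit (undefined for zero input);
        // clz64AfterBsr converts that index to a leading-zero count and handles zero.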
465 | m_assembler.bsrq_rr(src, dst); |
466 | clz64AfterBsr(dst); |
467 | } |
468 | |
469 | void countLeadingZeros64(Address src, RegisterID dst) |
470 | { |
471 | if (supportsLZCNT()) { |
472 | m_assembler.lzcntq_mr(src.offset, src.base, dst); |
473 | return; |
474 | } |
475 | m_assembler.bsrq_mr(src.offset, src.base, dst); |
476 | clz64AfterBsr(dst); |
477 | } |
478 | |
479 | void countTrailingZeros64(RegisterID src, RegisterID dst) |
480 | { |
481 | if (supportsBMI1()) { |
482 | m_assembler.tzcntq_rr(src, dst); |
483 | return; |
484 | } |
485 | m_assembler.bsfq_rr(src, dst); |
486 | ctzAfterBsf<64>(dst); |
487 | } |
488 | |
489 | void countPopulation64(RegisterID src, RegisterID dst) |
490 | { |
491 | ASSERT(supportsCountPopulation()); |
492 | m_assembler.popcntq_rr(src, dst); |
493 | } |
494 | |
495 | void countPopulation64(Address src, RegisterID dst) |
496 | { |
497 | ASSERT(supportsCountPopulation()); |
498 | m_assembler.popcntq_mr(src.offset, src.base, dst); |
499 | } |
500 | |
501 | void lshift64(TrustedImm32 imm, RegisterID dest) |
502 | { |
503 | m_assembler.shlq_i8r(imm.m_value, dest); |
504 | } |
505 | |
506 | void lshift64(RegisterID src, RegisterID dest) |
507 | { |
508 | if (src == X86Registers::ecx) |
509 | m_assembler.shlq_CLr(dest); |
510 | else { |
511 | ASSERT(src != dest); |
512 | |
513 | // Can only shift by ecx, so we do some swapping if we see anything else. |
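            // A sketch of what lshift64(rax, rbx) emits:
            //   xchg %rax, %rcx ; shl %cl, %rbx ; xchg %rax, %rcx
            // When dest is ecx, the shift targets src, which holds dest's value
            // after the first swap, and the second swap moves the result back.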
514 | swap(src, X86Registers::ecx); |
515 | m_assembler.shlq_CLr(dest == X86Registers::ecx ? src : dest); |
516 | swap(src, X86Registers::ecx); |
517 | } |
518 | } |
519 | |
520 | void rshift64(TrustedImm32 imm, RegisterID dest) |
521 | { |
522 | m_assembler.sarq_i8r(imm.m_value, dest); |
523 | } |
524 | |
525 | void rshift64(RegisterID src, RegisterID dest) |
526 | { |
527 | if (src == X86Registers::ecx) |
528 | m_assembler.sarq_CLr(dest); |
529 | else { |
530 | ASSERT(src != dest); |
531 | |
532 | // Can only shift by ecx, so we do some swapping if we see anything else. |
533 | swap(src, X86Registers::ecx); |
534 | m_assembler.sarq_CLr(dest == X86Registers::ecx ? src : dest); |
535 | swap(src, X86Registers::ecx); |
536 | } |
537 | } |
538 | |
539 | void urshift64(TrustedImm32 imm, RegisterID dest) |
540 | { |
541 | m_assembler.shrq_i8r(imm.m_value, dest); |
542 | } |
543 | |
544 | void urshift64(RegisterID src, RegisterID dest) |
545 | { |
546 | if (src == X86Registers::ecx) |
547 | m_assembler.shrq_CLr(dest); |
548 | else { |
549 | ASSERT(src != dest); |
550 | |
551 | // Can only shift by ecx, so we do some swapping if we see anything else. |
552 | swap(src, X86Registers::ecx); |
553 | m_assembler.shrq_CLr(dest == X86Registers::ecx ? src : dest); |
554 | swap(src, X86Registers::ecx); |
555 | } |
556 | } |
557 | |
558 | void rotateRight64(TrustedImm32 imm, RegisterID dest) |
559 | { |
560 | m_assembler.rorq_i8r(imm.m_value, dest); |
561 | } |
562 | |
563 | void rotateRight64(RegisterID src, RegisterID dest) |
564 | { |
565 | if (src == X86Registers::ecx) |
566 | m_assembler.rorq_CLr(dest); |
567 | else { |
568 | ASSERT(src != dest); |
569 | |
570 | // Can only rotate by ecx, so we do some swapping if we see anything else. |
571 | swap(src, X86Registers::ecx); |
572 | m_assembler.rorq_CLr(dest == X86Registers::ecx ? src : dest); |
573 | swap(src, X86Registers::ecx); |
574 | } |
575 | } |
576 | |
577 | void rotateLeft64(TrustedImm32 imm, RegisterID dest) |
578 | { |
579 | m_assembler.rolq_i8r(imm.m_value, dest); |
580 | } |
581 | |
582 | void rotateLeft64(RegisterID src, RegisterID dest) |
583 | { |
584 | if (src == X86Registers::ecx) |
585 | m_assembler.rolq_CLr(dest); |
586 | else { |
587 | ASSERT(src != dest); |
588 | |
589 | // Can only rotate by ecx, so we do some swapping if we see anything else. |
590 | swap(src, X86Registers::ecx); |
591 | m_assembler.rolq_CLr(dest == X86Registers::ecx ? src : dest); |
592 | swap(src, X86Registers::ecx); |
593 | } |
594 | } |
595 | |
596 | void mul64(RegisterID src, RegisterID dest) |
597 | { |
598 | m_assembler.imulq_rr(src, dest); |
599 | } |
600 | |
601 | void mul64(RegisterID src1, RegisterID src2, RegisterID dest) |
602 | { |
603 | if (src2 == dest) { |
604 | m_assembler.imulq_rr(src1, dest); |
605 | return; |
606 | } |
607 | move(src1, dest); |
608 | m_assembler.imulq_rr(src2, dest); |
609 | } |
610 | |
    // CQO sign-extends rax into rdx:rax, preparing the 128-bit dividend for a
    // subsequent 64-bit signed division.
    void x86ConvertToQuadWord64()
    {
        m_assembler.cqo();
    }
615 | |
616 | void x86ConvertToQuadWord64(RegisterID rax, RegisterID rdx) |
617 | { |
618 | ASSERT_UNUSED(rax, rax == X86Registers::eax); |
619 | ASSERT_UNUSED(rdx, rdx == X86Registers::edx); |
620 | x86ConvertToQuadWord64(); |
621 | } |
622 | |
623 | void x86Div64(RegisterID denominator) |
624 | { |
625 | m_assembler.idivq_r(denominator); |
626 | } |
627 | |
628 | void x86Div64(RegisterID rax, RegisterID rdx, RegisterID denominator) |
629 | { |
630 | ASSERT_UNUSED(rax, rax == X86Registers::eax); |
631 | ASSERT_UNUSED(rdx, rdx == X86Registers::edx); |
632 | x86Div64(denominator); |
633 | } |
634 | |
635 | void x86UDiv64(RegisterID denominator) |
636 | { |
637 | m_assembler.divq_r(denominator); |
638 | } |
639 | |
640 | void x86UDiv64(RegisterID rax, RegisterID rdx, RegisterID denominator) |
641 | { |
642 | ASSERT_UNUSED(rax, rax == X86Registers::eax); |
643 | ASSERT_UNUSED(rdx, rdx == X86Registers::edx); |
644 | x86UDiv64(denominator); |
645 | } |
646 | |
647 | void neg64(RegisterID dest) |
648 | { |
649 | m_assembler.negq_r(dest); |
650 | } |
651 | |
652 | void neg64(RegisterID src, RegisterID dest) |
653 | { |
654 | move(src, dest); |
655 | m_assembler.negq_r(dest); |
656 | } |
657 | |
658 | void neg64(Address dest) |
659 | { |
660 | m_assembler.negq_m(dest.offset, dest.base); |
661 | } |
662 | |
663 | void neg64(BaseIndex dest) |
664 | { |
665 | m_assembler.negq_m(dest.offset, dest.base, dest.index, dest.scale); |
666 | } |
667 | |
668 | void or64(RegisterID src, RegisterID dest) |
669 | { |
670 | m_assembler.orq_rr(src, dest); |
671 | } |
672 | |
673 | void or64(RegisterID src, Address dest) |
674 | { |
675 | m_assembler.orq_rm(src, dest.offset, dest.base); |
676 | } |
677 | |
678 | void or64(RegisterID src, BaseIndex dest) |
679 | { |
680 | m_assembler.orq_rm(src, dest.offset, dest.base, dest.index, dest.scale); |
681 | } |
682 | |
683 | void or64(Address src, RegisterID dest) |
684 | { |
685 | m_assembler.orq_mr(src.offset, src.base, dest); |
686 | } |
687 | |
688 | void or64(BaseIndex src, RegisterID dest) |
689 | { |
690 | m_assembler.orq_mr(src.offset, src.base, src.index, src.scale, dest); |
691 | } |
692 | |
693 | void or64(TrustedImm32 imm, Address dest) |
694 | { |
695 | m_assembler.orq_im(imm.m_value, dest.offset, dest.base); |
696 | } |
697 | |
698 | void or64(TrustedImm32 imm, BaseIndex dest) |
699 | { |
700 | m_assembler.orq_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale); |
701 | } |
702 | |
703 | void or64(TrustedImm64 imm, RegisterID srcDest) |
704 | { |
705 | if (imm.m_value <= std::numeric_limits<int32_t>::max() |
706 | && imm.m_value >= std::numeric_limits<int32_t>::min()) { |
707 | or64(TrustedImm32(static_cast<int32_t>(imm.m_value)), srcDest); |
708 | return; |
709 | } |
710 | move(imm, scratchRegister()); |
711 | or64(scratchRegister(), srcDest); |
712 | } |
713 | |
714 | void or64(TrustedImm32 imm, RegisterID dest) |
715 | { |
716 | m_assembler.orq_ir(imm.m_value, dest); |
717 | } |
718 | |
719 | void or64(RegisterID op1, RegisterID op2, RegisterID dest) |
720 | { |
721 | if (op1 == op2) |
722 | move(op1, dest); |
723 | else if (op1 == dest) |
724 | or64(op2, dest); |
725 | else { |
726 | move(op2, dest); |
727 | or64(op1, dest); |
728 | } |
729 | } |
730 | |
731 | void or64(TrustedImm32 imm, RegisterID src, RegisterID dest) |
732 | { |
733 | move(src, dest); |
734 | or64(imm, dest); |
735 | } |
736 | |
737 | void sub64(RegisterID src, RegisterID dest) |
738 | { |
739 | m_assembler.subq_rr(src, dest); |
740 | } |
741 | |
742 | void sub64(TrustedImm32 imm, RegisterID dest) |
743 | { |
744 | if (imm.m_value == 1) |
745 | m_assembler.decq_r(dest); |
746 | else |
747 | m_assembler.subq_ir(imm.m_value, dest); |
748 | } |
749 | |
750 | void sub64(TrustedImm64 imm, RegisterID dest) |
751 | { |
752 | if (imm.m_value == 1) |
753 | m_assembler.decq_r(dest); |
754 | else { |
755 | move(imm, scratchRegister()); |
756 | sub64(scratchRegister(), dest); |
757 | } |
758 | } |
759 | |
760 | void sub64(TrustedImm32 imm, Address address) |
761 | { |
762 | m_assembler.subq_im(imm.m_value, address.offset, address.base); |
763 | } |
764 | |
765 | void sub64(TrustedImm32 imm, BaseIndex address) |
766 | { |
767 | m_assembler.subq_im(imm.m_value, address.offset, address.base, address.index, address.scale); |
768 | } |
769 | |
770 | void sub64(Address src, RegisterID dest) |
771 | { |
772 | m_assembler.subq_mr(src.offset, src.base, dest); |
773 | } |
774 | |
775 | void sub64(BaseIndex src, RegisterID dest) |
776 | { |
777 | m_assembler.subq_mr(src.offset, src.base, src.index, src.scale, dest); |
778 | } |
779 | |
780 | void sub64(RegisterID src, Address dest) |
781 | { |
782 | m_assembler.subq_rm(src, dest.offset, dest.base); |
783 | } |
784 | |
785 | void sub64(RegisterID src, BaseIndex dest) |
786 | { |
787 | m_assembler.subq_rm(src, dest.offset, dest.base, dest.index, dest.scale); |
788 | } |
789 | |
790 | void xor64(RegisterID src, RegisterID dest) |
791 | { |
792 | m_assembler.xorq_rr(src, dest); |
793 | } |
794 | |
795 | void xor64(RegisterID op1, RegisterID op2, RegisterID dest) |
796 | { |
797 | if (op1 == op2) |
798 | move(TrustedImm32(0), dest); |
799 | else if (op1 == dest) |
800 | xor64(op2, dest); |
801 | else { |
802 | move(op2, dest); |
803 | xor64(op1, dest); |
804 | } |
805 | } |
806 | |
807 | void xor64(RegisterID src, Address dest) |
808 | { |
809 | m_assembler.xorq_rm(src, dest.offset, dest.base); |
810 | } |
811 | |
812 | void xor64(RegisterID src, BaseIndex dest) |
813 | { |
814 | m_assembler.xorq_rm(src, dest.offset, dest.base, dest.index, dest.scale); |
815 | } |
816 | |
817 | void xor64(Address src, RegisterID dest) |
818 | { |
819 | m_assembler.xorq_mr(src.offset, src.base, dest); |
820 | } |
821 | |
822 | void xor64(BaseIndex src, RegisterID dest) |
823 | { |
824 | m_assembler.xorq_mr(src.offset, src.base, src.index, src.scale, dest); |
825 | } |
826 | |
827 | void xor64(TrustedImm32 imm, Address dest) |
828 | { |
829 | m_assembler.xorq_im(imm.m_value, dest.offset, dest.base); |
830 | } |
831 | |
832 | void xor64(TrustedImm32 imm, BaseIndex dest) |
833 | { |
834 | m_assembler.xorq_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale); |
835 | } |
836 | |
837 | void xor64(TrustedImm32 imm, RegisterID srcDest) |
838 | { |
839 | m_assembler.xorq_ir(imm.m_value, srcDest); |
840 | } |
841 | |
842 | void xor64(TrustedImm64 imm, RegisterID srcDest) |
843 | { |
844 | move(imm, scratchRegister()); |
845 | xor64(scratchRegister(), srcDest); |
846 | } |
847 | |
848 | void not64(RegisterID srcDest) |
849 | { |
850 | m_assembler.notq_r(srcDest); |
851 | } |
852 | |
853 | void not64(Address dest) |
854 | { |
855 | m_assembler.notq_m(dest.offset, dest.base); |
856 | } |
857 | |
858 | void not64(BaseIndex dest) |
859 | { |
860 | m_assembler.notq_m(dest.offset, dest.base, dest.index, dest.scale); |
861 | } |
862 | |
863 | void load64(ImplicitAddress address, RegisterID dest) |
864 | { |
865 | m_assembler.movq_mr(address.offset, address.base, dest); |
866 | } |
867 | |
868 | void load64(BaseIndex address, RegisterID dest) |
869 | { |
870 | m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest); |
871 | } |
872 | |
873 | void load64(const void* address, RegisterID dest) |
874 | { |
875 | if (dest == X86Registers::eax) |
876 | m_assembler.movq_mEAX(address); |
877 | else { |
878 | move(TrustedImmPtr(address), dest); |
879 | load64(dest, dest); |
880 | } |
881 | } |
882 | |
883 | DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest) |
884 | { |
885 | padBeforePatch(); |
886 | m_assembler.movq_mr_disp32(address.offset, address.base, dest); |
887 | return DataLabel32(this); |
888 | } |
889 | |
890 | DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest) |
891 | { |
892 | padBeforePatch(); |
893 | m_assembler.movq_mr_disp8(address.offset, address.base, dest); |
894 | return DataLabelCompact(this); |
895 | } |
896 | |
897 | void store64(RegisterID src, ImplicitAddress address) |
898 | { |
899 | m_assembler.movq_rm(src, address.offset, address.base); |
900 | } |
901 | |
902 | void store64(RegisterID src, BaseIndex address) |
903 | { |
904 | m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale); |
905 | } |
906 | |
907 | void store64(RegisterID src, void* address) |
908 | { |
909 | if (src == X86Registers::eax) |
910 | m_assembler.movq_EAXm(address); |
911 | else { |
912 | move(TrustedImmPtr(address), scratchRegister()); |
913 | store64(src, scratchRegister()); |
914 | } |
915 | } |
916 | |
917 | void store64(TrustedImm32 imm, ImplicitAddress address) |
918 | { |
919 | m_assembler.movq_i32m(imm.m_value, address.offset, address.base); |
920 | } |
921 | |
922 | void store64(TrustedImm32 imm, BaseIndex address) |
923 | { |
924 | m_assembler.movq_i32m(imm.m_value, address.offset, address.base, address.index, address.scale); |
925 | } |
926 | |
927 | void store64(TrustedImm64 imm, ImplicitAddress address) |
928 | { |
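        // movq with a 32-bit immediate sign-extends it to 64 bits, so values that
        // pass CAN_SIGN_EXTEND_32_64 can skip the scratch-register round trip.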
929 | if (CAN_SIGN_EXTEND_32_64(imm.m_value)) { |
930 | store64(TrustedImm32(static_cast<int32_t>(imm.m_value)), address); |
931 | return; |
932 | } |
933 | |
934 | move(imm, scratchRegister()); |
935 | store64(scratchRegister(), address); |
936 | } |
937 | |
938 | void store64(TrustedImm64 imm, BaseIndex address) |
939 | { |
940 | move(imm, scratchRegister()); |
941 | m_assembler.movq_rm(scratchRegister(), address.offset, address.base, address.index, address.scale); |
942 | } |
943 | |
944 | void storeZero64(ImplicitAddress address) |
945 | { |
946 | store64(TrustedImm32(0), address); |
947 | } |
948 | |
949 | void storeZero64(BaseIndex address) |
950 | { |
951 | store64(TrustedImm32(0), address); |
952 | } |
953 | |
954 | DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address) |
955 | { |
956 | padBeforePatch(); |
957 | m_assembler.movq_rm_disp32(src, address.offset, address.base); |
958 | return DataLabel32(this); |
959 | } |
960 | |
961 | void swap64(RegisterID src, RegisterID dest) |
962 | { |
963 | m_assembler.xchgq_rr(src, dest); |
964 | } |
965 | |
966 | void swap64(RegisterID src, Address dest) |
967 | { |
968 | m_assembler.xchgq_rm(src, dest.offset, dest.base); |
969 | } |
970 | |
971 | void move64ToDouble(RegisterID src, FPRegisterID dest) |
972 | { |
973 | m_assembler.movq_rr(src, dest); |
974 | } |
975 | |
976 | void moveDoubleTo64(FPRegisterID src, RegisterID dest) |
977 | { |
978 | m_assembler.movq_rr(src, dest); |
979 | } |
980 | |
981 | void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) |
982 | { |
983 | if (!right.m_value) { |
984 | if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { |
985 | test64(*resultCondition, left, left, dest); |
986 | return; |
987 | } |
988 | } |
989 | |
990 | m_assembler.cmpq_ir(right.m_value, left); |
991 | set32(x86Condition(cond), dest); |
992 | } |
993 | |
994 | void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) |
995 | { |
996 | m_assembler.cmpq_rr(right, left); |
997 | set32(x86Condition(cond), dest); |
998 | } |
999 | |
1000 | Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right) |
1001 | { |
1002 | m_assembler.cmpq_rr(right, left); |
1003 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1004 | } |
1005 | |
1006 | Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right) |
1007 | { |
1008 | if (!right.m_value) { |
1009 | if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) |
1010 | return branchTest64(*resultCondition, left, left); |
1011 | } |
1012 | m_assembler.cmpq_ir(right.m_value, left); |
1013 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1014 | } |
1015 | |
1016 | Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right) |
1017 | { |
1018 | if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) { |
1019 | m_assembler.testq_rr(left, left); |
1020 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1021 | } |
1022 | move(right, scratchRegister()); |
1023 | return branch64(cond, left, scratchRegister()); |
1024 | } |
1025 | |
1026 | Jump branch64(RelationalCondition cond, RegisterID left, Address right) |
1027 | { |
1028 | m_assembler.cmpq_mr(right.offset, right.base, left); |
1029 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1030 | } |
1031 | |
1032 | Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right) |
1033 | { |
1034 | move(TrustedImmPtr(left.m_ptr), scratchRegister()); |
1035 | return branch64(cond, Address(scratchRegister()), right); |
1036 | } |
1037 | |
1038 | Jump branch64(RelationalCondition cond, Address left, RegisterID right) |
1039 | { |
1040 | m_assembler.cmpq_rm(right, left.offset, left.base); |
1041 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1042 | } |
1043 | |
1044 | Jump branch64(RelationalCondition cond, Address left, TrustedImm32 right) |
1045 | { |
1046 | m_assembler.cmpq_im(right.m_value, left.offset, left.base); |
1047 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1048 | } |
1049 | |
1050 | Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right) |
1051 | { |
1052 | move(right, scratchRegister()); |
1053 | return branch64(cond, left, scratchRegister()); |
1054 | } |
1055 | |
1056 | Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right) |
1057 | { |
1058 | m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale); |
1059 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1060 | } |
1061 | |
1062 | Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right) |
1063 | { |
1064 | load32(left.m_ptr, scratchRegister()); |
1065 | return branch32(cond, scratchRegister(), right); |
1066 | } |
1067 | |
1068 | Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right) |
1069 | { |
1070 | return branch64(cond, left, right); |
1071 | } |
1072 | |
1073 | Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right) |
1074 | { |
1075 | move(right, scratchRegister()); |
1076 | return branchPtr(cond, left, scratchRegister()); |
1077 | } |
1078 | |
1079 | Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask) |
1080 | { |
1081 | m_assembler.testq_rr(reg, mask); |
1082 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1083 | } |
1084 | |
1085 | Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) |
1086 | { |
        // If we are only interested in the low seven bits, this can be tested with a testb;
        // masks with bit 7 set are excluded so the sign flag matches a full-width test.
1088 | if (mask.m_value == -1) |
1089 | m_assembler.testq_rr(reg, reg); |
1090 | else if ((mask.m_value & ~0x7f) == 0) |
1091 | m_assembler.testb_i8r(mask.m_value, reg); |
1092 | else |
1093 | m_assembler.testq_i32r(mask.m_value, reg); |
1094 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1095 | } |
1096 | |
1097 | Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask) |
1098 | { |
1099 | move(mask, scratchRegister()); |
1100 | return branchTest64(cond, reg, scratchRegister()); |
1101 | } |
1102 | |
1103 | Jump branchTestBit64(ResultCondition cond, RegisterID testValue, TrustedImm32 bit) |
1104 | { |
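        // BT copies the selected bit into the carry flag, so "below" (CF set) fires
        // when the bit is one and "above or equal" (CF clear) when it is zero.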
1105 | m_assembler.btw_ir(static_cast<unsigned>(bit.m_value) % 64, testValue); |
1106 | if (cond == NonZero) |
1107 | return Jump(m_assembler.jb()); |
1108 | if (cond == Zero) |
1109 | return Jump(m_assembler.jae()); |
1110 | RELEASE_ASSERT_NOT_REACHED(); |
1111 | } |
1112 | |
1113 | Jump branchTestBit64(ResultCondition cond, Address testValue, TrustedImm32 bit) |
1114 | { |
1115 | m_assembler.btw_im(static_cast<unsigned>(bit.m_value) % 64, testValue.offset, testValue.base); |
1116 | if (cond == NonZero) |
1117 | return Jump(m_assembler.jb()); |
1118 | if (cond == Zero) |
1119 | return Jump(m_assembler.jae()); |
1120 | RELEASE_ASSERT_NOT_REACHED(); |
1121 | } |
1122 | |
1123 | Jump branchTestBit64(ResultCondition cond, RegisterID reg, RegisterID bit) |
1124 | { |
1125 | m_assembler.btw_ir(bit, reg); |
1126 | if (cond == NonZero) |
1127 | return Jump(m_assembler.jb()); |
1128 | if (cond == Zero) |
1129 | return Jump(m_assembler.jae()); |
1130 | RELEASE_ASSERT_NOT_REACHED(); |
1131 | } |
1132 | |
1133 | void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest) |
1134 | { |
1135 | if (mask.m_value == -1) |
1136 | m_assembler.testq_rr(reg, reg); |
1137 | else if ((mask.m_value & ~0x7f) == 0) |
1138 | m_assembler.testb_i8r(mask.m_value, reg); |
1139 | else |
1140 | m_assembler.testq_i32r(mask.m_value, reg); |
1141 | set32(x86Condition(cond), dest); |
1142 | } |
1143 | |
1144 | void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest) |
1145 | { |
1146 | m_assembler.testq_rr(reg, mask); |
1147 | set32(x86Condition(cond), dest); |
1148 | } |
1149 | |
1150 | Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) |
1151 | { |
1152 | load64(address.m_ptr, scratchRegister()); |
1153 | return branchTest64(cond, scratchRegister(), mask); |
1154 | } |
1155 | |
1156 | Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) |
1157 | { |
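        // Comparing the memory operand against zero sets the same flags as testing
        // it against an all-ones mask, without loading the value into a register.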
1158 | if (mask.m_value == -1) |
1159 | m_assembler.cmpq_im(0, address.offset, address.base); |
1160 | else |
1161 | m_assembler.testq_i32m(mask.m_value, address.offset, address.base); |
1162 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1163 | } |
1164 | |
1165 | Jump branchTest64(ResultCondition cond, Address address, RegisterID reg) |
1166 | { |
1167 | m_assembler.testq_rm(reg, address.offset, address.base); |
1168 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1169 | } |
1170 | |
1171 | Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) |
1172 | { |
1173 | if (mask.m_value == -1) |
1174 | m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale); |
1175 | else |
1176 | m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale); |
1177 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1178 | } |
1179 | |
1180 | |
1181 | Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest) |
1182 | { |
1183 | add64(imm, dest); |
1184 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1185 | } |
1186 | |
1187 | Jump branchAdd64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) |
1188 | { |
1189 | if (src1 == dest) |
1190 | return branchAdd64(cond, src2, dest); |
1191 | move(src2, dest); |
1192 | return branchAdd64(cond, src1, dest); |
1193 | } |
1194 | |
1195 | Jump branchAdd64(ResultCondition cond, Address op1, RegisterID op2, RegisterID dest) |
1196 | { |
1197 | if (op2 == dest) |
1198 | return branchAdd64(cond, op1, dest); |
1199 | if (op1.base == dest) { |
            load64(op1, dest);
1201 | return branchAdd64(cond, op2, dest); |
1202 | } |
1203 | move(op2, dest); |
1204 | return branchAdd64(cond, op1, dest); |
1205 | } |
1206 | |
1207 | Jump branchAdd64(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest) |
1208 | { |
1209 | return branchAdd64(cond, src2, src1, dest); |
1210 | } |
1211 | |
1212 | Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest) |
1213 | { |
1214 | add64(src, dest); |
1215 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1216 | } |
1217 | |
1218 | Jump branchAdd64(ResultCondition cond, Address src, RegisterID dest) |
1219 | { |
1220 | add64(src, dest); |
1221 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1222 | } |
1223 | |
1224 | Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest) |
1225 | { |
1226 | mul64(src, dest); |
1227 | if (cond != Overflow) |
1228 | m_assembler.testq_rr(dest, dest); |
1229 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1230 | } |
1231 | |
1232 | Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) |
1233 | { |
1234 | if (src1 == dest) |
1235 | return branchMul64(cond, src2, dest); |
1236 | move(src2, dest); |
1237 | return branchMul64(cond, src1, dest); |
1238 | } |
1239 | |
1240 | Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest) |
1241 | { |
1242 | sub64(imm, dest); |
1243 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1244 | } |
1245 | |
1246 | Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest) |
1247 | { |
1248 | sub64(src, dest); |
1249 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1250 | } |
1251 | |
1252 | Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest) |
1253 | { |
1254 | move(src1, dest); |
1255 | return branchSub64(cond, src2, dest); |
1256 | } |
1257 | |
1258 | Jump branchNeg64(ResultCondition cond, RegisterID srcDest) |
1259 | { |
1260 | neg64(srcDest); |
1261 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1262 | } |
1263 | |
1264 | void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) |
1265 | { |
1266 | m_assembler.cmpq_rr(right, left); |
1267 | cmov(x86Condition(cond), src, dest); |
1268 | } |
1269 | |
1270 | void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) |
1271 | { |
1272 | m_assembler.cmpq_rr(right, left); |
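        // If dest aliases neither input, stage elseCase in dest first; one cmov then
        // selects thenCase. If thenCase aliases dest, an inverted cmov selects elseCase.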
1273 | |
1274 | if (thenCase != dest && elseCase != dest) { |
1275 | move(elseCase, dest); |
1276 | elseCase = dest; |
1277 | } |
1278 | |
1279 | if (elseCase == dest) |
1280 | cmov(x86Condition(cond), thenCase, dest); |
1281 | else |
1282 | cmov(x86Condition(invert(cond)), elseCase, dest); |
1283 | } |
1284 | |
1285 | void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) |
1286 | { |
1287 | if (!right.m_value) { |
1288 | if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { |
1289 | moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest); |
1290 | return; |
1291 | } |
1292 | } |
1293 | |
1294 | m_assembler.cmpq_ir(right.m_value, left); |
1295 | |
1296 | if (thenCase != dest && elseCase != dest) { |
1297 | move(elseCase, dest); |
1298 | elseCase = dest; |
1299 | } |
1300 | |
1301 | if (elseCase == dest) |
1302 | cmov(x86Condition(cond), thenCase, dest); |
1303 | else |
1304 | cmov(x86Condition(invert(cond)), elseCase, dest); |
1305 | } |
1306 | |
1307 | void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) |
1308 | { |
1309 | m_assembler.testq_rr(testReg, mask); |
1310 | cmov(x86Condition(cond), src, dest); |
1311 | } |
1312 | |
1313 | void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) |
1314 | { |
1315 | ASSERT(isInvertible(cond)); |
        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
1317 | |
1318 | m_assembler.testq_rr(right, left); |
1319 | |
1320 | if (thenCase != dest && elseCase != dest) { |
1321 | move(elseCase, dest); |
1322 | elseCase = dest; |
1323 | } |
1324 | |
1325 | if (elseCase == dest) |
1326 | cmov(x86Condition(cond), thenCase, dest); |
1327 | else |
1328 | cmov(x86Condition(invert(cond)), elseCase, dest); |
1329 | } |
1330 | |
1331 | void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID src, RegisterID dest) |
1332 | { |
        // If we are only interested in the low seven bits, this can be tested with a testb;
        // masks with bit 7 set are excluded so the sign flag matches a full-width test.
1334 | if (mask.m_value == -1) |
1335 | m_assembler.testq_rr(testReg, testReg); |
1336 | else if ((mask.m_value & ~0x7f) == 0) |
1337 | m_assembler.testb_i8r(mask.m_value, testReg); |
1338 | else |
1339 | m_assembler.testq_i32r(mask.m_value, testReg); |
1340 | cmov(x86Condition(cond), src, dest); |
1341 | } |
1342 | |
1343 | void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest) |
1344 | { |
1345 | ASSERT(isInvertible(cond)); |
        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
1347 | |
1348 | if (mask.m_value == -1) |
1349 | m_assembler.testq_rr(testReg, testReg); |
1350 | else if (!(mask.m_value & ~0x7f)) |
1351 | m_assembler.testb_i8r(mask.m_value, testReg); |
1352 | else |
1353 | m_assembler.testq_i32r(mask.m_value, testReg); |
1354 | |
1355 | if (thenCase != dest && elseCase != dest) { |
1356 | move(elseCase, dest); |
1357 | elseCase = dest; |
1358 | } |
1359 | |
1360 | if (elseCase == dest) |
1361 | cmov(x86Condition(cond), thenCase, dest); |
1362 | else |
1363 | cmov(x86Condition(invert(cond)), elseCase, dest); |
1364 | } |
1365 | |
1366 | template<typename LeftType, typename RightType> |
1367 | void moveDoubleConditionally64(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) |
1368 | { |
        static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");
1370 | |
1371 | if (thenCase != dest && elseCase != dest) { |
1372 | moveDouble(elseCase, dest); |
1373 | elseCase = dest; |
1374 | } |
1375 | |
1376 | if (elseCase == dest) { |
1377 | Jump falseCase = branch64(invert(cond), left, right); |
1378 | moveDouble(thenCase, dest); |
1379 | falseCase.link(this); |
1380 | } else { |
1381 | Jump trueCase = branch64(cond, left, right); |
1382 | moveDouble(elseCase, dest); |
1383 | trueCase.link(this); |
1384 | } |
1385 | } |
1386 | |
1387 | template<typename TestType, typename MaskType> |
1388 | void moveDoubleConditionallyTest64(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) |
1389 | { |
        static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested arguments could be aliased on dest. Use moveDoubleConditionallyDouble().");
1391 | |
        if (elseCase == dest && isInvertible(cond)) {
            Jump falseCase = branchTest64(invert(cond), test, mask);
            moveDouble(thenCase, dest);
            falseCase.link(this);
        } else if (thenCase == dest) {
            Jump trueCase = branchTest64(cond, test, mask);
            moveDouble(elseCase, dest);
            trueCase.link(this);
        } else {
            Jump trueCase = branchTest64(cond, test, mask);
            moveDouble(elseCase, dest);
            Jump falseCase = jump();
            trueCase.link(this);
            moveDouble(thenCase, dest);
            falseCase.link(this);
        }
1408 | } |
1409 | |
1410 | void abortWithReason(AbortReason reason) |
1411 | { |
1412 | move(TrustedImm32(reason), X86Registers::r11); |
1413 | breakpoint(); |
1414 | } |
1415 | |
1416 | void abortWithReason(AbortReason reason, intptr_t misc) |
1417 | { |
1418 | move(TrustedImm64(misc), X86Registers::r10); |
1419 | abortWithReason(reason); |
1420 | } |
1421 | |
1422 | ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) |
1423 | { |
1424 | ConvertibleLoadLabel result = ConvertibleLoadLabel(this); |
1425 | m_assembler.movq_mr(address.offset, address.base, dest); |
1426 | return result; |
1427 | } |
1428 | |
1429 | DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest) |
1430 | { |
1431 | padBeforePatch(); |
1432 | m_assembler.movq_i64r(initialValue.asIntptr(), dest); |
1433 | return DataLabelPtr(this); |
1434 | } |
1435 | |
1436 | DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest) |
1437 | { |
1438 | padBeforePatch(); |
1439 | m_assembler.movq_i64r(initialValue.m_value, dest); |
1440 | return DataLabelPtr(this); |
1441 | } |
1442 | |
1443 | Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr)) |
1444 | { |
1445 | dataLabel = moveWithPatch(initialRightValue, scratchRegister()); |
1446 | return branch64(cond, left, scratchRegister()); |
1447 | } |
1448 | |
1449 | Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr)) |
1450 | { |
1451 | dataLabel = moveWithPatch(initialRightValue, scratchRegister()); |
1452 | return branch64(cond, left, scratchRegister()); |
1453 | } |
1454 | |
1455 | Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) |
1456 | { |
1457 | padBeforePatch(); |
1458 | m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister()); |
1459 | dataLabel = DataLabel32(this); |
1460 | return branch32(cond, left, scratchRegister()); |
1461 | } |
1462 | |
1463 | DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) |
1464 | { |
1465 | DataLabelPtr label = moveWithPatch(initialValue, scratchRegister()); |
1466 | store64(scratchRegister(), address); |
1467 | return label; |
1468 | } |
1469 | |
1470 | PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm) |
1471 | { |
1472 | return PatchableJump(branch64(cond, reg, imm)); |
1473 | } |
1474 | |
1475 | PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right) |
1476 | { |
1477 | return PatchableJump(branch64(cond, left, right)); |
1478 | } |
1479 | |
1480 | using MacroAssemblerX86Common::branch8; |
1481 | Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) |
1482 | { |
1483 | MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister()); |
1484 | return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister()), right); |
1485 | } |
1486 | |
1487 | using MacroAssemblerX86Common::branchTest8; |
1488 | Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1)) |
1489 | { |
1490 | TrustedImm32 mask8(static_cast<int8_t>(mask.m_value)); |
1491 | TrustedImmPtr addr(reinterpret_cast<void*>(address.offset)); |
1492 | MacroAssemblerX86Common::move(addr, scratchRegister()); |
1493 | return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister(), address.base, TimesOne), mask8); |
1494 | } |
1495 | |
1496 | Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) |
1497 | { |
1498 | TrustedImm32 mask8(static_cast<int8_t>(mask.m_value)); |
1499 | MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
1500 | return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister()), mask8); |
1501 | } |
1502 | |
1503 | void xchg64(RegisterID reg, Address address) |
1504 | { |
1505 | m_assembler.xchgq_rm(reg, address.offset, address.base); |
1506 | } |
1507 | |
1508 | void xchg64(RegisterID reg, BaseIndex address) |
1509 | { |
1510 | m_assembler.xchgq_rm(reg, address.offset, address.base, address.index, address.scale); |
1511 | } |
1512 | |
1513 | void atomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address, RegisterID result) |
1514 | { |
1515 | atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base); }); |
1516 | } |
1517 | |
1518 | void atomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address, RegisterID result) |
1519 | { |
1520 | atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base, address.index, address.scale); }); |
1521 | } |
1522 | |
1523 | void atomicStrongCAS64(RegisterID expectedAndResult, RegisterID newValue, Address address) |
1524 | { |
1525 | atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base); }); |
1526 | } |
1527 | |
1528 | void atomicStrongCAS64(RegisterID expectedAndResult, RegisterID newValue, BaseIndex address) |
1529 | { |
1530 | atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base, address.index, address.scale); }); |
1531 | } |
1532 | |
1533 | Jump branchAtomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address) |
1534 | { |
1535 | return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base); }); |
1536 | } |
1537 | |
1538 | Jump branchAtomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address) |
1539 | { |
1540 | return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base, address.index, address.scale); }); |
1541 | } |
1542 | |
    // x86 cmpxchg never fails spuriously, so the weak and relaxed CAS variants
    // below simply forward to the strong implementation.
    void atomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result)
    {
        atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result);
    }
1547 | |
1548 | void atomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result) |
1549 | { |
1550 | atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result); |
1551 | } |
1552 | |
1553 | Jump branchAtomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address) |
1554 | { |
1555 | return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address); |
1556 | } |
1557 | |
1558 | Jump branchAtomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address) |
1559 | { |
1560 | return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address); |
1561 | } |
1562 | |
1563 | void atomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result) |
1564 | { |
1565 | atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result); |
1566 | } |
1567 | |
1568 | void atomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result) |
1569 | { |
1570 | atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result); |
1571 | } |
1572 | |
1573 | Jump branchAtomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address) |
1574 | { |
1575 | return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address); |
1576 | } |
1577 | |
1578 | Jump branchAtomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address) |
1579 | { |
1580 | return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address); |
1581 | } |
1582 | |
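    // The LOCK prefix makes the following read-modify-write instruction atomic and a
    // full barrier. The atomicAdd64/Sub64/And64/Or64/Xor64/Neg64/Not64 helpers below
    // all follow this pattern and discard the old value.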
1583 | void atomicAdd64(TrustedImm32 imm, Address address) |
1584 | { |
1585 | m_assembler.lock(); |
1586 | add64(imm, address); |
1587 | } |
1588 | |
1589 | void atomicAdd64(TrustedImm32 imm, BaseIndex address) |
1590 | { |
1591 | m_assembler.lock(); |
1592 | add64(imm, address); |
1593 | } |
1594 | |
1595 | void atomicAdd64(RegisterID reg, Address address) |
1596 | { |
1597 | m_assembler.lock(); |
1598 | add64(reg, address); |
1599 | } |
1600 | |
1601 | void atomicAdd64(RegisterID reg, BaseIndex address) |
1602 | { |
1603 | m_assembler.lock(); |
1604 | add64(reg, address); |
1605 | } |
1606 | |
1607 | void atomicSub64(TrustedImm32 imm, Address address) |
1608 | { |
1609 | m_assembler.lock(); |
1610 | sub64(imm, address); |
1611 | } |
1612 | |
1613 | void atomicSub64(TrustedImm32 imm, BaseIndex address) |
1614 | { |
1615 | m_assembler.lock(); |
1616 | sub64(imm, address); |
1617 | } |
1618 | |
1619 | void atomicSub64(RegisterID reg, Address address) |
1620 | { |
1621 | m_assembler.lock(); |
1622 | sub64(reg, address); |
1623 | } |
1624 | |
1625 | void atomicSub64(RegisterID reg, BaseIndex address) |
1626 | { |
1627 | m_assembler.lock(); |
1628 | sub64(reg, address); |
1629 | } |
1630 | |
1631 | void atomicAnd64(TrustedImm32 imm, Address address) |
1632 | { |
1633 | m_assembler.lock(); |
1634 | and64(imm, address); |
1635 | } |
1636 | |
1637 | void atomicAnd64(TrustedImm32 imm, BaseIndex address) |
1638 | { |
1639 | m_assembler.lock(); |
1640 | and64(imm, address); |
1641 | } |
1642 | |
1643 | void atomicAnd64(RegisterID reg, Address address) |
1644 | { |
1645 | m_assembler.lock(); |
1646 | and64(reg, address); |
1647 | } |
1648 | |
1649 | void atomicAnd64(RegisterID reg, BaseIndex address) |
1650 | { |
1651 | m_assembler.lock(); |
1652 | and64(reg, address); |
1653 | } |
1654 | |
1655 | void atomicOr64(TrustedImm32 imm, Address address) |
1656 | { |
1657 | m_assembler.lock(); |
1658 | or64(imm, address); |
1659 | } |
1660 | |
1661 | void atomicOr64(TrustedImm32 imm, BaseIndex address) |
1662 | { |
1663 | m_assembler.lock(); |
1664 | or64(imm, address); |
1665 | } |
1666 | |
1667 | void atomicOr64(RegisterID reg, Address address) |
1668 | { |
1669 | m_assembler.lock(); |
1670 | or64(reg, address); |
1671 | } |
1672 | |
1673 | void atomicOr64(RegisterID reg, BaseIndex address) |
1674 | { |
1675 | m_assembler.lock(); |
1676 | or64(reg, address); |
1677 | } |
1678 | |
1679 | void atomicXor64(TrustedImm32 imm, Address address) |
1680 | { |
1681 | m_assembler.lock(); |
1682 | xor64(imm, address); |
1683 | } |
1684 | |
1685 | void atomicXor64(TrustedImm32 imm, BaseIndex address) |
1686 | { |
1687 | m_assembler.lock(); |
1688 | xor64(imm, address); |
1689 | } |
1690 | |
1691 | void atomicXor64(RegisterID reg, Address address) |
1692 | { |
1693 | m_assembler.lock(); |
1694 | xor64(reg, address); |
1695 | } |
1696 | |
1697 | void atomicXor64(RegisterID reg, BaseIndex address) |
1698 | { |
1699 | m_assembler.lock(); |
1700 | xor64(reg, address); |
1701 | } |
1702 | |
1703 | void atomicNeg64(Address address) |
1704 | { |
1705 | m_assembler.lock(); |
1706 | neg64(address); |
1707 | } |
1708 | |
1709 | void atomicNeg64(BaseIndex address) |
1710 | { |
1711 | m_assembler.lock(); |
1712 | neg64(address); |
1713 | } |
1714 | |
1715 | void atomicNot64(Address address) |
1716 | { |
1717 | m_assembler.lock(); |
1718 | not64(address); |
1719 | } |
1720 | |
1721 | void atomicNot64(BaseIndex address) |
1722 | { |
1723 | m_assembler.lock(); |
1724 | not64(address); |
1725 | } |
1726 | |
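    // LOCK XADD writes old + reg to memory and leaves the old value in reg, i.e. a
    // sequentially consistent fetch_add: reg = atomic_fetch_add(addr, reg).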
1727 | void atomicXchgAdd64(RegisterID reg, Address address) |
1728 | { |
1729 | m_assembler.lock(); |
1730 | m_assembler.xaddq_rm(reg, address.offset, address.base); |
1731 | } |
1732 | |
1733 | void atomicXchgAdd64(RegisterID reg, BaseIndex address) |
1734 | { |
1735 | m_assembler.lock(); |
1736 | m_assembler.xaddq_rm(reg, address.offset, address.base, address.index, address.scale); |
1737 | } |
1738 | |
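    // XCHG with a memory operand asserts LOCK implicitly, so xchg64 above is already
    // atomic; the explicit lock() here is redundant but harmless.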
1739 | void atomicXchg64(RegisterID reg, Address address) |
1740 | { |
1741 | m_assembler.lock(); |
1742 | m_assembler.xchgq_rm(reg, address.offset, address.base); |
1743 | } |
1744 | |
1745 | void atomicXchg64(RegisterID reg, BaseIndex address) |
1746 | { |
1747 | m_assembler.lock(); |
1748 | m_assembler.xchgq_rm(reg, address.offset, address.base, address.index, address.scale); |
1749 | } |
1750 | |
1751 | #if ENABLE(FAST_TLS_JIT) |
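    // gs() emits a GS segment-override prefix, making the following movq GS-relative.
    // On x86-64 Darwin the GS base addresses the thread's TLS area, so this is a fast
    // inline thread-local load/store.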
1752 | void loadFromTLS64(uint32_t offset, RegisterID dst) |
1753 | { |
1754 | m_assembler.gs(); |
1755 | m_assembler.movq_mr(offset, dst); |
1756 | } |
1757 | |
1758 | void storeToTLS64(RegisterID src, uint32_t offset) |
1759 | { |
1760 | m_assembler.gs(); |
1761 | m_assembler.movq_rm(src, offset); |
1762 | } |
1763 | #endif |
1764 | |
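    // cvttsd2siq truncates into a full 64-bit register, so any double in [0, 2^32)
    // converts exactly; the low 32 bits of dest then hold the uint32 result.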
1765 | void truncateDoubleToUint32(FPRegisterID src, RegisterID dest) |
1766 | { |
1767 | m_assembler.cvttsd2siq_rr(src, dest); |
1768 | } |
1769 | |
1770 | void truncateDoubleToInt64(FPRegisterID src, RegisterID dest) |
1771 | { |
1772 | m_assembler.cvttsd2siq_rr(src, dest); |
1773 | } |
1774 | |
    // int64Min should contain exactly 0x43E0000000000000 -- the bit pattern of 2^63,
    // i.e. -static_cast<double>(int64_t::min()). scratch may be the same FPR as src.
1777 | void truncateDoubleToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min) |
1778 | { |
1779 | ASSERT(scratch != int64Min); |
1780 | |
        // Since x86 has no double-to-unsigned-integer conversion instruction, we use
        // the signed conversion. For src in [0, 2^63) the signed conversion already
        // gives the right answer. Otherwise we subtract 2^63, truncate to int64_t, and
        // add 2^63 back by OR-ing the sign bit into the destination GPR.
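        // Worked example: for src = 2^63 + 2048 (the smallest double above 2^63), the
        // large path computes scratch = 2048.0, cvttsd2siq gives 2048, and OR-ing in
        // 0x8000000000000000 yields 0x8000000000000800 == 2^63 + 2048.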
1785 | |
1786 | Jump large = branchDouble(DoubleGreaterThanOrEqual, src, int64Min); |
1787 | m_assembler.cvttsd2siq_rr(src, dest); |
1788 | Jump done = jump(); |
1789 | large.link(this); |
1790 | moveDouble(src, scratch); |
1791 | m_assembler.subsd_rr(int64Min, scratch); |
1792 | m_assembler.movq_i64r(0x8000000000000000, scratchRegister()); |
1793 | m_assembler.cvttsd2siq_rr(scratch, dest); |
1794 | m_assembler.orq_rr(scratchRegister(), dest); |
1795 | done.link(this); |
1796 | } |
1797 | |
1798 | void truncateFloatToUint32(FPRegisterID src, RegisterID dest) |
1799 | { |
1800 | m_assembler.cvttss2siq_rr(src, dest); |
1801 | } |
1802 | |
1803 | void truncateFloatToInt64(FPRegisterID src, RegisterID dest) |
1804 | { |
1805 | m_assembler.cvttss2siq_rr(src, dest); |
1806 | } |
1807 | |
    // int64Min should contain exactly 0x5f000000 -- the bit pattern of 2^63 as a float,
    // i.e. -static_cast<float>(int64_t::min()). scratch may be the same FPR as src.
1810 | void truncateFloatToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min) |
1811 | { |
1812 | ASSERT(scratch != int64Min); |
1813 | |
        // Since x86 has no float-to-unsigned-integer conversion instruction, we use
        // the signed conversion. For src in [0, 2^63) the signed conversion already
        // gives the right answer. Otherwise we subtract 2^63, truncate to int64_t, and
        // add 2^63 back by OR-ing the sign bit into the destination GPR.
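        // Every float in [2^63, 2^64) is a multiple of 2^40, so the subss below is
        // exact and no double rounding can occur.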
1818 | |
1819 | Jump large = branchFloat(DoubleGreaterThanOrEqual, src, int64Min); |
1820 | m_assembler.cvttss2siq_rr(src, dest); |
1821 | Jump done = jump(); |
1822 | large.link(this); |
1823 | moveDouble(src, scratch); |
1824 | m_assembler.subss_rr(int64Min, scratch); |
1825 | m_assembler.movq_i64r(0x8000000000000000, scratchRegister()); |
1826 | m_assembler.cvttss2siq_rr(scratch, dest); |
1827 | m_assembler.orq_rr(scratchRegister(), dest); |
1828 | done.link(this); |
1829 | } |
1830 | |
1831 | void convertInt64ToDouble(RegisterID src, FPRegisterID dest) |
1832 | { |
1833 | m_assembler.cvtsi2sdq_rr(src, dest); |
1834 | } |
1835 | |
1836 | void convertInt64ToDouble(Address src, FPRegisterID dest) |
1837 | { |
1838 | m_assembler.cvtsi2sdq_mr(src.offset, src.base, dest); |
1839 | } |
1840 | |
1841 | void convertInt64ToFloat(RegisterID src, FPRegisterID dest) |
1842 | { |
1843 | m_assembler.cvtsi2ssq_rr(src, dest); |
1844 | } |
1845 | |
1846 | void convertInt64ToFloat(Address src, FPRegisterID dest) |
1847 | { |
1848 | m_assembler.cvtsi2ssq_mr(src.offset, src.base, dest); |
1849 | } |
1850 | |
    // scratch may be the same as src, in which case src is clobbered; scratchRegister()
    // serves as the second scratch.
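    // When the sign bit of src is set, a direct signed conversion would go negative.
    // Instead: halve with a logical shift, OR the shifted-out low bit back in as a
    // sticky bit so round-to-nearest-even still rounds correctly, convert signed, then
    // double the result. Roughly: dest = 2.0 * double((src >> 1) | (src & 1)).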
1852 | void convertUInt64ToDouble(RegisterID src, FPRegisterID dest, RegisterID scratch) |
1853 | { |
1854 | RegisterID scratch2 = scratchRegister(); |
1855 | |
1856 | m_assembler.testq_rr(src, src); |
1857 | AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed)); |
1858 | m_assembler.cvtsi2sdq_rr(src, dest); |
1859 | AssemblerLabel done = m_assembler.jmp(); |
1860 | m_assembler.linkJump(signBitSet, m_assembler.label()); |
1861 | if (scratch != src) |
1862 | m_assembler.movq_rr(src, scratch); |
1863 | m_assembler.movq_rr(src, scratch2); |
1864 | m_assembler.shrq_i8r(1, scratch); |
1865 | m_assembler.andq_ir(1, scratch2); |
1866 | m_assembler.orq_rr(scratch, scratch2); |
1867 | m_assembler.cvtsi2sdq_rr(scratch2, dest); |
1868 | m_assembler.addsd_rr(dest, dest); |
1869 | m_assembler.linkJump(done, m_assembler.label()); |
1870 | } |
1871 | |
    // Same algorithm as convertUInt64ToDouble above, producing a float. scratch may be
    // the same as src, in which case src is clobbered; scratchRegister() serves as the
    // second scratch.
1873 | void convertUInt64ToFloat(RegisterID src, FPRegisterID dest, RegisterID scratch) |
1874 | { |
1875 | RegisterID scratch2 = scratchRegister(); |
1876 | m_assembler.testq_rr(src, src); |
1877 | AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed)); |
1878 | m_assembler.cvtsi2ssq_rr(src, dest); |
1879 | AssemblerLabel done = m_assembler.jmp(); |
1880 | m_assembler.linkJump(signBitSet, m_assembler.label()); |
1881 | if (scratch != src) |
1882 | m_assembler.movq_rr(src, scratch); |
1883 | m_assembler.movq_rr(src, scratch2); |
1884 | m_assembler.shrq_i8r(1, scratch); |
1885 | m_assembler.andq_ir(1, scratch2); |
1886 | m_assembler.orq_rr(scratch, scratch2); |
1887 | m_assembler.cvtsi2ssq_rr(scratch2, dest); |
1888 | m_assembler.addss_rr(dest, dest); |
1889 | m_assembler.linkJump(done, m_assembler.label()); |
1890 | } |
1891 | |
1892 | static bool supportsFloatingPoint() { return true; } |
1893 | static bool supportsFloatingPointTruncate() { return true; } |
1894 | static bool supportsFloatingPointSqrt() { return true; } |
1895 | static bool supportsFloatingPointAbs() { return true; } |
1896 | |
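    // Far calls are emitted as "movq $target, %r11; call *%r11".
    // REPATCH_OFFSET_CALL_R11 (3) is the size of the indirect call; stepping back over
    // it from the call's location reaches the movq's 64-bit immediate, which is read
    // here and rewritten by repatchCall()/linkCall() below.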
1897 | template<PtrTag resultTag, PtrTag locationTag> |
1898 | static FunctionPtr<resultTag> readCallTarget(CodeLocationCall<locationTag> call) |
1899 | { |
1900 | return FunctionPtr<resultTag>(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation())); |
1901 | } |
1902 | |
1903 | bool haveScratchRegisterForBlinding() { return m_allowScratchRegister; } |
1904 | RegisterID scratchRegisterForBlinding() { return scratchRegister(); } |
1905 | |
1906 | static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; } |
1907 | static bool canJumpReplacePatchableBranch32WithPatch() { return true; } |
1908 | |
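    // The patchable compare is preceded by a move of the pointer/immediate into a
    // register: REX prefix + opcode + 8- or 4-byte immediate, as totaled below. The
    // data label points just past the immediate, so the instruction begins totalBytes
    // before it.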
1909 | template<PtrTag tag> |
1910 | static CodeLocationLabel<tag> startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr<tag> label) |
1911 | { |
1912 | const int rexBytes = 1; |
1913 | const int opcodeBytes = 1; |
1914 | const int immediateBytes = 8; |
1915 | const int totalBytes = rexBytes + opcodeBytes + immediateBytes; |
1916 | ASSERT(totalBytes >= maxJumpReplacementSize()); |
1917 | return label.labelAtOffset(-totalBytes); |
1918 | } |
1919 | |
1920 | template<PtrTag tag> |
1921 | static CodeLocationLabel<tag> startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32<tag> label) |
1922 | { |
1923 | const int rexBytes = 1; |
1924 | const int opcodeBytes = 1; |
1925 | const int immediateBytes = 4; |
1926 | const int totalBytes = rexBytes + opcodeBytes + immediateBytes; |
1927 | ASSERT(totalBytes >= maxJumpReplacementSize()); |
1928 | return label.labelAtOffset(-totalBytes); |
1929 | } |
1930 | |
1931 | template<PtrTag tag> |
1932 | static CodeLocationLabel<tag> startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr<tag> label) |
1933 | { |
1934 | return startOfBranchPtrWithPatchOnRegister(label); |
1935 | } |
1936 | |
1937 | template<PtrTag tag> |
1938 | static CodeLocationLabel<tag> startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32<tag> label) |
1939 | { |
1940 | return startOfBranch32WithPatchOnRegister(label); |
1941 | } |
1942 | |
1943 | template<PtrTag tag> |
1944 | static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel<tag> instructionStart, Address, void* initialValue) |
1945 | { |
1946 | X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister); |
1947 | } |
1948 | |
1949 | template<PtrTag tag> |
1950 | static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel<tag> instructionStart, Address, int32_t initialValue) |
1951 | { |
1952 | X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, s_scratchRegister); |
1953 | } |
1954 | |
1955 | template<PtrTag tag> |
1956 | static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel<tag> instructionStart, RegisterID, void* initialValue) |
1957 | { |
1958 | X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister); |
1959 | } |
1960 | |
1961 | template<PtrTag callTag, PtrTag destTag> |
1962 | static void repatchCall(CodeLocationCall<callTag> call, CodeLocationLabel<destTag> destination) |
1963 | { |
1964 | X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress()); |
1965 | } |
1966 | |
1967 | template<PtrTag callTag, PtrTag destTag> |
1968 | static void repatchCall(CodeLocationCall<callTag> call, FunctionPtr<destTag> destination) |
1969 | { |
1970 | X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress()); |
1971 | } |
1972 | |
1973 | private: |
1974 | // If lzcnt is not available, use this after BSR |
1975 | // to count the leading zeros. |
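    // BSR sets ZF when its source is zero (leaving dst undefined), hence the branch
    // that writes 64 directly. For nonzero input, dst holds the index of the highest
    // set bit, and clz = 63 - index == index ^ 0x3f.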
1976 | void clz64AfterBsr(RegisterID dst) |
1977 | { |
1978 | Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero)); |
1979 | move(TrustedImm32(64), dst); |
1980 | |
1981 | Jump skipNonZeroCase = jump(); |
1982 | srcIsNonZero.link(this); |
1983 | xor64(TrustedImm32(0x3f), dst); |
1984 | skipNonZeroCase.link(this); |
1985 | } |
1986 | |
1987 | friend class LinkBuffer; |
1988 | |
1989 | template<PtrTag tag> |
1990 | static void linkCall(void* code, Call call, FunctionPtr<tag> function) |
1991 | { |
1992 | if (!call.isFlagSet(Call::Near)) |
1993 | X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.executableAddress()); |
1994 | else if (call.isFlagSet(Call::Tail)) |
1995 | X86Assembler::linkJump(code, call.m_label, function.executableAddress()); |
1996 | else |
1997 | X86Assembler::linkCall(code, call.m_label, function.executableAddress()); |
1998 | } |
1999 | }; |
2000 | |
2001 | } // namespace JSC |
2002 | |
#endif // ENABLE(ASSEMBLER) && CPU(X86_64)
2004 | |