1 | /* |
2 | * Copyright (C) 2008-2018 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #pragma once |
27 | |
28 | #if ENABLE(ASSEMBLER) && CPU(X86_64) |
29 | |
30 | #include "MacroAssemblerX86Common.h" |
31 | |
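// Byte distance from the end of the patchable "movq imm64, %r11" emitted by
// moveWithPatch() to the end of the "call/jmp *%r11" that follows it; the ASSERTs
// in call() and tailRecursiveCall() below check this invariant, and repatching
// relies on it to locate the 64-bit immediate.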
32 | #define REPATCH_OFFSET_CALL_R11 3 |
33 | |
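// True if the value is encodable as a sign-extended 32-bit immediate. For example,
// CAN_SIGN_EXTEND_32_64(-1) is true, while CAN_SIGN_EXTEND_32_64(0x80000000) is
// false (sign-extending the low 32 bits would yield a negative value).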
34 | inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; } |
35 | |
36 | namespace JSC { |
37 | |
38 | class MacroAssemblerX86_64 : public MacroAssemblerX86Common { |
39 | public: |
40 | static const unsigned numGPRs = 16; |
41 | static const unsigned numFPRs = 16; |
42 | |
43 | static const Scale ScalePtr = TimesEight; |
44 | |
45 | using MacroAssemblerX86Common::add32; |
46 | using MacroAssemblerX86Common::and32; |
47 | using MacroAssemblerX86Common::branch32; |
48 | using MacroAssemblerX86Common::branchAdd32; |
49 | using MacroAssemblerX86Common::or32; |
50 | using MacroAssemblerX86Common::sub32; |
51 | using MacroAssemblerX86Common::load8; |
52 | using MacroAssemblerX86Common::load32; |
53 | using MacroAssemblerX86Common::store32; |
54 | using MacroAssemblerX86Common::store8; |
55 | using MacroAssemblerX86Common::call; |
56 | using MacroAssemblerX86Common::jump; |
57 | using MacroAssemblerX86Common::addDouble; |
58 | using MacroAssemblerX86Common::loadDouble; |
59 | using MacroAssemblerX86Common::convertInt32ToDouble; |
60 | |
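    // Most x86-64 instructions cannot encode a 64-bit absolute address directly
    // (only the EAX/RAX moffs forms of MOV can), so the AbsoluteAddress overloads
    // below first materialize the address into the scratch register.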
61 | void add32(TrustedImm32 imm, AbsoluteAddress address) |
62 | { |
63 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
64 | add32(imm, Address(scratchRegister())); |
65 | } |
66 | |
67 | void and32(TrustedImm32 imm, AbsoluteAddress address) |
68 | { |
69 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
70 | and32(imm, Address(scratchRegister())); |
71 | } |
72 | |
73 | void add32(AbsoluteAddress address, RegisterID dest) |
74 | { |
75 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
76 | add32(Address(scratchRegister()), dest); |
77 | } |
78 | |
79 | void or32(TrustedImm32 imm, AbsoluteAddress address) |
80 | { |
81 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
82 | or32(imm, Address(scratchRegister())); |
83 | } |
84 | |
85 | void or32(RegisterID reg, AbsoluteAddress address) |
86 | { |
87 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
88 | or32(reg, Address(scratchRegister())); |
89 | } |
90 | |
91 | void sub32(TrustedImm32 imm, AbsoluteAddress address) |
92 | { |
93 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
94 | sub32(imm, Address(scratchRegister())); |
95 | } |
96 | |
97 | void load8(const void* address, RegisterID dest) |
98 | { |
99 | move(TrustedImmPtr(address), dest); |
100 | load8(dest, dest); |
101 | } |
102 | |
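    // ExtendedAddress pairs an absolute offset with an index register: the 64-bit
    // offset is materialized into the scratch register and used as the base, while
    // address.base serves as the index (scaled by two here, for 16-bit elements).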
103 | void load16(ExtendedAddress address, RegisterID dest) |
104 | { |
105 | TrustedImmPtr addr(reinterpret_cast<void*>(address.offset)); |
106 | MacroAssemblerX86Common::move(addr, scratchRegister()); |
107 | MacroAssemblerX86Common::load16(BaseIndex(scratchRegister(), address.base, TimesTwo), dest); |
108 | } |
109 | |
110 | void load16(BaseIndex address, RegisterID dest) |
111 | { |
112 | MacroAssemblerX86Common::load16(address, dest); |
113 | } |
114 | |
115 | void load16(Address address, RegisterID dest) |
116 | { |
117 | MacroAssemblerX86Common::load16(address, dest); |
118 | } |
119 | |
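    // When the destination is eax we can use the moffs form of MOV (movl_mEAX),
    // which encodes the 64-bit absolute address in the instruction itself instead
    // of materializing it into a register first.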
120 | void load32(const void* address, RegisterID dest) |
121 | { |
122 | if (dest == X86Registers::eax) |
123 | m_assembler.movl_mEAX(address); |
124 | else { |
125 | move(TrustedImmPtr(address), dest); |
126 | load32(dest, dest); |
127 | } |
128 | } |
129 | |
130 | void addDouble(AbsoluteAddress address, FPRegisterID dest) |
131 | { |
132 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
133 | m_assembler.addsd_mr(0, scratchRegister(), dest); |
134 | } |
135 | |
136 | void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest) |
137 | { |
138 | move(imm, scratchRegister()); |
139 | m_assembler.cvtsi2sd_rr(scratchRegister(), dest); |
140 | } |
141 | |
142 | void store32(TrustedImm32 imm, void* address) |
143 | { |
144 | move(TrustedImmPtr(address), scratchRegister()); |
145 | store32(imm, scratchRegister()); |
146 | } |
147 | |
148 | void store32(RegisterID source, void* address) |
149 | { |
150 | if (source == X86Registers::eax) |
151 | m_assembler.movl_EAXm(address); |
152 | else { |
153 | move(TrustedImmPtr(address), scratchRegister()); |
154 | store32(source, scratchRegister()); |
155 | } |
156 | } |
157 | |
158 | void store8(TrustedImm32 imm, void* address) |
159 | { |
160 | TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); |
161 | move(TrustedImmPtr(address), scratchRegister()); |
162 | store8(imm8, Address(scratchRegister())); |
163 | } |
164 | |
165 | void store8(RegisterID reg, void* address) |
166 | { |
167 | move(TrustedImmPtr(address), scratchRegister()); |
168 | store8(reg, Address(scratchRegister())); |
169 | } |
170 | |
171 | #if OS(WINDOWS) |
172 | Call callWithSlowPathReturnType(PtrTag) |
173 | { |
        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
        // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right:
        // rdx should contain the first argument, r8 the second, and r9 the third.
        // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
        // We then need to copy the 16-byte return value into rax and rdx, since the JIT expects the return value to be split between the two.
        // It is assumed that the parameters are already shifted to the right when entering this method.
        // Note: this implementation supports up to 3 parameters.
181 | |
        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to copy the frame pointer to the stack manually, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a frame pointer.
185 | store64(X86Registers::ebp, Address(X86Registers::esp, -16)); |
186 | |
187 | // We also need to allocate the shadow space on the stack for the 4 parameter registers. |
188 | // In addition, we need to allocate 16 bytes for the return value. |
189 | // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated). |
190 | sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); |
191 | |
192 | // The first parameter register should contain a pointer to the stack allocated space for the return value. |
193 | move(X86Registers::esp, X86Registers::ecx); |
194 | add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx); |
195 | |
196 | DataLabelPtr label = moveWithPatch(TrustedImmPtr(nullptr), scratchRegister()); |
197 | Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable); |
198 | |
199 | add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); |
200 | |
201 | // Copy the return value into rax and rdx. |
202 | load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx); |
203 | load64(Address(X86Registers::eax), X86Registers::eax); |
204 | |
205 | ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11); |
206 | return result; |
207 | } |
208 | #endif |
209 | |
210 | Call call(PtrTag) |
211 | { |
212 | #if OS(WINDOWS) |
        // The JIT relies on the CallerFrame (frame pointer) being put on the stack.
        // On Win64 we need to copy the frame pointer to the stack manually, since MSVC may not maintain a frame pointer on 64-bit.
        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx, which states that rbp MAY be used as a frame pointer.
216 | store64(X86Registers::ebp, Address(X86Registers::esp, -16)); |
217 | |
        // On Windows we need to copy the arguments that don't fit in registers to the stack location where the callee expects to find them.
        // We don't know the number of arguments at this point, so arguments 5 and 6 (the most this call supports) are always copied.
220 | |
221 | // Copy argument 5 |
222 | load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister()); |
223 | store64(scratchRegister(), Address(X86Registers::esp, -4 * static_cast<int32_t>(sizeof(int64_t)))); |
224 | |
225 | // Copy argument 6 |
226 | load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister()); |
227 | store64(scratchRegister(), Address(X86Registers::esp, -3 * static_cast<int32_t>(sizeof(int64_t)))); |
228 | |
229 | // We also need to allocate the shadow space on the stack for the 4 parameter registers. |
230 | // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated). |
231 | // In addition, we need to allocate 16 bytes for two more parameters, since the call can have up to 6 parameters. |
232 | sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); |
233 | #endif |
234 | DataLabelPtr label = moveWithPatch(TrustedImmPtr(nullptr), scratchRegister()); |
235 | Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable); |
236 | #if OS(WINDOWS) |
237 | add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); |
238 | #endif |
239 | ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11); |
240 | return result; |
241 | } |
242 | |
243 | ALWAYS_INLINE Call call(RegisterID callTag) { return UNUSED_PARAM(callTag), call(NoPtrTag); } |
244 | |
245 | // Address is a memory location containing the address to jump to |
246 | void jump(AbsoluteAddress address, PtrTag tag) |
247 | { |
248 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
249 | jump(Address(scratchRegister()), tag); |
250 | } |
251 | |
252 | ALWAYS_INLINE void jump(AbsoluteAddress address, RegisterID jumpTag) { UNUSED_PARAM(jumpTag), jump(address, NoPtrTag); } |
253 | |
254 | Call tailRecursiveCall() |
255 | { |
256 | DataLabelPtr label = moveWithPatch(TrustedImmPtr(nullptr), scratchRegister()); |
257 | Jump newJump = Jump(m_assembler.jmp_r(scratchRegister())); |
258 | ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11); |
259 | return Call::fromTailJump(newJump); |
260 | } |
261 | |
262 | Call makeTailRecursiveCall(Jump oldJump) |
263 | { |
264 | oldJump.link(this); |
265 | DataLabelPtr label = moveWithPatch(TrustedImmPtr(nullptr), scratchRegister()); |
266 | Jump newJump = Jump(m_assembler.jmp_r(scratchRegister())); |
267 | ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11); |
268 | return Call::fromTailJump(newJump); |
269 | } |
270 | |
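    // Emits a near call whose 32-bit relative displacement is 4-byte aligned, so
    // that the displacement can later be repatched with a single aligned 32-bit
    // store, which x86 performs atomically; hence "thread safe".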
271 | Call threadSafePatchableNearCall() |
272 | { |
273 | const size_t nearCallOpcodeSize = 1; |
274 | const size_t nearCallRelativeLocationSize = sizeof(int32_t); |
275 | // We want to make sure the 32-bit near call immediate is 32-bit aligned. |
276 | size_t codeSize = m_assembler.codeSize(); |
277 | size_t alignedSize = WTF::roundUpToMultipleOf<nearCallRelativeLocationSize>(codeSize + nearCallOpcodeSize); |
278 | emitNops(alignedSize - (codeSize + nearCallOpcodeSize)); |
279 | DataLabelPtr label = DataLabelPtr(this); |
280 | Call result = nearCall(); |
281 | ASSERT_UNUSED(label, differenceBetween(label, result) == (nearCallOpcodeSize + nearCallRelativeLocationSize)); |
282 | return result; |
283 | } |
284 | |
285 | Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest) |
286 | { |
287 | move(TrustedImmPtr(dest.m_ptr), scratchRegister()); |
288 | add32(src, Address(scratchRegister())); |
289 | return Jump(m_assembler.jCC(x86Condition(cond))); |
290 | } |
291 | |
292 | void add64(RegisterID src, RegisterID dest) |
293 | { |
294 | m_assembler.addq_rr(src, dest); |
295 | } |
296 | |
297 | void add64(Address src, RegisterID dest) |
298 | { |
299 | m_assembler.addq_mr(src.offset, src.base, dest); |
300 | } |
301 | |
302 | void add64(BaseIndex src, RegisterID dest) |
303 | { |
304 | m_assembler.addq_mr(src.offset, src.base, src.index, src.scale, dest); |
305 | } |
306 | |
307 | void add64(RegisterID src, Address dest) |
308 | { |
309 | m_assembler.addq_rm(src, dest.offset, dest.base); |
310 | } |
311 | |
312 | void add64(RegisterID src, BaseIndex dest) |
313 | { |
314 | m_assembler.addq_rm(src, dest.offset, dest.base, dest.index, dest.scale); |
315 | } |
316 | |
317 | void add64(AbsoluteAddress src, RegisterID dest) |
318 | { |
319 | move(TrustedImmPtr(src.m_ptr), scratchRegister()); |
320 | add64(Address(scratchRegister()), dest); |
321 | } |
322 | |
323 | void add64(TrustedImm32 imm, RegisterID srcDest) |
324 | { |
325 | if (imm.m_value == 1) |
326 | m_assembler.incq_r(srcDest); |
327 | else |
328 | m_assembler.addq_ir(imm.m_value, srcDest); |
329 | } |
330 | |
331 | void add64(TrustedImm64 imm, RegisterID dest) |
332 | { |
333 | if (imm.m_value == 1) |
334 | m_assembler.incq_r(dest); |
335 | else { |
336 | move(imm, scratchRegister()); |
337 | add64(scratchRegister(), dest); |
338 | } |
339 | } |
340 | |
341 | void add64(TrustedImm32 imm, RegisterID src, RegisterID dest) |
342 | { |
343 | m_assembler.leaq_mr(imm.m_value, src, dest); |
344 | } |
345 | |
346 | void add64(TrustedImm32 imm, Address address) |
347 | { |
348 | if (imm.m_value == 1) |
349 | m_assembler.incq_m(address.offset, address.base); |
350 | else |
351 | m_assembler.addq_im(imm.m_value, address.offset, address.base); |
352 | } |
353 | |
354 | void add64(TrustedImm32 imm, BaseIndex address) |
355 | { |
356 | if (imm.m_value == 1) |
357 | m_assembler.incq_m(address.offset, address.base, address.index, address.scale); |
358 | else |
359 | m_assembler.addq_im(imm.m_value, address.offset, address.base, address.index, address.scale); |
360 | } |
361 | |
362 | void add64(TrustedImm32 imm, AbsoluteAddress address) |
363 | { |
364 | move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
365 | add64(imm, Address(scratchRegister())); |
366 | } |
367 | |
368 | void add64(RegisterID a, RegisterID b, RegisterID dest) |
369 | { |
370 | x86Lea64(BaseIndex(a, b, TimesOne), dest); |
371 | } |
372 | |
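    // Peephole: with no scale and no offset, the LEA degenerates into a plain
    // 64-bit add whenever the base or the index already lives in dest.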
373 | void x86Lea64(BaseIndex index, RegisterID dest) |
374 | { |
375 | if (!index.scale && !index.offset) { |
376 | if (index.base == dest) { |
377 | add64(index.index, dest); |
378 | return; |
379 | } |
380 | if (index.index == dest) { |
381 | add64(index.base, dest); |
382 | return; |
383 | } |
384 | } |
385 | m_assembler.leaq_mr(index.offset, index.base, index.index, index.scale, dest); |
386 | } |
387 | |
388 | void getEffectiveAddress(BaseIndex address, RegisterID dest) |
389 | { |
390 | return x86Lea64(address, dest); |
391 | } |
392 | |
393 | void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest) |
394 | { |
395 | m_assembler.leaq_mr(imm.m_value, srcDest, srcDest); |
396 | } |
397 | |
398 | void and64(RegisterID src, RegisterID dest) |
399 | { |
400 | m_assembler.andq_rr(src, dest); |
401 | } |
402 | |
403 | void and64(RegisterID src, Address dest) |
404 | { |
405 | m_assembler.andq_rm(src, dest.offset, dest.base); |
406 | } |
407 | |
408 | void and64(RegisterID src, BaseIndex dest) |
409 | { |
410 | m_assembler.andq_rm(src, dest.offset, dest.base, dest.index, dest.scale); |
411 | } |
412 | |
413 | void and64(Address src, RegisterID dest) |
414 | { |
415 | m_assembler.andq_mr(src.offset, src.base, dest); |
416 | } |
417 | |
418 | void and64(BaseIndex src, RegisterID dest) |
419 | { |
420 | m_assembler.andq_mr(src.offset, src.base, src.index, src.scale, dest); |
421 | } |
422 | |
423 | void and64(TrustedImm32 imm, RegisterID srcDest) |
424 | { |
425 | m_assembler.andq_ir(imm.m_value, srcDest); |
426 | } |
427 | |
428 | void and64(TrustedImm32 imm, Address dest) |
429 | { |
430 | m_assembler.andq_im(imm.m_value, dest.offset, dest.base); |
431 | } |
432 | |
433 | void and64(TrustedImm32 imm, BaseIndex dest) |
434 | { |
435 | m_assembler.andq_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale); |
436 | } |
437 | |
438 | void and64(TrustedImmPtr imm, RegisterID srcDest) |
439 | { |
440 | intptr_t intValue = imm.asIntptr(); |
441 | if (intValue <= std::numeric_limits<int32_t>::max() |
442 | && intValue >= std::numeric_limits<int32_t>::min()) { |
443 | and64(TrustedImm32(static_cast<int32_t>(intValue)), srcDest); |
444 | return; |
445 | } |
446 | move(imm, scratchRegister()); |
447 | and64(scratchRegister(), srcDest); |
448 | } |
449 | |
450 | void and64(RegisterID op1, RegisterID op2, RegisterID dest) |
451 | { |
452 | if (op1 == op2 && op1 != dest && op2 != dest) |
453 | move(op1, dest); |
454 | else if (op1 == dest) |
455 | and64(op2, dest); |
456 | else { |
457 | move(op2, dest); |
458 | and64(op1, dest); |
459 | } |
460 | } |
461 | |
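    // LZCNT is well-defined for a zero input (it returns 64). The BSR fallback
    // leaves the destination undefined when the source is zero, so clz64AfterBsr()
    // fixes up the result on CPUs without LZCNT; countTrailingZeros64 plays the
    // analogous TZCNT/BSF game.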
462 | void countLeadingZeros64(RegisterID src, RegisterID dst) |
463 | { |
464 | if (supportsLZCNT()) { |
465 | m_assembler.lzcntq_rr(src, dst); |
466 | return; |
467 | } |
468 | m_assembler.bsrq_rr(src, dst); |
469 | clz64AfterBsr(dst); |
470 | } |
471 | |
472 | void countLeadingZeros64(Address src, RegisterID dst) |
473 | { |
474 | if (supportsLZCNT()) { |
475 | m_assembler.lzcntq_mr(src.offset, src.base, dst); |
476 | return; |
477 | } |
478 | m_assembler.bsrq_mr(src.offset, src.base, dst); |
479 | clz64AfterBsr(dst); |
480 | } |
481 | |
482 | void countTrailingZeros64(RegisterID src, RegisterID dst) |
483 | { |
484 | if (supportsBMI1()) { |
485 | m_assembler.tzcntq_rr(src, dst); |
486 | return; |
487 | } |
488 | m_assembler.bsfq_rr(src, dst); |
489 | ctzAfterBsf<64>(dst); |
490 | } |
491 | |
492 | void countPopulation64(RegisterID src, RegisterID dst) |
493 | { |
494 | ASSERT(supportsCountPopulation()); |
495 | m_assembler.popcntq_rr(src, dst); |
496 | } |
497 | |
498 | void countPopulation64(Address src, RegisterID dst) |
499 | { |
500 | ASSERT(supportsCountPopulation()); |
501 | m_assembler.popcntq_mr(src.offset, src.base, dst); |
502 | } |
503 | |
504 | void lshift64(TrustedImm32 imm, RegisterID dest) |
505 | { |
506 | m_assembler.shlq_i8r(imm.m_value, dest); |
507 | } |
508 | |
509 | void lshift64(RegisterID src, RegisterID dest) |
510 | { |
511 | if (src == X86Registers::ecx) |
512 | m_assembler.shlq_CLr(dest); |
513 | else { |
514 | ASSERT(src != dest); |
515 | |
516 | // Can only shift by ecx, so we do some swapping if we see anything else. |
517 | swap(src, X86Registers::ecx); |
518 | m_assembler.shlq_CLr(dest == X86Registers::ecx ? src : dest); |
519 | swap(src, X86Registers::ecx); |
520 | } |
521 | } |
522 | |
523 | void rshift64(TrustedImm32 imm, RegisterID dest) |
524 | { |
525 | m_assembler.sarq_i8r(imm.m_value, dest); |
526 | } |
527 | |
528 | void rshift64(RegisterID src, RegisterID dest) |
529 | { |
530 | if (src == X86Registers::ecx) |
531 | m_assembler.sarq_CLr(dest); |
532 | else { |
533 | ASSERT(src != dest); |
534 | |
535 | // Can only shift by ecx, so we do some swapping if we see anything else. |
536 | swap(src, X86Registers::ecx); |
537 | m_assembler.sarq_CLr(dest == X86Registers::ecx ? src : dest); |
538 | swap(src, X86Registers::ecx); |
539 | } |
540 | } |
541 | |
542 | void urshift64(TrustedImm32 imm, RegisterID dest) |
543 | { |
544 | m_assembler.shrq_i8r(imm.m_value, dest); |
545 | } |
546 | |
547 | void urshift64(RegisterID src, RegisterID dest) |
548 | { |
549 | if (src == X86Registers::ecx) |
550 | m_assembler.shrq_CLr(dest); |
551 | else { |
552 | ASSERT(src != dest); |
553 | |
554 | // Can only shift by ecx, so we do some swapping if we see anything else. |
555 | swap(src, X86Registers::ecx); |
556 | m_assembler.shrq_CLr(dest == X86Registers::ecx ? src : dest); |
557 | swap(src, X86Registers::ecx); |
558 | } |
559 | } |
560 | |
561 | void rotateRight64(TrustedImm32 imm, RegisterID dest) |
562 | { |
563 | m_assembler.rorq_i8r(imm.m_value, dest); |
564 | } |
565 | |
566 | void rotateRight64(RegisterID src, RegisterID dest) |
567 | { |
568 | if (src == X86Registers::ecx) |
569 | m_assembler.rorq_CLr(dest); |
570 | else { |
571 | ASSERT(src != dest); |
572 | |
573 | // Can only rotate by ecx, so we do some swapping if we see anything else. |
574 | swap(src, X86Registers::ecx); |
575 | m_assembler.rorq_CLr(dest == X86Registers::ecx ? src : dest); |
576 | swap(src, X86Registers::ecx); |
577 | } |
578 | } |
579 | |
580 | void rotateLeft64(TrustedImm32 imm, RegisterID dest) |
581 | { |
582 | m_assembler.rolq_i8r(imm.m_value, dest); |
583 | } |
584 | |
585 | void rotateLeft64(RegisterID src, RegisterID dest) |
586 | { |
587 | if (src == X86Registers::ecx) |
588 | m_assembler.rolq_CLr(dest); |
589 | else { |
590 | ASSERT(src != dest); |
591 | |
592 | // Can only rotate by ecx, so we do some swapping if we see anything else. |
593 | swap(src, X86Registers::ecx); |
594 | m_assembler.rolq_CLr(dest == X86Registers::ecx ? src : dest); |
595 | swap(src, X86Registers::ecx); |
596 | } |
597 | } |
598 | |
599 | void mul64(RegisterID src, RegisterID dest) |
600 | { |
601 | m_assembler.imulq_rr(src, dest); |
602 | } |
603 | |
604 | void mul64(RegisterID src1, RegisterID src2, RegisterID dest) |
605 | { |
606 | if (src2 == dest) { |
607 | m_assembler.imulq_rr(src1, dest); |
608 | return; |
609 | } |
610 | move(src1, dest); |
611 | m_assembler.imulq_rr(src2, dest); |
612 | } |
613 | |
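    // CQO sign-extends rax into rdx:rax, forming the 128-bit dividend that idivq
    // expects. A typical signed division sequence (a sketch; `jit` and `divisor`
    // are illustrative names):
    //     jit.x86ConvertToQuadWord64(); // rdx:rax <- sign-extend(rax)
    //     jit.x86Div64(divisor);        // rax <- quotient, rdx <- remainder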
614 | void x86ConvertToQuadWord64() |
615 | { |
616 | m_assembler.cqo(); |
617 | } |
618 | |
619 | void x86ConvertToQuadWord64(RegisterID rax, RegisterID rdx) |
620 | { |
621 | ASSERT_UNUSED(rax, rax == X86Registers::eax); |
622 | ASSERT_UNUSED(rdx, rdx == X86Registers::edx); |
623 | x86ConvertToQuadWord64(); |
624 | } |
625 | |
626 | void x86Div64(RegisterID denominator) |
627 | { |
628 | m_assembler.idivq_r(denominator); |
629 | } |
630 | |
631 | void x86Div64(RegisterID rax, RegisterID rdx, RegisterID denominator) |
632 | { |
633 | ASSERT_UNUSED(rax, rax == X86Registers::eax); |
634 | ASSERT_UNUSED(rdx, rdx == X86Registers::edx); |
635 | x86Div64(denominator); |
636 | } |
637 | |
638 | void x86UDiv64(RegisterID denominator) |
639 | { |
640 | m_assembler.divq_r(denominator); |
641 | } |
642 | |
643 | void x86UDiv64(RegisterID rax, RegisterID rdx, RegisterID denominator) |
644 | { |
645 | ASSERT_UNUSED(rax, rax == X86Registers::eax); |
646 | ASSERT_UNUSED(rdx, rdx == X86Registers::edx); |
647 | x86UDiv64(denominator); |
648 | } |
649 | |
650 | void neg64(RegisterID dest) |
651 | { |
652 | m_assembler.negq_r(dest); |
653 | } |
654 | |
655 | void neg64(RegisterID src, RegisterID dest) |
656 | { |
657 | move(src, dest); |
658 | m_assembler.negq_r(dest); |
659 | } |
660 | |
661 | void neg64(Address dest) |
662 | { |
663 | m_assembler.negq_m(dest.offset, dest.base); |
664 | } |
665 | |
666 | void neg64(BaseIndex dest) |
667 | { |
668 | m_assembler.negq_m(dest.offset, dest.base, dest.index, dest.scale); |
669 | } |
670 | |
671 | void or64(RegisterID src, RegisterID dest) |
672 | { |
673 | m_assembler.orq_rr(src, dest); |
674 | } |
675 | |
676 | void or64(RegisterID src, Address dest) |
677 | { |
678 | m_assembler.orq_rm(src, dest.offset, dest.base); |
679 | } |
680 | |
681 | void or64(RegisterID src, BaseIndex dest) |
682 | { |
683 | m_assembler.orq_rm(src, dest.offset, dest.base, dest.index, dest.scale); |
684 | } |
685 | |
686 | void or64(Address src, RegisterID dest) |
687 | { |
688 | m_assembler.orq_mr(src.offset, src.base, dest); |
689 | } |
690 | |
691 | void or64(BaseIndex src, RegisterID dest) |
692 | { |
693 | m_assembler.orq_mr(src.offset, src.base, src.index, src.scale, dest); |
694 | } |
695 | |
696 | void or64(TrustedImm32 imm, Address dest) |
697 | { |
698 | m_assembler.orq_im(imm.m_value, dest.offset, dest.base); |
699 | } |
700 | |
701 | void or64(TrustedImm32 imm, BaseIndex dest) |
702 | { |
703 | m_assembler.orq_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale); |
704 | } |
705 | |
706 | void or64(TrustedImm64 imm, RegisterID srcDest) |
707 | { |
708 | if (imm.m_value <= std::numeric_limits<int32_t>::max() |
709 | && imm.m_value >= std::numeric_limits<int32_t>::min()) { |
710 | or64(TrustedImm32(static_cast<int32_t>(imm.m_value)), srcDest); |
711 | return; |
712 | } |
713 | move(imm, scratchRegister()); |
714 | or64(scratchRegister(), srcDest); |
715 | } |
716 | |
717 | void or64(TrustedImm32 imm, RegisterID dest) |
718 | { |
719 | m_assembler.orq_ir(imm.m_value, dest); |
720 | } |
721 | |
722 | void or64(RegisterID op1, RegisterID op2, RegisterID dest) |
723 | { |
724 | if (op1 == op2) |
725 | move(op1, dest); |
726 | else if (op1 == dest) |
727 | or64(op2, dest); |
728 | else { |
729 | move(op2, dest); |
730 | or64(op1, dest); |
731 | } |
732 | } |
733 | |
734 | void or64(TrustedImm32 imm, RegisterID src, RegisterID dest) |
735 | { |
736 | move(src, dest); |
737 | or64(imm, dest); |
738 | } |
739 | |
740 | void sub64(RegisterID src, RegisterID dest) |
741 | { |
742 | m_assembler.subq_rr(src, dest); |
743 | } |
744 | |
745 | void sub64(TrustedImm32 imm, RegisterID dest) |
746 | { |
747 | if (imm.m_value == 1) |
748 | m_assembler.decq_r(dest); |
749 | else |
750 | m_assembler.subq_ir(imm.m_value, dest); |
751 | } |
752 | |
753 | void sub64(TrustedImm64 imm, RegisterID dest) |
754 | { |
755 | if (imm.m_value == 1) |
756 | m_assembler.decq_r(dest); |
757 | else { |
758 | move(imm, scratchRegister()); |
759 | sub64(scratchRegister(), dest); |
760 | } |
761 | } |
762 | |
763 | void sub64(TrustedImm32 imm, Address address) |
764 | { |
765 | m_assembler.subq_im(imm.m_value, address.offset, address.base); |
766 | } |
767 | |
768 | void sub64(TrustedImm32 imm, BaseIndex address) |
769 | { |
770 | m_assembler.subq_im(imm.m_value, address.offset, address.base, address.index, address.scale); |
771 | } |
772 | |
773 | void sub64(Address src, RegisterID dest) |
774 | { |
775 | m_assembler.subq_mr(src.offset, src.base, dest); |
776 | } |
777 | |
778 | void sub64(BaseIndex src, RegisterID dest) |
779 | { |
780 | m_assembler.subq_mr(src.offset, src.base, src.index, src.scale, dest); |
781 | } |
782 | |
783 | void sub64(RegisterID src, Address dest) |
784 | { |
785 | m_assembler.subq_rm(src, dest.offset, dest.base); |
786 | } |
787 | |
788 | void sub64(RegisterID src, BaseIndex dest) |
789 | { |
790 | m_assembler.subq_rm(src, dest.offset, dest.base, dest.index, dest.scale); |
791 | } |
792 | |
793 | void xor64(RegisterID src, RegisterID dest) |
794 | { |
795 | m_assembler.xorq_rr(src, dest); |
796 | } |
797 | |
798 | void xor64(RegisterID op1, RegisterID op2, RegisterID dest) |
799 | { |
800 | if (op1 == op2) |
801 | move(TrustedImm32(0), dest); |
802 | else if (op1 == dest) |
803 | xor64(op2, dest); |
804 | else { |
805 | move(op2, dest); |
806 | xor64(op1, dest); |
807 | } |
808 | } |
809 | |
810 | void xor64(RegisterID src, Address dest) |
811 | { |
812 | m_assembler.xorq_rm(src, dest.offset, dest.base); |
813 | } |
814 | |
815 | void xor64(RegisterID src, BaseIndex dest) |
816 | { |
817 | m_assembler.xorq_rm(src, dest.offset, dest.base, dest.index, dest.scale); |
818 | } |
819 | |
820 | void xor64(Address src, RegisterID dest) |
821 | { |
822 | m_assembler.xorq_mr(src.offset, src.base, dest); |
823 | } |
824 | |
825 | void xor64(BaseIndex src, RegisterID dest) |
826 | { |
827 | m_assembler.xorq_mr(src.offset, src.base, src.index, src.scale, dest); |
828 | } |
829 | |
830 | void xor64(TrustedImm32 imm, Address dest) |
831 | { |
832 | m_assembler.xorq_im(imm.m_value, dest.offset, dest.base); |
833 | } |
834 | |
835 | void xor64(TrustedImm32 imm, BaseIndex dest) |
836 | { |
837 | m_assembler.xorq_im(imm.m_value, dest.offset, dest.base, dest.index, dest.scale); |
838 | } |
839 | |
840 | void xor64(TrustedImm32 imm, RegisterID srcDest) |
841 | { |
842 | m_assembler.xorq_ir(imm.m_value, srcDest); |
843 | } |
844 | |
845 | void xor64(TrustedImm64 imm, RegisterID srcDest) |
846 | { |
847 | move(imm, scratchRegister()); |
848 | xor64(scratchRegister(), srcDest); |
849 | } |
850 | |
851 | void not64(RegisterID srcDest) |
852 | { |
853 | m_assembler.notq_r(srcDest); |
854 | } |
855 | |
856 | void not64(Address dest) |
857 | { |
858 | m_assembler.notq_m(dest.offset, dest.base); |
859 | } |
860 | |
861 | void not64(BaseIndex dest) |
862 | { |
863 | m_assembler.notq_m(dest.offset, dest.base, dest.index, dest.scale); |
864 | } |
865 | |
866 | void load64(ImplicitAddress address, RegisterID dest) |
867 | { |
868 | m_assembler.movq_mr(address.offset, address.base, dest); |
869 | } |
870 | |
871 | void load64(BaseIndex address, RegisterID dest) |
872 | { |
873 | m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest); |
874 | } |
875 | |
876 | void load64(const void* address, RegisterID dest) |
877 | { |
878 | if (dest == X86Registers::eax) |
879 | m_assembler.movq_mEAX(address); |
880 | else { |
881 | move(TrustedImmPtr(address), dest); |
882 | load64(dest, dest); |
883 | } |
884 | } |
885 | |
886 | DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest) |
887 | { |
888 | padBeforePatch(); |
889 | m_assembler.movq_mr_disp32(address.offset, address.base, dest); |
890 | return DataLabel32(this); |
891 | } |
892 | |
893 | DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest) |
894 | { |
895 | padBeforePatch(); |
896 | m_assembler.movq_mr_disp8(address.offset, address.base, dest); |
897 | return DataLabelCompact(this); |
898 | } |
899 | |
900 | void store64(RegisterID src, ImplicitAddress address) |
901 | { |
902 | m_assembler.movq_rm(src, address.offset, address.base); |
903 | } |
904 | |
905 | void store64(RegisterID src, BaseIndex address) |
906 | { |
907 | m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale); |
908 | } |
909 | |
910 | void store64(RegisterID src, void* address) |
911 | { |
912 | if (src == X86Registers::eax) |
913 | m_assembler.movq_EAXm(address); |
914 | else { |
915 | move(TrustedImmPtr(address), scratchRegister()); |
916 | store64(src, scratchRegister()); |
917 | } |
918 | } |
919 | |
920 | void store64(TrustedImm32 imm, ImplicitAddress address) |
921 | { |
922 | m_assembler.movq_i32m(imm.m_value, address.offset, address.base); |
923 | } |
924 | |
925 | void store64(TrustedImm32 imm, BaseIndex address) |
926 | { |
927 | m_assembler.movq_i32m(imm.m_value, address.offset, address.base, address.index, address.scale); |
928 | } |
929 | |
930 | void store64(TrustedImm64 imm, ImplicitAddress address) |
931 | { |
932 | if (CAN_SIGN_EXTEND_32_64(imm.m_value)) { |
933 | store64(TrustedImm32(static_cast<int32_t>(imm.m_value)), address); |
934 | return; |
935 | } |
936 | |
937 | move(imm, scratchRegister()); |
938 | store64(scratchRegister(), address); |
939 | } |
940 | |
941 | void store64(TrustedImm64 imm, BaseIndex address) |
942 | { |
943 | move(imm, scratchRegister()); |
944 | m_assembler.movq_rm(scratchRegister(), address.offset, address.base, address.index, address.scale); |
945 | } |
946 | |
947 | void storeZero64(ImplicitAddress address) |
948 | { |
949 | store64(TrustedImm32(0), address); |
950 | } |
951 | |
952 | void storeZero64(BaseIndex address) |
953 | { |
954 | store64(TrustedImm32(0), address); |
955 | } |
956 | |
957 | DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address) |
958 | { |
959 | padBeforePatch(); |
960 | m_assembler.movq_rm_disp32(src, address.offset, address.base); |
961 | return DataLabel32(this); |
962 | } |
963 | |
964 | void swap64(RegisterID src, RegisterID dest) |
965 | { |
966 | m_assembler.xchgq_rr(src, dest); |
967 | } |
968 | |
969 | void swap64(RegisterID src, Address dest) |
970 | { |
971 | m_assembler.xchgq_rm(src, dest.offset, dest.base); |
972 | } |
973 | |
974 | void move64ToDouble(RegisterID src, FPRegisterID dest) |
975 | { |
976 | m_assembler.movq_rr(src, dest); |
977 | } |
978 | |
979 | void moveDoubleTo64(FPRegisterID src, RegisterID dest) |
980 | { |
981 | m_assembler.movq_rr(src, dest); |
982 | } |
983 | |
984 | void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) |
985 | { |
986 | if (!right.m_value) { |
987 | if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { |
988 | test64(*resultCondition, left, left, dest); |
989 | return; |
990 | } |
991 | } |
992 | |
993 | m_assembler.cmpq_ir(right.m_value, left); |
994 | set32(x86Condition(cond), dest); |
995 | } |
996 | |
997 | void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) |
998 | { |
999 | m_assembler.cmpq_rr(right, left); |
1000 | set32(x86Condition(cond), dest); |
1001 | } |
1002 | |
1003 | Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right) |
1004 | { |
1005 | m_assembler.cmpq_rr(right, left); |
1006 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1007 | } |
1008 | |
1009 | Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right) |
1010 | { |
1011 | if (!right.m_value) { |
1012 | if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) |
1013 | return branchTest64(*resultCondition, left, left); |
1014 | } |
1015 | m_assembler.cmpq_ir(right.m_value, left); |
1016 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1017 | } |
1018 | |
1019 | Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right) |
1020 | { |
1021 | if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) { |
1022 | m_assembler.testq_rr(left, left); |
1023 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1024 | } |
1025 | move(right, scratchRegister()); |
1026 | return branch64(cond, left, scratchRegister()); |
1027 | } |
1028 | |
1029 | Jump branch64(RelationalCondition cond, RegisterID left, Address right) |
1030 | { |
1031 | m_assembler.cmpq_mr(right.offset, right.base, left); |
1032 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1033 | } |
1034 | |
1035 | Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right) |
1036 | { |
1037 | move(TrustedImmPtr(left.m_ptr), scratchRegister()); |
1038 | return branch64(cond, Address(scratchRegister()), right); |
1039 | } |
1040 | |
1041 | Jump branch64(RelationalCondition cond, Address left, RegisterID right) |
1042 | { |
1043 | m_assembler.cmpq_rm(right, left.offset, left.base); |
1044 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1045 | } |
1046 | |
1047 | Jump branch64(RelationalCondition cond, Address left, TrustedImm32 right) |
1048 | { |
1049 | m_assembler.cmpq_im(right.m_value, left.offset, left.base); |
1050 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1051 | } |
1052 | |
1053 | Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right) |
1054 | { |
1055 | move(right, scratchRegister()); |
1056 | return branch64(cond, left, scratchRegister()); |
1057 | } |
1058 | |
1059 | Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right) |
1060 | { |
1061 | m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale); |
1062 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1063 | } |
1064 | |
1065 | Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right) |
1066 | { |
1067 | load32(left.m_ptr, scratchRegister()); |
1068 | return branch32(cond, scratchRegister(), right); |
1069 | } |
1070 | |
1071 | Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right) |
1072 | { |
1073 | return branch64(cond, left, right); |
1074 | } |
1075 | |
1076 | Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right) |
1077 | { |
1078 | move(right, scratchRegister()); |
1079 | return branchPtr(cond, left, scratchRegister()); |
1080 | } |
1081 | |
1082 | Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask) |
1083 | { |
1084 | m_assembler.testq_rr(reg, mask); |
1085 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1086 | } |
1087 | |
1088 | Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) |
1089 | { |
1090 | // if we are only interested in the low seven bits, this can be tested with a testb |
1091 | if (mask.m_value == -1) |
1092 | m_assembler.testq_rr(reg, reg); |
1093 | else if ((mask.m_value & ~0x7f) == 0) |
1094 | m_assembler.testb_i8r(mask.m_value, reg); |
1095 | else |
1096 | m_assembler.testq_i32r(mask.m_value, reg); |
1097 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1098 | } |
1099 | |
1100 | Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask) |
1101 | { |
1102 | move(mask, scratchRegister()); |
1103 | return branchTest64(cond, reg, scratchRegister()); |
1104 | } |
1105 | |
1106 | void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest) |
1107 | { |
1108 | if (mask.m_value == -1) |
1109 | m_assembler.testq_rr(reg, reg); |
1110 | else if ((mask.m_value & ~0x7f) == 0) |
1111 | m_assembler.testb_i8r(mask.m_value, reg); |
1112 | else |
1113 | m_assembler.testq_i32r(mask.m_value, reg); |
1114 | set32(x86Condition(cond), dest); |
1115 | } |
1116 | |
1117 | void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest) |
1118 | { |
1119 | m_assembler.testq_rr(reg, mask); |
1120 | set32(x86Condition(cond), dest); |
1121 | } |
1122 | |
1123 | Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) |
1124 | { |
1125 | load64(address.m_ptr, scratchRegister()); |
1126 | return branchTest64(cond, scratchRegister(), mask); |
1127 | } |
1128 | |
1129 | Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) |
1130 | { |
1131 | if (mask.m_value == -1) |
1132 | m_assembler.cmpq_im(0, address.offset, address.base); |
1133 | else |
1134 | m_assembler.testq_i32m(mask.m_value, address.offset, address.base); |
1135 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1136 | } |
1137 | |
1138 | Jump branchTest64(ResultCondition cond, Address address, RegisterID reg) |
1139 | { |
1140 | m_assembler.testq_rm(reg, address.offset, address.base); |
1141 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1142 | } |
1143 | |
1144 | Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) |
1145 | { |
1146 | if (mask.m_value == -1) |
1147 | m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale); |
1148 | else |
1149 | m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale); |
1150 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1151 | } |
1152 | |
1154 | Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest) |
1155 | { |
1156 | add64(imm, dest); |
1157 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1158 | } |
1159 | |
1160 | Jump branchAdd64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) |
1161 | { |
1162 | if (src1 == dest) |
1163 | return branchAdd64(cond, src2, dest); |
1164 | move(src2, dest); |
1165 | return branchAdd64(cond, src1, dest); |
1166 | } |
1167 | |
1168 | Jump branchAdd64(ResultCondition cond, Address op1, RegisterID op2, RegisterID dest) |
1169 | { |
1170 | if (op2 == dest) |
1171 | return branchAdd64(cond, op1, dest); |
1172 | if (op1.base == dest) { |
1173 | load32(op1, dest); |
1174 | return branchAdd64(cond, op2, dest); |
1175 | } |
1176 | move(op2, dest); |
1177 | return branchAdd64(cond, op1, dest); |
1178 | } |
1179 | |
1180 | Jump branchAdd64(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest) |
1181 | { |
1182 | return branchAdd64(cond, src2, src1, dest); |
1183 | } |
1184 | |
1185 | Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest) |
1186 | { |
1187 | add64(src, dest); |
1188 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1189 | } |
1190 | |
1191 | Jump branchAdd64(ResultCondition cond, Address src, RegisterID dest) |
1192 | { |
1193 | add64(src, dest); |
1194 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1195 | } |
1196 | |
1197 | Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest) |
1198 | { |
1199 | mul64(src, dest); |
1200 | if (cond != Overflow) |
1201 | m_assembler.testq_rr(dest, dest); |
1202 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1203 | } |
1204 | |
1205 | Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) |
1206 | { |
1207 | if (src1 == dest) |
1208 | return branchMul64(cond, src2, dest); |
1209 | move(src2, dest); |
1210 | return branchMul64(cond, src1, dest); |
1211 | } |
1212 | |
1213 | Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest) |
1214 | { |
1215 | sub64(imm, dest); |
1216 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1217 | } |
1218 | |
1219 | Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest) |
1220 | { |
1221 | sub64(src, dest); |
1222 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1223 | } |
1224 | |
1225 | Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest) |
1226 | { |
1227 | move(src1, dest); |
1228 | return branchSub64(cond, src2, dest); |
1229 | } |
1230 | |
1231 | Jump branchNeg64(ResultCondition cond, RegisterID srcDest) |
1232 | { |
1233 | neg64(srcDest); |
1234 | return Jump(m_assembler.jCC(x86Condition(cond))); |
1235 | } |
1236 | |
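    // The conditional moves below first make sure the else-value is in dest (a
    // plain MOV does not affect flags, so this is safe after the CMP), then
    // conditionally overwrite dest with the then-value via CMOVcc. If the
    // then-value already occupies dest, the condition is inverted and the
    // else-value is moved instead.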
1237 | void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) |
1238 | { |
1239 | m_assembler.cmpq_rr(right, left); |
1240 | cmov(x86Condition(cond), src, dest); |
1241 | } |
1242 | |
1243 | void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) |
1244 | { |
1245 | m_assembler.cmpq_rr(right, left); |
1246 | |
1247 | if (thenCase != dest && elseCase != dest) { |
1248 | move(elseCase, dest); |
1249 | elseCase = dest; |
1250 | } |
1251 | |
1252 | if (elseCase == dest) |
1253 | cmov(x86Condition(cond), thenCase, dest); |
1254 | else |
1255 | cmov(x86Condition(invert(cond)), elseCase, dest); |
1256 | } |
1257 | |
1258 | void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) |
1259 | { |
1260 | if (!right.m_value) { |
1261 | if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { |
1262 | moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest); |
1263 | return; |
1264 | } |
1265 | } |
1266 | |
1267 | m_assembler.cmpq_ir(right.m_value, left); |
1268 | |
1269 | if (thenCase != dest && elseCase != dest) { |
1270 | move(elseCase, dest); |
1271 | elseCase = dest; |
1272 | } |
1273 | |
1274 | if (elseCase == dest) |
1275 | cmov(x86Condition(cond), thenCase, dest); |
1276 | else |
1277 | cmov(x86Condition(invert(cond)), elseCase, dest); |
1278 | } |
1279 | |
1280 | void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) |
1281 | { |
1282 | m_assembler.testq_rr(testReg, mask); |
1283 | cmov(x86Condition(cond), src, dest); |
1284 | } |
1285 | |
1286 | void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) |
1287 | { |
1288 | ASSERT(isInvertible(cond)); |
        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
1290 | |
1291 | m_assembler.testq_rr(right, left); |
1292 | |
1293 | if (thenCase != dest && elseCase != dest) { |
1294 | move(elseCase, dest); |
1295 | elseCase = dest; |
1296 | } |
1297 | |
1298 | if (elseCase == dest) |
1299 | cmov(x86Condition(cond), thenCase, dest); |
1300 | else |
1301 | cmov(x86Condition(invert(cond)), elseCase, dest); |
1302 | } |
1303 | |
1304 | void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID src, RegisterID dest) |
1305 | { |
1306 | // if we are only interested in the low seven bits, this can be tested with a testb |
1307 | if (mask.m_value == -1) |
1308 | m_assembler.testq_rr(testReg, testReg); |
1309 | else if ((mask.m_value & ~0x7f) == 0) |
1310 | m_assembler.testb_i8r(mask.m_value, testReg); |
1311 | else |
1312 | m_assembler.testq_i32r(mask.m_value, testReg); |
1313 | cmov(x86Condition(cond), src, dest); |
1314 | } |
1315 | |
1316 | void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest) |
1317 | { |
1318 | ASSERT(isInvertible(cond)); |
        ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag.");
1320 | |
1321 | if (mask.m_value == -1) |
1322 | m_assembler.testq_rr(testReg, testReg); |
1323 | else if (!(mask.m_value & ~0x7f)) |
1324 | m_assembler.testb_i8r(mask.m_value, testReg); |
1325 | else |
1326 | m_assembler.testq_i32r(mask.m_value, testReg); |
1327 | |
1328 | if (thenCase != dest && elseCase != dest) { |
1329 | move(elseCase, dest); |
1330 | elseCase = dest; |
1331 | } |
1332 | |
1333 | if (elseCase == dest) |
1334 | cmov(x86Condition(cond), thenCase, dest); |
1335 | else |
1336 | cmov(x86Condition(invert(cond)), elseCase, dest); |
1337 | } |
1338 | |
1339 | template<typename LeftType, typename RightType> |
1340 | void moveDoubleConditionally64(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) |
1341 | { |
        static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested arguments could be aliased to dest. Use moveDoubleConditionallyDouble().");
1343 | |
1344 | if (thenCase != dest && elseCase != dest) { |
1345 | moveDouble(elseCase, dest); |
1346 | elseCase = dest; |
1347 | } |
1348 | |
1349 | if (elseCase == dest) { |
1350 | Jump falseCase = branch64(invert(cond), left, right); |
1351 | moveDouble(thenCase, dest); |
1352 | falseCase.link(this); |
1353 | } else { |
1354 | Jump trueCase = branch64(cond, left, right); |
1355 | moveDouble(elseCase, dest); |
1356 | trueCase.link(this); |
1357 | } |
1358 | } |
1359 | |
1360 | template<typename TestType, typename MaskType> |
1361 | void moveDoubleConditionallyTest64(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) |
1362 | { |
        static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested arguments could be aliased to dest. Use moveDoubleConditionallyDouble().");
1364 | |
        if (elseCase == dest && isInvertible(cond)) {
            Jump falseCase = branchTest64(invert(cond), test, mask);
            moveDouble(thenCase, dest);
            falseCase.link(this);
            return;
        }

        if (thenCase == dest) {
            Jump trueCase = branchTest64(cond, test, mask);
            moveDouble(elseCase, dest);
            trueCase.link(this);
            return;
        }
1374 | |
1375 | Jump trueCase = branchTest64(cond, test, mask); |
1376 | moveDouble(elseCase, dest); |
1377 | Jump falseCase = jump(); |
1378 | trueCase.link(this); |
1379 | moveDouble(thenCase, dest); |
1380 | falseCase.link(this); |
1381 | } |
1382 | |
1383 | void abortWithReason(AbortReason reason) |
1384 | { |
1385 | move(TrustedImm32(reason), X86Registers::r11); |
1386 | breakpoint(); |
1387 | } |
1388 | |
1389 | void abortWithReason(AbortReason reason, intptr_t misc) |
1390 | { |
1391 | move(TrustedImm64(misc), X86Registers::r10); |
1392 | abortWithReason(reason); |
1393 | } |
1394 | |
1395 | ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) |
1396 | { |
1397 | ConvertibleLoadLabel result = ConvertibleLoadLabel(this); |
1398 | m_assembler.movq_mr(address.offset, address.base, dest); |
1399 | return result; |
1400 | } |
1401 | |
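    // moveWithPatch always emits the full 10-byte "movabs imm64, reg" form, so the
    // immediate sits at a fixed offset and can be repatched after linking.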
1402 | DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest) |
1403 | { |
1404 | padBeforePatch(); |
1405 | m_assembler.movq_i64r(initialValue.asIntptr(), dest); |
1406 | return DataLabelPtr(this); |
1407 | } |
1408 | |
1409 | DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest) |
1410 | { |
1411 | padBeforePatch(); |
1412 | m_assembler.movq_i64r(initialValue.m_value, dest); |
1413 | return DataLabelPtr(this); |
1414 | } |
1415 | |
1416 | Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr)) |
1417 | { |
1418 | dataLabel = moveWithPatch(initialRightValue, scratchRegister()); |
1419 | return branch64(cond, left, scratchRegister()); |
1420 | } |
1421 | |
1422 | Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(nullptr)) |
1423 | { |
1424 | dataLabel = moveWithPatch(initialRightValue, scratchRegister()); |
1425 | return branch64(cond, left, scratchRegister()); |
1426 | } |
1427 | |
1428 | Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) |
1429 | { |
1430 | padBeforePatch(); |
1431 | m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister()); |
1432 | dataLabel = DataLabel32(this); |
1433 | return branch32(cond, left, scratchRegister()); |
1434 | } |
1435 | |
1436 | DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) |
1437 | { |
1438 | DataLabelPtr label = moveWithPatch(initialValue, scratchRegister()); |
1439 | store64(scratchRegister(), address); |
1440 | return label; |
1441 | } |
1442 | |
1443 | PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm) |
1444 | { |
1445 | return PatchableJump(branch64(cond, reg, imm)); |
1446 | } |
1447 | |
1448 | PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right) |
1449 | { |
1450 | return PatchableJump(branch64(cond, left, right)); |
1451 | } |
1452 | |
1453 | using MacroAssemblerX86Common::branch8; |
1454 | Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) |
1455 | { |
1456 | MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister()); |
1457 | return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister()), right); |
1458 | } |
1459 | |
1460 | using MacroAssemblerX86Common::branchTest8; |
1461 | Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1)) |
1462 | { |
1463 | TrustedImm32 mask8(static_cast<int8_t>(mask.m_value)); |
1464 | TrustedImmPtr addr(reinterpret_cast<void*>(address.offset)); |
1465 | MacroAssemblerX86Common::move(addr, scratchRegister()); |
1466 | return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister(), address.base, TimesOne), mask8); |
1467 | } |
1468 | |
1469 | Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) |
1470 | { |
1471 | TrustedImm32 mask8(static_cast<int8_t>(mask.m_value)); |
1472 | MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister()); |
1473 | return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister()), mask8); |
1474 | } |
1475 | |
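    // XCHG with a memory operand is implicitly locked on x86, so these exchanges
    // are atomic without an explicit lock prefix.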
1476 | void xchg64(RegisterID reg, Address address) |
1477 | { |
1478 | m_assembler.xchgq_rm(reg, address.offset, address.base); |
1479 | } |
1480 | |
1481 | void xchg64(RegisterID reg, BaseIndex address) |
1482 | { |
1483 | m_assembler.xchgq_rm(reg, address.offset, address.base, address.index, address.scale); |
1484 | } |
1485 | |
1486 | void atomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address, RegisterID result) |
1487 | { |
1488 | atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base); }); |
1489 | } |
1490 | |
1491 | void atomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address, RegisterID result) |
1492 | { |
1493 | atomicStrongCAS(cond, expectedAndResult, result, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base, address.index, address.scale); }); |
1494 | } |
1495 | |
1496 | void atomicStrongCAS64(RegisterID expectedAndResult, RegisterID newValue, Address address) |
1497 | { |
1498 | atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base); }); |
1499 | } |
1500 | |
1501 | void atomicStrongCAS64(RegisterID expectedAndResult, RegisterID newValue, BaseIndex address) |
1502 | { |
1503 | atomicStrongCAS(expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base, address.index, address.scale); }); |
1504 | } |
1505 | |
1506 | Jump branchAtomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, Address address) |
1507 | { |
1508 | return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base); }); |
1509 | } |
1510 | |
1511 | Jump branchAtomicStrongCAS64(StatusCondition cond, RegisterID expectedAndResult, RegisterID newValue, BaseIndex address) |
1512 | { |
1513 | return branchAtomicStrongCAS(cond, expectedAndResult, address, [&] { m_assembler.cmpxchgq_rm(newValue, address.offset, address.base, address.index, address.scale); }); |
1514 | } |
1515 | |
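    // x86's cmpxchg never fails spuriously, so the weak and relaxed-weak CAS
    // variants below simply forward to the strong CAS implementations.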
1516 | void atomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result) |
1517 | { |
1518 | atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result); |
1519 | } |
1520 | |
1521 | void atomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result) |
1522 | { |
1523 | atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result); |
1524 | } |
1525 | |
1526 | Jump branchAtomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address) |
1527 | { |
1528 | return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address); |
1529 | } |
1530 | |
1531 | Jump branchAtomicWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address) |
1532 | { |
1533 | return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address); |
1534 | } |
1535 | |
1536 | void atomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address, RegisterID result) |
1537 | { |
1538 | atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result); |
1539 | } |
1540 | |
1541 | void atomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address, RegisterID result) |
1542 | { |
1543 | atomicStrongCAS64(cond, expectedAndClobbered, newValue, address, result); |
1544 | } |
1545 | |
1546 | Jump branchAtomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, Address address) |
1547 | { |
1548 | return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address); |
1549 | } |
1550 | |
1551 | Jump branchAtomicRelaxedWeakCAS64(StatusCondition cond, RegisterID expectedAndClobbered, RegisterID newValue, BaseIndex address) |
1552 | { |
1553 | return branchAtomicStrongCAS64(cond, expectedAndClobbered, newValue, address); |
1554 | } |
1555 | |
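    // The read-modify-write operations below are made atomic by emitting a lock
    // prefix, which applies to the single memory-operand instruction that follows.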
    void atomicAdd64(TrustedImm32 imm, Address address)
    {
        m_assembler.lock();
        add64(imm, address);
    }

    void atomicAdd64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.lock();
        add64(imm, address);
    }

    void atomicAdd64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        add64(reg, address);
    }

    void atomicAdd64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        add64(reg, address);
    }

    void atomicSub64(TrustedImm32 imm, Address address)
    {
        m_assembler.lock();
        sub64(imm, address);
    }

    void atomicSub64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.lock();
        sub64(imm, address);
    }

    void atomicSub64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        sub64(reg, address);
    }

    void atomicSub64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        sub64(reg, address);
    }

    void atomicAnd64(TrustedImm32 imm, Address address)
    {
        m_assembler.lock();
        and64(imm, address);
    }

    void atomicAnd64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.lock();
        and64(imm, address);
    }

    void atomicAnd64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        and64(reg, address);
    }

    void atomicAnd64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        and64(reg, address);
    }

    void atomicOr64(TrustedImm32 imm, Address address)
    {
        m_assembler.lock();
        or64(imm, address);
    }

    void atomicOr64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.lock();
        or64(imm, address);
    }

    void atomicOr64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        or64(reg, address);
    }

    void atomicOr64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        or64(reg, address);
    }

    void atomicXor64(TrustedImm32 imm, Address address)
    {
        m_assembler.lock();
        xor64(imm, address);
    }

    void atomicXor64(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.lock();
        xor64(imm, address);
    }

    void atomicXor64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        xor64(reg, address);
    }

    void atomicXor64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        xor64(reg, address);
    }

    void atomicNeg64(Address address)
    {
        m_assembler.lock();
        neg64(address);
    }

    void atomicNeg64(BaseIndex address)
    {
        m_assembler.lock();
        neg64(address);
    }

    void atomicNot64(Address address)
    {
        m_assembler.lock();
        not64(address);
    }

    void atomicNot64(BaseIndex address)
    {
        m_assembler.lock();
        not64(address);
    }

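    // LOCK XADD atomically exchanges reg with the memory operand and stores the
    // sum, so reg ends up holding the value that was previously in memory.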
    void atomicXchgAdd64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        m_assembler.xaddq_rm(reg, address.offset, address.base);
    }

    void atomicXchgAdd64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        m_assembler.xaddq_rm(reg, address.offset, address.base, address.index, address.scale);
    }

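    // XCHG with a memory operand is implicitly locked on x86; the explicit LOCK
    // prefix here is redundant but harmless.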
    void atomicXchg64(RegisterID reg, Address address)
    {
        m_assembler.lock();
        m_assembler.xchgq_rm(reg, address.offset, address.base);
    }

    void atomicXchg64(RegisterID reg, BaseIndex address)
    {
        m_assembler.lock();
        m_assembler.xchgq_rm(reg, address.offset, address.base, address.index, address.scale);
    }

#if ENABLE(FAST_TLS_JIT)
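    // On Darwin, where fast TLS JIT access is used, the GS segment base points at
    // the thread's TLS area, so a GS-prefixed move addresses a TLS slot directly.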
    void loadFromTLS64(uint32_t offset, RegisterID dst)
    {
        m_assembler.gs();
        m_assembler.movq_mr(offset, dst);
    }

    void storeToTLS64(RegisterID src, uint32_t offset)
    {
        m_assembler.gs();
        m_assembler.movq_rm(src, offset);
    }
#endif

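    // Every uint32_t value fits in the positive range of int64_t, so the 64-bit
    // signed truncation below also serves as a double-to-uint32 truncation;
    // callers use only the low 32 bits of dest.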
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.cvttsd2siq_rr(src, dest);
    }

    void truncateDoubleToInt64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.cvttsd2siq_rr(src, dest);
    }

    // int64Min should contain exactly 0x43E0000000000000 == 2^63 as a double, i.e. the
    // magnitude of int64_t::min(). scratch may be the same FPR as src.
    void truncateDoubleToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min)
    {
        ASSERT(scratch != int64Min);

        // Since x86 has no floating point to unsigned integer instruction, we have to use the
        // signed integer conversion instruction. If src is below 2^63, the signed truncation is
        // already the right answer. Otherwise we: subtract 2^63; truncate the now in-range value
        // to int64_t; then add 2^63 back by OR-ing the sign bit into the destination GPR.

        Jump large = branchDouble(DoubleGreaterThanOrEqual, src, int64Min);
        m_assembler.cvttsd2siq_rr(src, dest);
        Jump done = jump();
        large.link(this);
        moveDouble(src, scratch);
        m_assembler.subsd_rr(int64Min, scratch); // Bias src into signed range.
        m_assembler.movq_i64r(0x8000000000000000, scratchRegister());
        m_assembler.cvttsd2siq_rr(scratch, dest);
        m_assembler.orq_rr(scratchRegister(), dest); // Add the 2^63 bias back.
        done.link(this);
    }

    void truncateFloatToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.cvttss2siq_rr(src, dest);
    }

    void truncateFloatToInt64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.cvttss2siq_rr(src, dest);
    }

    // int64Min should contain exactly 0x5f000000 == 2^63 as a float, i.e. the
    // magnitude of int64_t::min(). scratch may be the same FPR as src.
    void truncateFloatToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min)
    {
        ASSERT(scratch != int64Min);

        // Since x86 has no floating point to unsigned integer instruction, we have to use the
        // signed integer conversion instruction. If src is below 2^63, the signed truncation is
        // already the right answer. Otherwise we: subtract 2^63; truncate the now in-range value
        // to int64_t; then add 2^63 back by OR-ing the sign bit into the destination GPR.

        Jump large = branchFloat(DoubleGreaterThanOrEqual, src, int64Min);
        m_assembler.cvttss2siq_rr(src, dest);
        Jump done = jump();
        large.link(this);
        moveDouble(src, scratch);
        m_assembler.subss_rr(int64Min, scratch); // Bias src into signed range.
        m_assembler.movq_i64r(0x8000000000000000, scratchRegister());
        m_assembler.cvttss2siq_rr(scratch, dest);
        m_assembler.orq_rr(scratchRegister(), dest); // Add the 2^63 bias back.
        done.link(this);
    }

    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sdq_rr(src, dest);
    }

    void convertInt64ToDouble(Address src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sdq_mr(src.offset, src.base, dest);
    }

    void convertInt64ToFloat(RegisterID src, FPRegisterID dest)
    {
        m_assembler.cvtsi2ssq_rr(src, dest);
    }

    void convertInt64ToFloat(Address src, FPRegisterID dest)
    {
        m_assembler.cvtsi2ssq_mr(src.offset, src.base, dest);
    }

    // scratch may be the same as src; the dedicated scratch register is used (and clobbered) as a second scratch.
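    // For values with the sign bit set, halve with round-to-odd -- (src >> 1) | (src & 1)
    // keeps the shifted-out bit as a sticky bit -- convert the now in-range value, then
    // double it. Rounding to odd first makes the final doubling round to the correct
    // uint64-to-double result.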
    void convertUInt64ToDouble(RegisterID src, FPRegisterID dest, RegisterID scratch)
    {
        RegisterID scratch2 = scratchRegister();

        m_assembler.testq_rr(src, src);
        AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed));
        m_assembler.cvtsi2sdq_rr(src, dest); // Sign bit clear: src is non-negative, so the signed conversion is correct.
        AssemblerLabel done = m_assembler.jmp();
        m_assembler.linkJump(signBitSet, m_assembler.label());
        if (scratch != src)
            m_assembler.movq_rr(src, scratch);
        m_assembler.movq_rr(src, scratch2);
        m_assembler.shrq_i8r(1, scratch); // scratch = src >> 1
        m_assembler.andq_ir(1, scratch2); // scratch2 = src & 1
        m_assembler.orq_rr(scratch, scratch2); // Round-to-odd halving.
        m_assembler.cvtsi2sdq_rr(scratch2, dest);
        m_assembler.addsd_rr(dest, dest); // Double the result back.
        m_assembler.linkJump(done, m_assembler.label());
    }

    // scratch may be the same as src; the dedicated scratch register is used (and clobbered) as a second scratch.
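    // Same round-to-odd halving trick as convertUInt64ToDouble above.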
    void convertUInt64ToFloat(RegisterID src, FPRegisterID dest, RegisterID scratch)
    {
        RegisterID scratch2 = scratchRegister();
        m_assembler.testq_rr(src, src);
        AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed));
        m_assembler.cvtsi2ssq_rr(src, dest); // Sign bit clear: src is non-negative, so the signed conversion is correct.
        AssemblerLabel done = m_assembler.jmp();
        m_assembler.linkJump(signBitSet, m_assembler.label());
        if (scratch != src)
            m_assembler.movq_rr(src, scratch);
        m_assembler.movq_rr(src, scratch2);
        m_assembler.shrq_i8r(1, scratch); // scratch = src >> 1
        m_assembler.andq_ir(1, scratch2); // scratch2 = src & 1
        m_assembler.orq_rr(scratch, scratch2); // Round-to-odd halving.
        m_assembler.cvtsi2ssq_rr(scratch2, dest);
        m_assembler.addss_rr(dest, dest); // Double the result back.
        m_assembler.linkJump(done, m_assembler.label());
    }

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

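    // Patchable calls are emitted as "movq $target, %r11; call *%r11" (see linkCall()
    // below). The 3-byte indirect call separates the call's label from the end of the
    // 64-bit pointer immediate, which is why REPATCH_OFFSET_CALL_R11 is 3.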
    template<PtrTag resultTag, PtrTag locationTag>
    static FunctionPtr<resultTag> readCallTarget(CodeLocationCall<locationTag> call)
    {
        return FunctionPtr<resultTag>(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation()));
    }

    bool haveScratchRegisterForBlinding() { return m_allowScratchRegister; }
    RegisterID scratchRegisterForBlinding() { return scratchRegister(); }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }

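    // The patchable pointer load is a 10-byte "movq imm64, reg" (REX prefix + opcode +
    // 8-byte immediate); stepping back that many bytes from the data label lands on the
    // start of the instruction that jump replacement overwrites. The 32-bit variant below
    // is the 6-byte "movl imm32, reg".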
    template<PtrTag tag>
    static CodeLocationLabel<tag> startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr<tag> label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 8;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    template<PtrTag tag>
    static CodeLocationLabel<tag> startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32<tag> label)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        const int immediateBytes = 4;
        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
        ASSERT(totalBytes >= maxJumpReplacementSize());
        return label.labelAtOffset(-totalBytes);
    }

    template<PtrTag tag>
    static CodeLocationLabel<tag> startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr<tag> label)
    {
        return startOfBranchPtrWithPatchOnRegister(label);
    }

    template<PtrTag tag>
    static CodeLocationLabel<tag> startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32<tag> label)
    {
        return startOfBranch32WithPatchOnRegister(label);
    }

    template<PtrTag tag>
    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel<tag> instructionStart, Address, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister);
    }

    template<PtrTag tag>
    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel<tag> instructionStart, Address, int32_t initialValue)
    {
        X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, s_scratchRegister);
    }

    template<PtrTag tag>
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel<tag> instructionStart, RegisterID, void* initialValue)
    {
        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister);
    }

    template<PtrTag callTag, PtrTag destTag>
    static void repatchCall(CodeLocationCall<callTag> call, CodeLocationLabel<destTag> destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

    template<PtrTag callTag, PtrTag destTag>
    static void repatchCall(CodeLocationCall<callTag> call, FunctionPtr<destTag> destination)
    {
        X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
    }

private:
    // If lzcnt is not available, use this after BSR
    // to count the leading zeros.
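    // BSR leaves the bit index of the highest set bit in dst and sets ZF when the
    // input was zero. XOR-ing an index in [0, 63] with 0x3f yields 63 - index, the
    // leading zero count; a zero input takes the branch and yields 64.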
    void clz64AfterBsr(RegisterID dst)
    {
        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
        move(TrustedImm32(64), dst);

        Jump skipNonZeroCase = jump();
        srcIsNonZero.link(this);
        xor64(TrustedImm32(0x3f), dst);
        skipNonZeroCase.link(this);
    }

    friend class LinkBuffer;

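    // Near calls are PC-relative and are linked directly (as a jump for tail calls).
    // All other calls go through the "movq $target, %r11; call *%r11" sequence, so
    // they are linked by patching the pointer that the movq materializes.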
    template<PtrTag tag>
    static void linkCall(void* code, Call call, FunctionPtr<tag> function)
    {
        if (!call.isFlagSet(Call::Near))
            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.executableAddress());
        else if (call.isFlagSet(Call::Tail))
            X86Assembler::linkJump(code, call.m_label, function.executableAddress());
        else
            X86Assembler::linkCall(code, call.m_label, function.executableAddress());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(X86_64)